diff --git a/SOURCES/rsync-3.1.3-cve-2018-25032.patch b/SOURCES/rsync-3.1.3-cve-2018-25032.patch new file mode 100644 index 0000000..e82e29c --- /dev/null +++ b/SOURCES/rsync-3.1.3-cve-2018-25032.patch @@ -0,0 +1,343 @@ +From 5c44459c3b28a9bd3283aaceab7c615f8020c531 Mon Sep 17 00:00:00 2001 +From: Mark Adler +Date: Tue, 17 Apr 2018 22:09:22 -0700 +Subject: [PATCH] Fix a bug that can crash deflate on some input when using + Z_FIXED. + +This bug was reported by Danilo Ramos of Eideticom, Inc. It has +lain in wait 13 years before being found! The bug was introduced +in zlib 1.2.2.2, with the addition of the Z_FIXED option. That +option forces the use of fixed Huffman codes. For rare inputs with +a large number of distant matches, the pending buffer into which +the compressed data is written can overwrite the distance symbol +table which it overlays. That results in corrupted output due to +invalid distances, and can result in out-of-bound accesses, +crashing the application. + +The fix here combines the distance buffer and literal/length +buffers into a single symbol buffer. Now three bytes of pending +buffer space are opened up for each literal or length/distance +pair consumed, instead of the previous two bytes. This assures +that the pending buffer cannot overwrite the symbol table, since +the maximum fixed code compressed length/distance is 31 bits, and +since there are four bytes of pending space for every three bytes +of symbol space. +--- + deflate.c | 74 ++++++++++++++++++++++++++++++++++++++++--------------- + deflate.h | 25 +++++++++---------- + trees.c | 50 +++++++++++-------------------------- + 3 files changed, 79 insertions(+), 70 deletions(-) + +diff --git a/zlib/deflate.c b/zlib/deflate.c +index 425babc00..19cba873a 100644 +--- a/zlib/deflate.c ++++ b/zlib/deflate.c +@@ -255,11 +255,6 @@ int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy, + int wrap = 1; + static const char my_version[] = ZLIB_VERSION; + +- ushf *overlay; +- /* We overlay pending_buf and d_buf+l_buf. This works since the average +- * output size for (length,distance) codes is <= 24 bits. +- */ +- + if (version == Z_NULL || version[0] != my_version[0] || + stream_size != sizeof(z_stream)) { + return Z_VERSION_ERROR; +@@ -329,9 +324,47 @@ int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy, + + s->lit_bufsize = 1 << (memLevel + 6); /* 16K elements by default */ + +- overlay = (ushf *) ZALLOC(strm, s->lit_bufsize, sizeof(ush)+2); +- s->pending_buf = (uchf *) overlay; +- s->pending_buf_size = (ulg)s->lit_bufsize * (sizeof(ush)+2L); ++ /* We overlay pending_buf and sym_buf. This works since the average size ++ * for length/distance pairs over any compressed block is assured to be 31 ++ * bits or less. ++ * ++ * Analysis: The longest fixed codes are a length code of 8 bits plus 5 ++ * extra bits, for lengths 131 to 257. The longest fixed distance codes are ++ * 5 bits plus 13 extra bits, for distances 16385 to 32768. The longest ++ * possible fixed-codes length/distance pair is then 31 bits total. ++ * ++ * sym_buf starts one-fourth of the way into pending_buf. So there are ++ * three bytes in sym_buf for every four bytes in pending_buf. Each symbol ++ * in sym_buf is three bytes -- two for the distance and one for the ++ * literal/length. As each symbol is consumed, the pointer to the next ++ * sym_buf value to read moves forward three bytes. From that symbol, up to ++ * 31 bits are written to pending_buf. 
The closest the written pending_buf ++ * bits gets to the next sym_buf symbol to read is just before the last ++ * code is written. At that time, 31*(n-2) bits have been written, just ++ * after 24*(n-2) bits have been consumed from sym_buf. sym_buf starts at ++ * 8*n bits into pending_buf. (Note that the symbol buffer fills when n-1 ++ * symbols are written.) The closest the writing gets to what is unread is ++ * then n+14 bits. Here n is lit_bufsize, which is 16384 by default, and ++ * can range from 128 to 32768. ++ * ++ * Therefore, at a minimum, there are 142 bits of space between what is ++ * written and what is read in the overlain buffers, so the symbols cannot ++ * be overwritten by the compressed data. That space is actually 139 bits, ++ * due to the three-bit fixed-code block header. ++ * ++ * That covers the case where either Z_FIXED is specified, forcing fixed ++ * codes, or when the use of fixed codes is chosen, because that choice ++ * results in a smaller compressed block than dynamic codes. That latter ++ * condition then assures that the above analysis also covers all dynamic ++ * blocks. A dynamic-code block will only be chosen to be emitted if it has ++ * fewer bits than a fixed-code block would for the same set of symbols. ++ * Therefore its average symbol length is assured to be less than 31. So ++ * the compressed data for a dynamic block also cannot overwrite the ++ * symbols from which it is being constructed. ++ */ ++ ++ s->pending_buf = (uchf *) ZALLOC(strm, s->lit_bufsize, 4); ++ s->pending_buf_size = (ulg)s->lit_bufsize * 4; + + if (s->window == Z_NULL || s->prev == Z_NULL || s->head == Z_NULL || + s->pending_buf == Z_NULL) { +@@ -340,8 +373,12 @@ int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy, + deflateEnd (strm); + return Z_MEM_ERROR; + } +- s->d_buf = overlay + s->lit_bufsize/sizeof(ush); +- s->l_buf = s->pending_buf + (1+sizeof(ush))*s->lit_bufsize; ++ s->sym_buf = s->pending_buf + s->lit_bufsize; ++ s->sym_end = (s->lit_bufsize - 1) * 3; ++ /* We avoid equality with lit_bufsize*3 because of wraparound at 64K ++ * on 16 bit machines and because stored blocks are restricted to ++ * 64K-1 bytes. 
++ */ + + s->level = level; + s->strategy = strategy; +@@ -552,7 +589,7 @@ int ZEXPORT deflatePrime (strm, bits, value) + + if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; + s = strm->state; +- if ((Bytef *)(s->d_buf) < s->pending_out + ((Buf_size + 7) >> 3)) ++ if (s->sym_buf < s->pending_out + ((Buf_size + 7) >> 3)) + return Z_BUF_ERROR; + do { + put = Buf_size - s->bi_valid; +@@ -1113,7 +1150,6 @@ int ZEXPORT deflateCopy (dest, source) + #else + deflate_state *ds; + deflate_state *ss; +- ushf *overlay; + + + if (source == Z_NULL || dest == Z_NULL || source->state == Z_NULL) { +@@ -1133,8 +1169,7 @@ int ZEXPORT deflateCopy (dest, source) + ds->window = (Bytef *) ZALLOC(dest, ds->w_size, 2*sizeof(Byte)); + ds->prev = (Posf *) ZALLOC(dest, ds->w_size, sizeof(Pos)); + ds->head = (Posf *) ZALLOC(dest, ds->hash_size, sizeof(Pos)); +- overlay = (ushf *) ZALLOC(dest, ds->lit_bufsize, sizeof(ush)+2); +- ds->pending_buf = (uchf *) overlay; ++ ds->pending_buf = (uchf *) ZALLOC(dest, ds->lit_bufsize, 4); + + if (ds->window == Z_NULL || ds->prev == Z_NULL || ds->head == Z_NULL || + ds->pending_buf == Z_NULL) { +@@ -1148,8 +1183,7 @@ int ZEXPORT deflateCopy (dest, source) + zmemcpy(ds->pending_buf, ss->pending_buf, (uInt)ds->pending_buf_size); + + ds->pending_out = ds->pending_buf + (ss->pending_out - ss->pending_buf); +- ds->d_buf = overlay + ds->lit_bufsize/sizeof(ush); +- ds->l_buf = ds->pending_buf + (1+sizeof(ush))*ds->lit_bufsize; ++ ds->sym_buf = ds->pending_buf + ds->lit_bufsize; + + ds->l_desc.dyn_tree = ds->dyn_ltree; + ds->d_desc.dyn_tree = ds->dyn_dtree; +@@ -1771,7 +1771,7 @@ local block_state deflate_fast(s, flush) + FLUSH_BLOCK(s, 1); + return finish_done; + } +- if (s->last_lit) ++ if (s->sym_next) + FLUSH_BLOCK(s, 0); + return block_done; + } +@@ -1912,7 +1912,7 @@ local block_state deflate_slow(s, flush) + FLUSH_BLOCK(s, 1); + return finish_done; + } +- if (s->last_lit) ++ if (s->sym_next) + FLUSH_BLOCK(s, 0); + return block_done; + } +@@ -1987,7 +1987,7 @@ local block_state deflate_rle(s, flush) + FLUSH_BLOCK(s, 1); + return finish_done; + } +- if (s->last_lit) ++ if (s->sym_next) + FLUSH_BLOCK(s, 0); + return block_done; + } +@@ -2026,7 +2026,7 @@ local block_state deflate_huff(s, flush) + FLUSH_BLOCK(s, 1); + return finish_done; + } +- if (s->last_lit) ++ if (s->sym_next) + FLUSH_BLOCK(s, 0); + return block_done; + } +diff --git a/zlib/deflate.h b/zlib/deflate.h +index 23ecdd312..d4cf1a98b 100644 +--- a/zlib/deflate.h ++++ b/zlib/deflate.h +@@ -217,7 +217,7 @@ typedef struct internal_state { + /* Depth of each subtree used as tie breaker for trees of equal frequency + */ + +- uchf *l_buf; /* buffer for literals or lengths */ ++ uchf *sym_buf; /* buffer for distances and literals/lengths */ + + uInt lit_bufsize; + /* Size of match buffer for literals/lengths. There are 4 reasons for +@@ -239,13 +239,8 @@ typedef struct internal_state { + * - I can't count above 4 + */ + +- uInt last_lit; /* running index in l_buf */ +- +- ushf *d_buf; +- /* Buffer for distances. To simplify the code, d_buf and l_buf have +- * the same number of elements. To use different lengths, an extra flag +- * array would be necessary. 
+- */ ++ uInt sym_next; /* running index in sym_buf */ ++ uInt sym_end; /* symbol table full when sym_next reaches this */ + + ulg opt_len; /* bit length of current block with optimal trees */ + ulg static_len; /* bit length of current block with static trees */ +@@ -317,20 +317,22 @@ void ZLIB_INTERNAL _tr_stored_block OF((deflate_state *s, charf *buf, + + # define _tr_tally_lit(s, c, flush) \ + { uch cc = (c); \ +- s->d_buf[s->last_lit] = 0; \ +- s->l_buf[s->last_lit++] = cc; \ ++ s->sym_buf[s->sym_next++] = 0; \ ++ s->sym_buf[s->sym_next++] = 0; \ ++ s->sym_buf[s->sym_next++] = cc; \ + s->dyn_ltree[cc].Freq++; \ +- flush = (s->last_lit == s->lit_bufsize-1); \ ++ flush = (s->sym_next == s->sym_end); \ + } + # define _tr_tally_dist(s, distance, length, flush) \ + { uch len = (length); \ + ush dist = (distance); \ +- s->d_buf[s->last_lit] = dist; \ +- s->l_buf[s->last_lit++] = len; \ ++ s->sym_buf[s->sym_next++] = dist; \ ++ s->sym_buf[s->sym_next++] = dist >> 8; \ ++ s->sym_buf[s->sym_next++] = len; \ + dist--; \ + s->dyn_ltree[_length_code[len]+LITERALS+1].Freq++; \ + s->dyn_dtree[d_code(dist)].Freq++; \ +- flush = (s->last_lit == s->lit_bufsize-1); \ ++ flush = (s->sym_next == s->sym_end); \ + } + #else + # define _tr_tally_lit(s, c, flush) flush = _tr_tally(s, 0, c) +diff --git a/zlib/trees.c b/zlib/trees.c +index 4f4a65011..decaeb7c3 100644 +--- a/zlib/trees.c ++++ b/zlib/trees.c +@@ -416,7 +416,7 @@ local void init_block(s) + + s->dyn_ltree[END_BLOCK].Freq = 1; + s->opt_len = s->static_len = 0L; +- s->last_lit = s->matches = 0; ++ s->sym_next = s->matches = 0; + } + + #define SMALLEST 1 +@@ -948,7 +948,7 @@ void ZLIB_INTERNAL _tr_flush_block(s, buf, stored_len, last) + + Tracev((stderr, "\nopt %lu(%lu) stat %lu(%lu) stored %lu lit %u ", + opt_lenb, s->opt_len, static_lenb, s->static_len, stored_len, +- s->last_lit)); ++ s->sym_next / 3)); + + if (static_lenb <= opt_lenb) opt_lenb = static_lenb; + +@@ -1017,8 +1017,9 @@ int ZLIB_INTERNAL _tr_tally (s, dist, lc) + unsigned dist; /* distance of matched string */ + unsigned lc; /* match length-MIN_MATCH or unmatched char (if dist==0) */ + { +- s->d_buf[s->last_lit] = (ush)dist; +- s->l_buf[s->last_lit++] = (uch)lc; ++ s->sym_buf[s->sym_next++] = dist; ++ s->sym_buf[s->sym_next++] = dist >> 8; ++ s->sym_buf[s->sym_next++] = lc; + if (dist == 0) { + /* lc is the unmatched char */ + s->dyn_ltree[lc].Freq++; +@@ -1033,30 +1034,7 @@ int ZLIB_INTERNAL _tr_tally (s, dist, lc) + s->dyn_ltree[_length_code[lc]+LITERALS+1].Freq++; + s->dyn_dtree[d_code(dist)].Freq++; + } +- +-#ifdef TRUNCATE_BLOCK +- /* Try to guess if it is profitable to stop the current block here */ +- if ((s->last_lit & 0x1fff) == 0 && s->level > 2) { +- /* Compute an upper bound for the compressed length */ +- ulg out_length = (ulg)s->last_lit*8L; +- ulg in_length = (ulg)((long)s->strstart - s->block_start); +- int dcode; +- for (dcode = 0; dcode < D_CODES; dcode++) { +- out_length += (ulg)s->dyn_dtree[dcode].Freq * +- (5L+extra_dbits[dcode]); +- } +- out_length >>= 3; +- Tracev((stderr,"\nlast_lit %u, in %ld, out ~%ld(%ld%%) ", +- s->last_lit, in_length, out_length, +- 100L - out_length*100L/in_length)); +- if (s->matches < s->last_lit/2 && out_length < in_length/2) return 1; +- } +-#endif +- return (s->last_lit == s->lit_bufsize-1); +- /* We avoid equality with lit_bufsize because of wraparound at 64K +- * on 16 bit machines and because stored blocks are restricted to +- * 64K-1 bytes. 
+- */ ++ return (s->sym_next == s->sym_end); + } + + /* =========================================================================== +@@ -1069,13 +1047,14 @@ local void compress_block(s, ltree, dtree) + { + unsigned dist; /* distance of matched string */ + int lc; /* match length or unmatched char (if dist == 0) */ +- unsigned lx = 0; /* running index in l_buf */ ++ unsigned sx = 0; /* running index in sym_buf */ + unsigned code; /* the code to send */ + int extra; /* number of extra bits to send */ + +- if (s->last_lit != 0) do { +- dist = s->d_buf[lx]; +- lc = s->l_buf[lx++]; ++ if (s->sym_next != 0) do { ++ dist = s->sym_buf[sx++] & 0xff; ++ dist += (unsigned)(s->sym_buf[sx++] & 0xff) << 8; ++ lc = s->sym_buf[sx++]; + if (dist == 0) { + send_code(s, lc, ltree); /* send a literal byte */ + Tracecv(isgraph(lc), (stderr," '%c' ", lc)); +@@ -1100,11 +1079,10 @@ local void compress_block(s, ltree, dtree) + } + } /* literal or match pair ? */ + +- /* Check that the overlay between pending_buf and d_buf+l_buf is ok: */ +- Assert((uInt)(s->pending) < s->lit_bufsize + 2*lx, +- "pendingBuf overflow"); ++ /* Check that the overlay between pending_buf and sym_buf is ok: */ ++ Assert(s->pending < s->lit_bufsize + sx, "pendingBuf overflow"); + +- } while (lx < s->last_lit); ++ } while (sx < s->sym_next); + + send_code(s, END_BLOCK, ltree); + } diff --git a/SOURCES/rsync-3.2.3-atimes.patch b/SOURCES/rsync-3.2.3-atimes.patch new file mode 100644 index 0000000..aeba803 --- /dev/null +++ b/SOURCES/rsync-3.2.3-atimes.patch @@ -0,0 +1,48 @@ +diff --git a/rsync.c b/rsync.c +index bcecac63..ff9489be 100644 +--- a/rsync.c ++++ b/rsync.c +@@ -63,8 +63,7 @@ extern char *iconv_opt; + #define UPDATED_ATIME (1<<3) + #define UPDATED_ACLS (1<<4) + #define UPDATED_MODE (1<<5) +- +-#define UPDATED_TIMES (UPDATED_MTIME|UPDATED_ATIME) ++#define UPDATED_CRTIME (1<<6) + + #ifdef ICONV_CONST + iconv_t ic_chck = (iconv_t)-1; +@@ -576,10 +575,11 @@ int set_file_attrs(const char *fname, struct file_struct *file, stat_x *sxp, + set_xattr(fname, file, fnamecmp, sxp); + #endif + +- if (!preserve_times +- || (!(preserve_times & PRESERVE_DIR_TIMES) && S_ISDIR(sxp->st.st_mode)) +- || (!(preserve_times & PRESERVE_LINK_TIMES) && S_ISLNK(sxp->st.st_mode))) +- flags |= ATTRS_SKIP_MTIME | ATTRS_SKIP_ATIME; ++ if (!preserve_times) ++ flags |= ATTRS_SKIP_MTIME | (atimes_ndx ? 0 : ATTRS_SKIP_ATIME) | (crtimes_ndx ? 
0 : ATTRS_SKIP_CRTIME); ++ else if ((!(preserve_times & PRESERVE_DIR_TIMES) && S_ISDIR(sxp->st.st_mode)) ++ || (!(preserve_times & PRESERVE_LINK_TIMES) && S_ISLNK(sxp->st.st_mode))) ++ flags |= ATTRS_SKIP_MTIME | ATTRS_SKIP_ATIME | ATTRS_SKIP_CRTIME; + else if (sxp != &sx2) + memcpy(&sx2.st, &sxp->st, sizeof (sx2.st)); + if (!atimes_ndx || S_ISDIR(sxp->st.st_mode)) +@@ -606,7 +606,7 @@ int set_file_attrs(const char *fname, struct file_struct *file, stat_x *sxp, + updated |= UPDATED_ATIME; + } + } +- if (updated & UPDATED_TIMES) { ++ if (updated & (UPDATED_MTIME|UPDATED_ATIME)) { + int ret = set_times(fname, &sx2.st); + if (ret < 0) { + rsyserr(FERROR_XFER, errno, "failed to set times on %s", +@@ -614,7 +614,7 @@ int set_file_attrs(const char *fname, struct file_struct *file, stat_x *sxp, + goto cleanup; + } + if (ret > 0) { /* ret == 1 if symlink could not be set */ +- updated &= ~UPDATED_TIMES; ++ updated &= ~(UPDATED_MTIME|UPDATED_ATIME); + file->flags |= FLAG_TIME_FAILED; + } + } diff --git a/SOURCES/rsync-3.2.3-cve-2018-25032.patch b/SOURCES/rsync-3.2.3-cve-2018-25032.patch deleted file mode 100644 index e82e29c..0000000 --- a/SOURCES/rsync-3.2.3-cve-2018-25032.patch +++ /dev/null @@ -1,343 +0,0 @@ -From 5c44459c3b28a9bd3283aaceab7c615f8020c531 Mon Sep 17 00:00:00 2001 -From: Mark Adler -Date: Tue, 17 Apr 2018 22:09:22 -0700 -Subject: [PATCH] Fix a bug that can crash deflate on some input when using - Z_FIXED. - -This bug was reported by Danilo Ramos of Eideticom, Inc. It has -lain in wait 13 years before being found! The bug was introduced -in zlib 1.2.2.2, with the addition of the Z_FIXED option. That -option forces the use of fixed Huffman codes. For rare inputs with -a large number of distant matches, the pending buffer into which -the compressed data is written can overwrite the distance symbol -table which it overlays. That results in corrupted output due to -invalid distances, and can result in out-of-bound accesses, -crashing the application. - -The fix here combines the distance buffer and literal/length -buffers into a single symbol buffer. Now three bytes of pending -buffer space are opened up for each literal or length/distance -pair consumed, instead of the previous two bytes. This assures -that the pending buffer cannot overwrite the symbol table, since -the maximum fixed code compressed length/distance is 31 bits, and -since there are four bytes of pending space for every three bytes -of symbol space. ---- - deflate.c | 74 ++++++++++++++++++++++++++++++++++++++++--------------- - deflate.h | 25 +++++++++---------- - trees.c | 50 +++++++++++-------------------------- - 3 files changed, 79 insertions(+), 70 deletions(-) - -diff --git a/zlib/deflate.c b/zlib/deflate.c -index 425babc00..19cba873a 100644 ---- a/zlib/deflate.c -+++ b/zlib/deflate.c -@@ -255,11 +255,6 @@ int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy, - int wrap = 1; - static const char my_version[] = ZLIB_VERSION; - -- ushf *overlay; -- /* We overlay pending_buf and d_buf+l_buf. This works since the average -- * output size for (length,distance) codes is <= 24 bits. 
-- */ -- - if (version == Z_NULL || version[0] != my_version[0] || - stream_size != sizeof(z_stream)) { - return Z_VERSION_ERROR; -@@ -329,9 +324,47 @@ int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy, - - s->lit_bufsize = 1 << (memLevel + 6); /* 16K elements by default */ - -- overlay = (ushf *) ZALLOC(strm, s->lit_bufsize, sizeof(ush)+2); -- s->pending_buf = (uchf *) overlay; -- s->pending_buf_size = (ulg)s->lit_bufsize * (sizeof(ush)+2L); -+ /* We overlay pending_buf and sym_buf. This works since the average size -+ * for length/distance pairs over any compressed block is assured to be 31 -+ * bits or less. -+ * -+ * Analysis: The longest fixed codes are a length code of 8 bits plus 5 -+ * extra bits, for lengths 131 to 257. The longest fixed distance codes are -+ * 5 bits plus 13 extra bits, for distances 16385 to 32768. The longest -+ * possible fixed-codes length/distance pair is then 31 bits total. -+ * -+ * sym_buf starts one-fourth of the way into pending_buf. So there are -+ * three bytes in sym_buf for every four bytes in pending_buf. Each symbol -+ * in sym_buf is three bytes -- two for the distance and one for the -+ * literal/length. As each symbol is consumed, the pointer to the next -+ * sym_buf value to read moves forward three bytes. From that symbol, up to -+ * 31 bits are written to pending_buf. The closest the written pending_buf -+ * bits gets to the next sym_buf symbol to read is just before the last -+ * code is written. At that time, 31*(n-2) bits have been written, just -+ * after 24*(n-2) bits have been consumed from sym_buf. sym_buf starts at -+ * 8*n bits into pending_buf. (Note that the symbol buffer fills when n-1 -+ * symbols are written.) The closest the writing gets to what is unread is -+ * then n+14 bits. Here n is lit_bufsize, which is 16384 by default, and -+ * can range from 128 to 32768. -+ * -+ * Therefore, at a minimum, there are 142 bits of space between what is -+ * written and what is read in the overlain buffers, so the symbols cannot -+ * be overwritten by the compressed data. That space is actually 139 bits, -+ * due to the three-bit fixed-code block header. -+ * -+ * That covers the case where either Z_FIXED is specified, forcing fixed -+ * codes, or when the use of fixed codes is chosen, because that choice -+ * results in a smaller compressed block than dynamic codes. That latter -+ * condition then assures that the above analysis also covers all dynamic -+ * blocks. A dynamic-code block will only be chosen to be emitted if it has -+ * fewer bits than a fixed-code block would for the same set of symbols. -+ * Therefore its average symbol length is assured to be less than 31. So -+ * the compressed data for a dynamic block also cannot overwrite the -+ * symbols from which it is being constructed. 
-+ */ -+ -+ s->pending_buf = (uchf *) ZALLOC(strm, s->lit_bufsize, 4); -+ s->pending_buf_size = (ulg)s->lit_bufsize * 4; - - if (s->window == Z_NULL || s->prev == Z_NULL || s->head == Z_NULL || - s->pending_buf == Z_NULL) { -@@ -340,8 +373,12 @@ int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy, - deflateEnd (strm); - return Z_MEM_ERROR; - } -- s->d_buf = overlay + s->lit_bufsize/sizeof(ush); -- s->l_buf = s->pending_buf + (1+sizeof(ush))*s->lit_bufsize; -+ s->sym_buf = s->pending_buf + s->lit_bufsize; -+ s->sym_end = (s->lit_bufsize - 1) * 3; -+ /* We avoid equality with lit_bufsize*3 because of wraparound at 64K -+ * on 16 bit machines and because stored blocks are restricted to -+ * 64K-1 bytes. -+ */ - - s->level = level; - s->strategy = strategy; -@@ -552,7 +589,7 @@ int ZEXPORT deflatePrime (strm, bits, value) - - if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; - s = strm->state; -- if ((Bytef *)(s->d_buf) < s->pending_out + ((Buf_size + 7) >> 3)) -+ if (s->sym_buf < s->pending_out + ((Buf_size + 7) >> 3)) - return Z_BUF_ERROR; - do { - put = Buf_size - s->bi_valid; -@@ -1113,7 +1150,6 @@ int ZEXPORT deflateCopy (dest, source) - #else - deflate_state *ds; - deflate_state *ss; -- ushf *overlay; - - - if (source == Z_NULL || dest == Z_NULL || source->state == Z_NULL) { -@@ -1133,8 +1169,7 @@ int ZEXPORT deflateCopy (dest, source) - ds->window = (Bytef *) ZALLOC(dest, ds->w_size, 2*sizeof(Byte)); - ds->prev = (Posf *) ZALLOC(dest, ds->w_size, sizeof(Pos)); - ds->head = (Posf *) ZALLOC(dest, ds->hash_size, sizeof(Pos)); -- overlay = (ushf *) ZALLOC(dest, ds->lit_bufsize, sizeof(ush)+2); -- ds->pending_buf = (uchf *) overlay; -+ ds->pending_buf = (uchf *) ZALLOC(dest, ds->lit_bufsize, 4); - - if (ds->window == Z_NULL || ds->prev == Z_NULL || ds->head == Z_NULL || - ds->pending_buf == Z_NULL) { -@@ -1148,8 +1183,7 @@ int ZEXPORT deflateCopy (dest, source) - zmemcpy(ds->pending_buf, ss->pending_buf, (uInt)ds->pending_buf_size); - - ds->pending_out = ds->pending_buf + (ss->pending_out - ss->pending_buf); -- ds->d_buf = overlay + ds->lit_bufsize/sizeof(ush); -- ds->l_buf = ds->pending_buf + (1+sizeof(ush))*ds->lit_bufsize; -+ ds->sym_buf = ds->pending_buf + ds->lit_bufsize; - - ds->l_desc.dyn_tree = ds->dyn_ltree; - ds->d_desc.dyn_tree = ds->dyn_dtree; -@@ -1771,7 +1771,7 @@ local block_state deflate_fast(s, flush) - FLUSH_BLOCK(s, 1); - return finish_done; - } -- if (s->last_lit) -+ if (s->sym_next) - FLUSH_BLOCK(s, 0); - return block_done; - } -@@ -1912,7 +1912,7 @@ local block_state deflate_slow(s, flush) - FLUSH_BLOCK(s, 1); - return finish_done; - } -- if (s->last_lit) -+ if (s->sym_next) - FLUSH_BLOCK(s, 0); - return block_done; - } -@@ -1987,7 +1987,7 @@ local block_state deflate_rle(s, flush) - FLUSH_BLOCK(s, 1); - return finish_done; - } -- if (s->last_lit) -+ if (s->sym_next) - FLUSH_BLOCK(s, 0); - return block_done; - } -@@ -2026,7 +2026,7 @@ local block_state deflate_huff(s, flush) - FLUSH_BLOCK(s, 1); - return finish_done; - } -- if (s->last_lit) -+ if (s->sym_next) - FLUSH_BLOCK(s, 0); - return block_done; - } -diff --git a/zlib/deflate.h b/zlib/deflate.h -index 23ecdd312..d4cf1a98b 100644 ---- a/zlib/deflate.h -+++ b/zlib/deflate.h -@@ -217,7 +217,7 @@ typedef struct internal_state { - /* Depth of each subtree used as tie breaker for trees of equal frequency - */ - -- uchf *l_buf; /* buffer for literals or lengths */ -+ uchf *sym_buf; /* buffer for distances and literals/lengths */ - - uInt lit_bufsize; - /* Size of match buffer 
for literals/lengths. There are 4 reasons for -@@ -239,13 +239,8 @@ typedef struct internal_state { - * - I can't count above 4 - */ - -- uInt last_lit; /* running index in l_buf */ -- -- ushf *d_buf; -- /* Buffer for distances. To simplify the code, d_buf and l_buf have -- * the same number of elements. To use different lengths, an extra flag -- * array would be necessary. -- */ -+ uInt sym_next; /* running index in sym_buf */ -+ uInt sym_end; /* symbol table full when sym_next reaches this */ - - ulg opt_len; /* bit length of current block with optimal trees */ - ulg static_len; /* bit length of current block with static trees */ -@@ -317,20 +317,22 @@ void ZLIB_INTERNAL _tr_stored_block OF((deflate_state *s, charf *buf, - - # define _tr_tally_lit(s, c, flush) \ - { uch cc = (c); \ -- s->d_buf[s->last_lit] = 0; \ -- s->l_buf[s->last_lit++] = cc; \ -+ s->sym_buf[s->sym_next++] = 0; \ -+ s->sym_buf[s->sym_next++] = 0; \ -+ s->sym_buf[s->sym_next++] = cc; \ - s->dyn_ltree[cc].Freq++; \ -- flush = (s->last_lit == s->lit_bufsize-1); \ -+ flush = (s->sym_next == s->sym_end); \ - } - # define _tr_tally_dist(s, distance, length, flush) \ - { uch len = (length); \ - ush dist = (distance); \ -- s->d_buf[s->last_lit] = dist; \ -- s->l_buf[s->last_lit++] = len; \ -+ s->sym_buf[s->sym_next++] = dist; \ -+ s->sym_buf[s->sym_next++] = dist >> 8; \ -+ s->sym_buf[s->sym_next++] = len; \ - dist--; \ - s->dyn_ltree[_length_code[len]+LITERALS+1].Freq++; \ - s->dyn_dtree[d_code(dist)].Freq++; \ -- flush = (s->last_lit == s->lit_bufsize-1); \ -+ flush = (s->sym_next == s->sym_end); \ - } - #else - # define _tr_tally_lit(s, c, flush) flush = _tr_tally(s, 0, c) -diff --git a/zlib/trees.c b/zlib/trees.c -index 4f4a65011..decaeb7c3 100644 ---- a/zlib/trees.c -+++ b/zlib/trees.c -@@ -416,7 +416,7 @@ local void init_block(s) - - s->dyn_ltree[END_BLOCK].Freq = 1; - s->opt_len = s->static_len = 0L; -- s->last_lit = s->matches = 0; -+ s->sym_next = s->matches = 0; - } - - #define SMALLEST 1 -@@ -948,7 +948,7 @@ void ZLIB_INTERNAL _tr_flush_block(s, buf, stored_len, last) - - Tracev((stderr, "\nopt %lu(%lu) stat %lu(%lu) stored %lu lit %u ", - opt_lenb, s->opt_len, static_lenb, s->static_len, stored_len, -- s->last_lit)); -+ s->sym_next / 3)); - - if (static_lenb <= opt_lenb) opt_lenb = static_lenb; - -@@ -1017,8 +1017,9 @@ int ZLIB_INTERNAL _tr_tally (s, dist, lc) - unsigned dist; /* distance of matched string */ - unsigned lc; /* match length-MIN_MATCH or unmatched char (if dist==0) */ - { -- s->d_buf[s->last_lit] = (ush)dist; -- s->l_buf[s->last_lit++] = (uch)lc; -+ s->sym_buf[s->sym_next++] = dist; -+ s->sym_buf[s->sym_next++] = dist >> 8; -+ s->sym_buf[s->sym_next++] = lc; - if (dist == 0) { - /* lc is the unmatched char */ - s->dyn_ltree[lc].Freq++; -@@ -1033,30 +1034,7 @@ int ZLIB_INTERNAL _tr_tally (s, dist, lc) - s->dyn_ltree[_length_code[lc]+LITERALS+1].Freq++; - s->dyn_dtree[d_code(dist)].Freq++; - } -- --#ifdef TRUNCATE_BLOCK -- /* Try to guess if it is profitable to stop the current block here */ -- if ((s->last_lit & 0x1fff) == 0 && s->level > 2) { -- /* Compute an upper bound for the compressed length */ -- ulg out_length = (ulg)s->last_lit*8L; -- ulg in_length = (ulg)((long)s->strstart - s->block_start); -- int dcode; -- for (dcode = 0; dcode < D_CODES; dcode++) { -- out_length += (ulg)s->dyn_dtree[dcode].Freq * -- (5L+extra_dbits[dcode]); -- } -- out_length >>= 3; -- Tracev((stderr,"\nlast_lit %u, in %ld, out ~%ld(%ld%%) ", -- s->last_lit, in_length, out_length, -- 100L - out_length*100L/in_length)); 
-- if (s->matches < s->last_lit/2 && out_length < in_length/2) return 1; -- } --#endif -- return (s->last_lit == s->lit_bufsize-1); -- /* We avoid equality with lit_bufsize because of wraparound at 64K -- * on 16 bit machines and because stored blocks are restricted to -- * 64K-1 bytes. -- */ -+ return (s->sym_next == s->sym_end); - } - - /* =========================================================================== -@@ -1069,13 +1047,14 @@ local void compress_block(s, ltree, dtree) - { - unsigned dist; /* distance of matched string */ - int lc; /* match length or unmatched char (if dist == 0) */ -- unsigned lx = 0; /* running index in l_buf */ -+ unsigned sx = 0; /* running index in sym_buf */ - unsigned code; /* the code to send */ - int extra; /* number of extra bits to send */ - -- if (s->last_lit != 0) do { -- dist = s->d_buf[lx]; -- lc = s->l_buf[lx++]; -+ if (s->sym_next != 0) do { -+ dist = s->sym_buf[sx++] & 0xff; -+ dist += (unsigned)(s->sym_buf[sx++] & 0xff) << 8; -+ lc = s->sym_buf[sx++]; - if (dist == 0) { - send_code(s, lc, ltree); /* send a literal byte */ - Tracecv(isgraph(lc), (stderr," '%c' ", lc)); -@@ -1100,11 +1079,10 @@ local void compress_block(s, ltree, dtree) - } - } /* literal or match pair ? */ - -- /* Check that the overlay between pending_buf and d_buf+l_buf is ok: */ -- Assert((uInt)(s->pending) < s->lit_bufsize + 2*lx, -- "pendingBuf overflow"); -+ /* Check that the overlay between pending_buf and sym_buf is ok: */ -+ Assert(s->pending < s->lit_bufsize + sx, "pendingBuf overflow"); - -- } while (lx < s->last_lit); -+ } while (sx < s->sym_next); - - send_code(s, END_BLOCK, ltree); - } diff --git a/SOURCES/rsync-3.2.3-cve-2022-37434.patch b/SOURCES/rsync-3.2.3-cve-2022-37434.patch new file mode 100644 index 0000000..2f8ec42 --- /dev/null +++ b/SOURCES/rsync-3.2.3-cve-2022-37434.patch @@ -0,0 +1,16 @@ +diff --git a/zlib/inflate.c b/zlib/inflate.c +index e43abd9..bd33c19 100644 +--- a/zlib/inflate.c ++++ b/zlib/inflate.c +@@ -740,8 +740,9 @@ int flush; + if (copy > have) copy = have; + if (copy) { + if (state->head != Z_NULL && +- state->head->extra != Z_NULL) { +- len = state->head->extra_len - state->length; ++ state->head->extra != Z_NULL && ++ (len = state->head->extra_len - state->length) < ++ state->head->extra_max) { + zmemcpy(state->head->extra + len, next, + len + copy > state->head->extra_max ? + state->head->extra_max - len : copy); diff --git a/SOURCES/rsync-3.2.3-segfault.patch b/SOURCES/rsync-3.2.3-segfault.patch new file mode 100644 index 0000000..7240f6a --- /dev/null +++ b/SOURCES/rsync-3.2.3-segfault.patch @@ -0,0 +1,24 @@ +From f5a9a1013873580d0ad2ae4f5c5038c324d71bfe Mon Sep 17 00:00:00 2001 +From: Wayne Davison +Date: Mon, 21 Feb 2022 14:19:31 -0800 +Subject: [PATCH] Fix possible array deref using invalid index. 
+ +--- + copy-devices.diff | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +diff --git a/patches/copy-devices.diff b/patches/copy-devices.diff +index 797d046..4138474 100644 +--- a/patches/copy-devices.diff ++++ b/patches/copy-devices.diff +@@ -111,8 +111,8 @@ diff --git a/rsync.c b/rsync.c + if (iflags & ITEM_TRANSFER) { + int i = ndx - cur_flist->ndx_start; + - if (i < 0 || !S_ISREG(cur_flist->files[i]->mode)) { +-+ struct file_struct *file = cur_flist->files[i]; +-+ if (i < 0 || !(S_ISREG(file->mode) || (copy_devices && IS_DEVICE(file->mode)))) { +++ if (i < 0 +++ || !(S_ISREG(cur_flist->files[i]->mode) || (copy_devices && IS_DEVICE(cur_flist->files[i]->mode)))) { + rprintf(FERROR, + "received request to transfer non-regular file: %d [%s]\n", + ndx, who_am_i()); diff --git a/SOURCES/rsync-3.2.3-xattr.patch b/SOURCES/rsync-3.2.3-xattr.patch new file mode 100644 index 0000000..91b77c2 --- /dev/null +++ b/SOURCES/rsync-3.2.3-xattr.patch @@ -0,0 +1,38 @@ +diff --git a/xattrs.c b/xattrs.c +index 508649c0..3c549192 100644 +--- a/xattrs.c ++++ b/xattrs.c +@@ -1055,7 +1055,7 @@ int set_xattr(const char *fname, const struct file_struct *file, const char *fna + { + rsync_xa_list *glst = rsync_xal_l.items; + item_list *lst; +- int ndx; ++ int ndx, added_write_perm = 0; + + if (dry_run) + return 1; /* FIXME: --dry-run needs to compute this value */ +@@ -1084,10 +1084,23 @@ int set_xattr(const char *fname, const struct file_struct *file, const char *fna + } + #endif + ++ /* If the target file lacks write permission, we try to add it ++ * temporarily so we can change the extended attributes. */ ++ if (!am_root ++#ifdef SUPPORT_LINKS ++ && !S_ISLNK(sxp->st.st_mode) ++#endif ++ && access(fname, W_OK) < 0 ++ && do_chmod(fname, (sxp->st.st_mode & CHMOD_BITS) | S_IWUSR) == 0) ++ added_write_perm = 1; ++ + ndx = F_XATTR(file); + glst += ndx; + lst = &glst->xa_items; +- return rsync_xal_set(fname, lst, fnamecmp, sxp); ++ int return_value = rsync_xal_set(fname, lst, fnamecmp, sxp); ++ if (added_write_perm) /* remove the temporary write permission */ ++ do_chmod(fname, sxp->st.st_mode); ++ return return_value; + } + + #ifdef SUPPORT_ACLS diff --git a/SPECS/rsync.spec b/SPECS/rsync.spec index da08fed..f82c30f 100644 --- a/SPECS/rsync.spec +++ b/SPECS/rsync.spec @@ -10,7 +10,7 @@ Summary: A program for synchronizing files over a network Name: rsync Version: 3.2.3 -Release: 9%{?dist}.2 +Release: 18%{?dist} URL: https://rsync.samba.org/ Source0: https://download.samba.org/pub/rsync/src/rsync-%{version}%{?prerelease}.tar.gz @@ -35,8 +35,12 @@ Patch0: rsync-3.2.2-ssl-verify-hostname.patch Patch1: rsync-3.2.2-runtests.patch Patch2: rsync-3.2.3-lchmod.patch Patch3: rsync-3.2.3-append-mode.patch -Patch4: rsync-3.2.3-cve-2018-25032.patch -Patch5: rsync-3.2.3-cve-2022-29154.patch +Patch4: rsync-3.2.3-xattr.patch +Patch5: rsync-3.2.3-segfault.patch +Patch6: rsync-3.2.3-atimes.patch +Patch7: rsync-3.1.3-cve-2018-25032.patch +Patch8: rsync-3.2.3-cve-2022-37434.patch +Patch9: rsync-3.2.3-cve-2022-29154.patch %description Rsync uses a reliable algorithm to bring remote and host files into @@ -74,8 +78,12 @@ patch -p1 -i patches/copy-devices.diff %patch1 -p1 -b .runtests %patch2 -p1 -b .lchmod %patch3 -p1 -b .append-mode -%patch4 -p1 -b .cve-2018-25032 -%patch5 -p1 -b .cve-2022-29154 +%patch4 -p1 -b .xattr +%patch5 -p1 -b .segfault +%patch6 -p1 -b .atimes +%patch7 -p1 -b .cve-2018-25032 +%patch8 -p1 -b .cve-2022-37434 +%patch9 -p1 -b .cve-2022-29154 %build %configure --disable-xxhash @@ -122,11 +130,32 @@ 
install -D -m644 %{SOURCE6} $RPM_BUILD_ROOT/%{_unitdir}/rsyncd@.service %systemd_postun_with_restart rsyncd.service %changelog -* Mon Aug 15 2022 Michal Ruprich - 3.2.3-9.2 -- Resolves: #2111176 - remote arbitrary files write inside the directories of connecting peers +* Thu Aug 25 2022 Michal Ruprich - 3.2.3-18 +- Resolves: #2111177 - remote arbitrary files write inside the directories of connecting peers -* Thu Apr 21 2022 Michal Ruprich - 3.2.3-9.1 -- Resolves: #2074784 - A flaw found in zlib v1.2.2.2 through zlib v1.2.11 when compressing certain inputs +* Thu Aug 18 2022 Michal Ruprich - 3.2.3-17 +- Resolves: #2116669 - zlib: a heap-based buffer over-read or buffer overflow in inflate in inflate.c via a large gzip header extra field + +* Wed May 18 2022 Michal Ruprich - 3.2.3-16 +- Related: #2081296 - Adding ci.fmf for separation of testing results + +* Wed May 18 2022 Michal Ruprich - 3.2.3-15 +- Related: #2081296 - Disabling STI + +* Wed May 18 2022 Michal Ruprich - 3.2.3-14 +- Resolves: #2071514 - A flaw found in zlib when compressing (not decompressing) certain inputs + +* Wed May 11 2022 Michal Ruprich - 3.2.3-13 +- Resolves: #2079639 - rsync --atimes doesn't work + +* Tue May 03 2022 Michal Ruprich - 3.2.3-12 +- Resolves: #2081296 - Enable fmf tests in centos stream + +* Tue Apr 26 2022 Michal Ruprich - 3.2.3-11 +- Resolves: #2053198 - rsync segmentation fault + +* Fri Apr 22 2022 Michal Ruprich - 3.2.3-10 +- Resolves: #2077431 - Read-only files that have changed xattrs fail to allow xattr changes * Tue Aug 10 2021 Mohan Boddu - 3.2.3-9 - Rebuilt for IMA sigs, glibc 2.34, aarch64 flags
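
Note on the bounds check backported by SOURCES/rsync-3.2.3-cve-2022-37434.patch: when zlib's inflate() copies the gzip "extra" header field into a caller-supplied buffer, the write offset has to be checked against the buffer capacity before any bytes are copied, and the copy length clamped to the space that remains. The standalone C sketch below only mirrors that bounded-copy shape; the struct and function names (extra_field_state, copy_extra_chunk) are hypothetical stand-ins rather than zlib's gz_header API, and the real fix applies the equivalent check inside zlib/inflate.c as shown in the patch above.

    /* Simplified, self-contained illustration of the bounded copy added by
     * rsync-3.2.3-cve-2022-37434.patch.  Names here are hypothetical, not zlib API. */
    #include <stdio.h>
    #include <string.h>

    struct extra_field_state {
        unsigned char *extra;   /* caller-supplied buffer for the extra field */
        unsigned extra_max;     /* capacity of that buffer */
        unsigned extra_len;     /* total extra-field length announced in the header */
        unsigned remaining;     /* bytes of the extra field still to be read */
    };

    /* Copy one chunk of the "extra" field into the caller's buffer, silently
     * discarding anything that would not fit: copy only while the write offset
     * is still below extra_max, and clamp the length to the room left. */
    static void copy_extra_chunk(struct extra_field_state *s,
                                 const unsigned char *next, unsigned copy)
    {
        unsigned len = s->extra_len - s->remaining;   /* offset already written */

        if (s->extra != NULL && len < s->extra_max) {
            unsigned room = s->extra_max - len;
            memcpy(s->extra + len, next, copy > room ? room : copy);
        }
        s->remaining -= copy;   /* input is consumed even when output is truncated */
    }

    int main(void)
    {
        unsigned char out[8];
        unsigned char in[32];
        struct extra_field_state s = { out, sizeof out, sizeof in, sizeof in };

        memset(in, 0xAB, sizeof in);
        copy_extra_chunk(&s, in, 20);        /* larger than out[]: clamped to 8 bytes */
        copy_extra_chunk(&s, in + 20, 12);   /* offset already past extra_max: skipped */
        printf("remaining = %u\n", s.remaining);   /* prints 0 */
        return 0;
    }

Without the "offset < extra_max" gate, an extra field longer than the supplied buffer makes the destination offset land past the end of the buffer and the unsigned clamp (extra_max - len) wrap around to a huge value, which is the out-of-bounds read/write described in the CVE-2022-37434 changelog entry above.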