From 5c44459c3b28a9bd3283aaceab7c615f8020c531 Mon Sep 17 00:00:00 2001
From: Mark Adler <madler@alumni.caltech.edu>
Date: Tue, 17 Apr 2018 22:09:22 -0700
Subject: [PATCH] Fix a bug that can crash deflate on some input when using
 Z_FIXED.

This bug was reported by Danilo Ramos of Eideticom, Inc. It has
lain in wait 13 years before being found! The bug was introduced
in zlib 1.2.2.2, with the addition of the Z_FIXED option. That
option forces the use of fixed Huffman codes. For rare inputs with
a large number of distant matches, the pending buffer into which
the compressed data is written can overwrite the distance symbol
table which it overlays. That results in corrupted output due to
invalid distances, and can result in out-of-bound accesses,
crashing the application.

The fix here combines the distance buffer and literal/length
buffers into a single symbol buffer. Now three bytes of pending
buffer space are opened up for each literal or length/distance
pair consumed, instead of the previous two bytes. This assures
that the pending buffer cannot overwrite the symbol table, since
the maximum fixed code compressed length/distance is 31 bits, and
since there are four bytes of pending space for every three bytes
of symbol space.
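As an illustration of the new layout (a minimal, standalone sketch, not part
of the patch itself; the simplified state and the tally() helper are invented
for the example), a literal and a length/distance pair are each stored as one
three-byte entry in a shared symbol buffer and read back the same way the
patched compress_block() does:

#include <stdio.h>

/* Simplified stand-in for the deflate state: sym_buf holds three-byte
   symbols, sym_next is the running byte index into it. */
static unsigned char sym_buf[3 * 16];
static unsigned sym_next = 0;

/* Record a literal (dist == 0) or a length/distance pair as one
   three-byte symbol: distance low byte, distance high byte, then the
   literal or length code, mirroring the patched _tr_tally(). */
static void tally(unsigned dist, unsigned lc)
{
    sym_buf[sym_next++] = (unsigned char)dist;
    sym_buf[sym_next++] = (unsigned char)(dist >> 8);
    sym_buf[sym_next++] = (unsigned char)lc;
}

int main(void)
{
    unsigned sx = 0;

    tally(0, 'A');      /* literal 'A' */
    tally(16385, 200);  /* distance 16385, length code 200 */

    /* Read the symbols back the way the patched compress_block() does. */
    while (sx < sym_next) {
        unsigned dist = sym_buf[sx++] & 0xff;
        int lc;
        dist += (unsigned)(sym_buf[sx++] & 0xff) << 8;
        lc = sym_buf[sx++];
        if (dist == 0)
            printf("literal %d\n", lc);
        else
            printf("distance %u, length code %d\n", dist, lc);
    }
    return 0;
}

With three bytes consumed per symbol and four bytes of pending space per
symbol slot, the compressed output can never catch up to the unread symbols,
which is the guarantee described above.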
---
 deflate.c | 74 ++++++++++++++++++++++++++++++++++++++++---------------
 deflate.h | 25 +++++++++----------
 trees.c   | 50 +++++++++++--------------------------
 3 files changed, 79 insertions(+), 70 deletions(-)

diff --git a/deflate.c b/deflate.c
index 425babc..19cba87 100644
--- a/deflate.c
+++ b/deflate.c
@@ -255,11 +255,6 @@ int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy,
     int wrap = 1;
     static const char my_version[] = ZLIB_VERSION;
 
-    ushf *overlay;
-    /* We overlay pending_buf and d_buf+l_buf. This works since the average
-     * output size for (length,distance) codes is <= 24 bits.
-     */
-
     if (version == Z_NULL || version[0] != my_version[0] ||
         stream_size != sizeof(z_stream)) {
         return Z_VERSION_ERROR;
@@ -329,9 +324,47 @@ int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy,
 
     s->lit_bufsize = 1 << (memLevel + 6); /* 16K elements by default */
 
-    overlay = (ushf *) ZALLOC(strm, s->lit_bufsize, sizeof(ush)+2);
-    s->pending_buf = (uchf *) overlay;
-    s->pending_buf_size = (ulg)s->lit_bufsize * (sizeof(ush)+2L);
+    /* We overlay pending_buf and sym_buf. This works since the average size
+     * for length/distance pairs over any compressed block is assured to be 31
+     * bits or less.
+     *
+     * Analysis: The longest fixed codes are a length code of 8 bits plus 5
+     * extra bits, for lengths 131 to 257. The longest fixed distance codes are
+     * 5 bits plus 13 extra bits, for distances 16385 to 32768. The longest
+     * possible fixed-codes length/distance pair is then 31 bits total.
+     *
+     * sym_buf starts one-fourth of the way into pending_buf. So there are
+     * three bytes in sym_buf for every four bytes in pending_buf. Each symbol
+     * in sym_buf is three bytes -- two for the distance and one for the
+     * literal/length. As each symbol is consumed, the pointer to the next
+     * sym_buf value to read moves forward three bytes. From that symbol, up to
+     * 31 bits are written to pending_buf. The closest the written pending_buf
+     * bits gets to the next sym_buf symbol to read is just before the last
+     * code is written. At that time, 31*(n-2) bits have been written, just
+     * after 24*(n-2) bits have been consumed from sym_buf. sym_buf starts at
+     * 8*n bits into pending_buf. (Note that the symbol buffer fills when n-1
+     * symbols are written.) The closest the writing gets to what is unread is
+     * then n+14 bits. Here n is lit_bufsize, which is 16384 by default, and
+     * can range from 128 to 32768.
+     *
+     * Therefore, at a minimum, there are 142 bits of space between what is
+     * written and what is read in the overlain buffers, so the symbols cannot
+     * be overwritten by the compressed data. That space is actually 139 bits,
+     * due to the three-bit fixed-code block header.
+     *
+     * That covers the case where either Z_FIXED is specified, forcing fixed
+     * codes, or when the use of fixed codes is chosen, because that choice
+     * results in a smaller compressed block than dynamic codes. That latter
+     * condition then assures that the above analysis also covers all dynamic
+     * blocks. A dynamic-code block will only be chosen to be emitted if it has
+     * fewer bits than a fixed-code block would for the same set of symbols.
+     * Therefore its average symbol length is assured to be less than 31. So
+     * the compressed data for a dynamic block also cannot overwrite the
+     * symbols from which it is being constructed.
+     */
+
+    s->pending_buf = (uchf *) ZALLOC(strm, s->lit_bufsize, 4);
+    s->pending_buf_size = (ulg)s->lit_bufsize * 4;
 
     if (s->window == Z_NULL || s->prev == Z_NULL || s->head == Z_NULL ||
         s->pending_buf == Z_NULL) {
@@ -340,8 +373,12 @@ int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy,
         deflateEnd (strm);
         return Z_MEM_ERROR;
     }
-    s->d_buf = overlay + s->lit_bufsize/sizeof(ush);
-    s->l_buf = s->pending_buf + (1+sizeof(ush))*s->lit_bufsize;
+    s->sym_buf = s->pending_buf + s->lit_bufsize;
+    s->sym_end = (s->lit_bufsize - 1) * 3;
+    /* We avoid equality with lit_bufsize*3 because of wraparound at 64K
+     * on 16 bit machines and because stored blocks are restricted to
+     * 64K-1 bytes.
+     */
 
     s->level = level;
     s->strategy = strategy;
@@ -552,7 +589,7 @@ int ZEXPORT deflatePrime (strm, bits, value)
 
     if (deflateStateCheck(strm)) return Z_STREAM_ERROR;
     s = strm->state;
-    if ((Bytef *)(s->d_buf) < s->pending_out + ((Buf_size + 7) >> 3))
+    if (s->sym_buf < s->pending_out + ((Buf_size + 7) >> 3))
         return Z_BUF_ERROR;
     do {
         put = Buf_size - s->bi_valid;
@@ -1113,7 +1150,6 @@ int ZEXPORT deflateCopy (dest, source)
 #else
     deflate_state *ds;
     deflate_state *ss;
-    ushf *overlay;
 
 
     if (deflateStateCheck(source) || dest == Z_NULL) {
@@ -1133,8 +1169,7 @@ int ZEXPORT deflateCopy (dest, source)
     ds->window = (Bytef *) ZALLOC_WINDOW(dest, ds->w_size, 2*sizeof(Byte));
     ds->prev   = (Posf *)  ZALLOC(dest, ds->w_size, sizeof(Pos));
     ds->head   = (Posf *)  ZALLOC(dest, ds->hash_size, sizeof(Pos));
-    overlay = (ushf *) ZALLOC(dest, ds->lit_bufsize, sizeof(ush)+2);
-    ds->pending_buf = (uchf *) overlay;
+    ds->pending_buf = (uchf *) ZALLOC(dest, ds->lit_bufsize, 4);
 
     if (ds->window == Z_NULL || ds->prev == Z_NULL || ds->head == Z_NULL ||
         ds->pending_buf == Z_NULL) {
@@ -1148,8 +1183,7 @@ int ZEXPORT deflateCopy (dest, source)
     zmemcpy(ds->pending_buf, ss->pending_buf, (uInt)ds->pending_buf_size);
 
     ds->pending_out = ds->pending_buf + (ss->pending_out - ss->pending_buf);
-    ds->d_buf = overlay + ds->lit_bufsize/sizeof(ush);
-    ds->l_buf = ds->pending_buf + (1+sizeof(ush))*ds->lit_bufsize;
+    ds->sym_buf = ds->pending_buf + ds->lit_bufsize;
 
     ds->l_desc.dyn_tree = ds->dyn_ltree;
     ds->d_desc.dyn_tree = ds->dyn_dtree;
@@ -1925,7 +1959,7 @@ local block_state deflate_fast(s, flush)
         FLUSH_BLOCK(s, 1);
         return finish_done;
     }
-    if (s->last_lit)
+    if (s->sym_next)
         FLUSH_BLOCK(s, 0);
     return block_done;
 }
@@ -2056,7 +2090,7 @@ local block_state deflate_slow(s, flush)
         FLUSH_BLOCK(s, 1);
         return finish_done;
     }
-    if (s->last_lit)
+    if (s->sym_next)
         FLUSH_BLOCK(s, 0);
     return block_done;
 }
@@ -2131,7 +2165,7 @@ local block_state deflate_rle(s, flush)
         FLUSH_BLOCK(s, 1);
         return finish_done;
     }
-    if (s->last_lit)
+    if (s->sym_next)
         FLUSH_BLOCK(s, 0);
     return block_done;
 }
@@ -2170,7 +2204,7 @@ local block_state deflate_huff(s, flush)
         FLUSH_BLOCK(s, 1);
         return finish_done;
     }
-    if (s->last_lit)
+    if (s->sym_next)
         FLUSH_BLOCK(s, 0);
     return block_done;
 }
diff --git a/deflate.h b/deflate.h
index 23ecdd3..d4cf1a9 100644
--- a/deflate.h
+++ b/deflate.h
@@ -217,7 +217,7 @@ typedef struct internal_state {
     /* Depth of each subtree used as tie breaker for trees of equal frequency
      */
 
-    uchf *l_buf;          /* buffer for literals or lengths */
+    uchf *sym_buf;        /* buffer for distances and literals/lengths */
 
     uInt  lit_bufsize;
     /* Size of match buffer for literals/lengths.  There are 4 reasons for
@@ -239,13 +239,8 @@ typedef struct internal_state {
      *   - I can't count above 4
      */
 
-    uInt last_lit;      /* running index in l_buf */
-
-    ushf *d_buf;
-    /* Buffer for distances. To simplify the code, d_buf and l_buf have
-     * the same number of elements. To use different lengths, an extra flag
-     * array would be necessary.
-     */
+    uInt sym_next;      /* running index in sym_buf */
+    uInt sym_end;       /* symbol table full when sym_next reaches this */
 
     ulg opt_len;        /* bit length of current block with optimal trees */
     ulg static_len;     /* bit length of current block with static trees */
@@ -325,20 +320,22 @@ void ZLIB_INTERNAL _tr_stored_block OF((deflate_state *s, charf *buf,
 
 # define _tr_tally_lit(s, c, flush) \
   { uch cc = (c); \
-    s->d_buf[s->last_lit] = 0; \
-    s->l_buf[s->last_lit++] = cc; \
+    s->sym_buf[s->sym_next++] = 0; \
+    s->sym_buf[s->sym_next++] = 0; \
+    s->sym_buf[s->sym_next++] = cc; \
     s->dyn_ltree[cc].Freq++; \
-    flush = (s->last_lit == s->lit_bufsize-1); \
+    flush = (s->sym_next == s->sym_end); \
    }
 # define _tr_tally_dist(s, distance, length, flush) \
   { uch len = (uch)(length); \
     ush dist = (ush)(distance); \
-    s->d_buf[s->last_lit] = dist; \
-    s->l_buf[s->last_lit++] = len; \
+    s->sym_buf[s->sym_next++] = dist; \
+    s->sym_buf[s->sym_next++] = dist >> 8; \
+    s->sym_buf[s->sym_next++] = len; \
    dist--; \
     s->dyn_ltree[_length_code[len]+LITERALS+1].Freq++; \
     s->dyn_dtree[d_code(dist)].Freq++; \
-    flush = (s->last_lit == s->lit_bufsize-1); \
+    flush = (s->sym_next == s->sym_end); \
  }
 #else
 # define _tr_tally_lit(s, c, flush) flush = _tr_tally(s, 0, c)
diff --git a/trees.c b/trees.c
index 4f4a650..decaeb7 100644
--- a/trees.c
+++ b/trees.c
@@ -416,7 +416,7 @@ local void init_block(s)
 
     s->dyn_ltree[END_BLOCK].Freq = 1;
     s->opt_len = s->static_len = 0L;
-    s->last_lit = s->matches = 0;
+    s->sym_next = s->matches = 0;
 }
 
 #define SMALLEST 1
@@ -948,7 +948,7 @@ void ZLIB_INTERNAL _tr_flush_block(s, buf, stored_len, last)
 
         Tracev((stderr, "\nopt %lu(%lu) stat %lu(%lu) stored %lu lit %u ",
                 opt_lenb, s->opt_len, static_lenb, s->static_len, stored_len,
-                s->last_lit));
+                s->sym_next / 3));
 
         if (static_lenb <= opt_lenb) opt_lenb = static_lenb;
 
@@ -1017,8 +1017,9 @@ int ZLIB_INTERNAL _tr_tally (s, dist, lc)
     unsigned dist;  /* distance of matched string */
     unsigned lc;    /* match length-MIN_MATCH or unmatched char (if dist==0) */
 {
-    s->d_buf[s->last_lit] = (ush)dist;
-    s->l_buf[s->last_lit++] = (uch)lc;
+    s->sym_buf[s->sym_next++] = dist;
+    s->sym_buf[s->sym_next++] = dist >> 8;
+    s->sym_buf[s->sym_next++] = lc;
     if (dist == 0) {
         /* lc is the unmatched char */
         s->dyn_ltree[lc].Freq++;
@@ -1033,30 +1034,7 @@ int ZLIB_INTERNAL _tr_tally (s, dist, lc)
         s->dyn_ltree[_length_code[lc]+LITERALS+1].Freq++;
         s->dyn_dtree[d_code(dist)].Freq++;
     }
-
-#ifdef TRUNCATE_BLOCK
-    /* Try to guess if it is profitable to stop the current block here */
-    if ((s->last_lit & 0x1fff) == 0 && s->level > 2) {
-        /* Compute an upper bound for the compressed length */
-        ulg out_length = (ulg)s->last_lit*8L;
-        ulg in_length = (ulg)((long)s->strstart - s->block_start);
-        int dcode;
-        for (dcode = 0; dcode < D_CODES; dcode++) {
-            out_length += (ulg)s->dyn_dtree[dcode].Freq *
-                (5L+extra_dbits[dcode]);
-        }
-        out_length >>= 3;
-        Tracev((stderr,"\nlast_lit %u, in %ld, out ~%ld(%ld%%) ",
-               s->last_lit, in_length, out_length,
-               100L - out_length*100L/in_length));
-        if (s->matches < s->last_lit/2 && out_length < in_length/2) return 1;
-    }
-#endif
-    return (s->last_lit == s->lit_bufsize-1);
-    /* We avoid equality with lit_bufsize because of wraparound at 64K
-     * on 16 bit machines and because stored blocks are restricted to
-     * 64K-1 bytes.
-     */
+    return (s->sym_next == s->sym_end);
 }
 
 /* ===========================================================================
@@ -1069,13 +1047,14 @@ local void compress_block(s, ltree, dtree)
 {
     unsigned dist;      /* distance of matched string */
     int lc;             /* match length or unmatched char (if dist == 0) */
-    unsigned lx = 0;    /* running index in l_buf */
+    unsigned sx = 0;    /* running index in sym_buf */
     unsigned code;      /* the code to send */
     int extra;          /* number of extra bits to send */
 
-    if (s->last_lit != 0) do {
-        dist = s->d_buf[lx];
-        lc = s->l_buf[lx++];
+    if (s->sym_next != 0) do {
+        dist = s->sym_buf[sx++] & 0xff;
+        dist += (unsigned)(s->sym_buf[sx++] & 0xff) << 8;
+        lc = s->sym_buf[sx++];
         if (dist == 0) {
             send_code(s, lc, ltree); /* send a literal byte */
             Tracecv(isgraph(lc), (stderr," '%c' ", lc));
@@ -1100,11 +1079,10 @@ local void compress_block(s, ltree, dtree)
             }
         } /* literal or match pair ? */
 
-        /* Check that the overlay between pending_buf and d_buf+l_buf is ok: */
-        Assert((uInt)(s->pending) < s->lit_bufsize + 2*lx,
-               "pendingBuf overflow");
+        /* Check that the overlay between pending_buf and sym_buf is ok: */
+        Assert(s->pending < s->lit_bufsize + sx, "pendingBuf overflow");
 
-    } while (lx < s->last_lit);
+    } while (sx < s->sym_next);
 
     send_code(s, END_BLOCK, ltree);
 }
-- 
2.34.1