diff --git a/SOURCES/mod_http2-1.15.7-CVE-2020-11993.patch b/SOURCES/mod_http2-1.15.7-CVE-2020-11993.patch
new file mode 100644
index 0000000..a0ffc08
--- /dev/null
+++ b/SOURCES/mod_http2-1.15.7-CVE-2020-11993.patch
@@ -0,0 +1,119 @@
+diff --git a/mod_http2/h2_mplx.c b/mod_http2/h2_mplx.c
+index c3d590d..33ea45e 100644
+--- a/mod_http2/h2_mplx.c
++++ b/mod_http2/h2_mplx.c
+@@ -56,7 +56,7 @@ typedef struct {
+     apr_size_t count;
+ } stream_iter_ctx;
+
+-static apr_status_t mplx_be_happy(h2_mplx *m);
++static apr_status_t mplx_be_happy(h2_mplx *m, h2_task *task);
+ static apr_status_t mplx_be_annoyed(h2_mplx *m);
+
+ apr_status_t h2_mplx_child_init(apr_pool_t *pool, server_rec *s)
+@@ -526,10 +526,10 @@ static apr_status_t out_open(h2_mplx *m, int stream_id, h2_bucket_beam *beam)
+     stream->output = beam;
+
+     if (APLOGctrace2(m->c)) {
+-        h2_beam_log(beam, m->c, APLOG_TRACE2, "out_open");
++        h2_beam_log(beam, stream->task->c, APLOG_TRACE2, "out_open");
+     }
+     else {
+-        ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
++        ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->task->c,
+                       "h2_mplx(%s): out open", stream->task->id);
+     }
+
+@@ -579,10 +579,10 @@ static apr_status_t out_close(h2_mplx *m, h2_task *task)
+         return APR_ECONNABORTED;
+     }
+
+-    ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, m->c,
++    ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, task->c,
+                   "h2_mplx(%s): close", task->id);
+     status = h2_beam_close(task->output.beam);
+-    h2_beam_log(task->output.beam, m->c, APLOG_TRACE2, "out_close");
++    h2_beam_log(task->output.beam, task->c, APLOG_TRACE2, "out_close");
+     output_consumed_signal(m, task);
+     check_data_for(m, stream, 1);
+     return status;
+@@ -782,18 +782,18 @@ static void task_done(h2_mplx *m, h2_task *task)
+ {
+     h2_stream *stream;
+
+-    ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
++    ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c,
+                   "h2_mplx(%ld): task(%s) done", m->id, task->id);
+     out_close(m, task);
+
+     task->worker_done = 1;
+     task->done_at = apr_time_now();
+-    ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
++    ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, task->c,
+                   "h2_mplx(%s): request done, %f ms elapsed", task->id,
+                   (task->done_at - task->started_at) / 1000.0);
+
+     if (task->c && !task->c->aborted && task->started_at > m->last_mood_change) {
+-        mplx_be_happy(m);
++        mplx_be_happy(m, task);
+     }
+
+     ap_assert(task->done_done == 0);
+@@ -805,13 +805,13 @@ static void task_done(h2_mplx *m, h2_task *task)
+             /* reset and schedule again */
+             h2_task_redo(task);
+             h2_iq_add(m->q, stream->id, NULL, NULL);
+-            ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, m->c,
++            ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, task->c,
+                           H2_STRM_MSG(stream, "redo, added to q"));
+         }
+         else {
+             /* stream not cleaned up, stay around */
+             task->done_done = 1;
+-            ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
++            ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, task->c,
+                           H2_STRM_MSG(stream, "task_done, stream open"));
+             if (stream->input) {
+                 h2_beam_leave(stream->input);
+@@ -824,7 +824,7 @@ static void task_done(h2_mplx *m, h2_task *task)
+     else if ((stream = h2_ihash_get(m->shold, task->stream_id)) != NULL) {
+         /* stream is done, was just waiting for this. */
+         task->done_done = 1;
+-        ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
++        ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, task->c,
+                       H2_STRM_MSG(stream, "task_done, in hold"));
+         if (stream->input) {
+             h2_beam_leave(stream->input);
+@@ -832,12 +832,12 @@ static void task_done(h2_mplx *m, h2_task *task)
+         stream_joined(m, stream);
+     }
+     else if ((stream = h2_ihash_get(m->spurge, task->stream_id)) != NULL) {
+-        ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c,
++        ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, task->c,
+                       H2_STRM_LOG(APLOGNO(03517), stream, "already in spurge"));
+         ap_assert("stream should not be in spurge" == NULL);
+     }
+     else {
+-        ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c, APLOGNO(03518)
++        ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, task->c, APLOGNO(03518)
+                       "h2_mplx(%s): task_done, stream not found",
+                       task->id);
+         ap_assert("stream should still be available" == NULL);
+@@ -963,7 +963,7 @@ static apr_status_t unschedule_slow_tasks(h2_mplx *m)
+     return rv;
+ }
+
+-static apr_status_t mplx_be_happy(h2_mplx *m)
++static apr_status_t mplx_be_happy(h2_mplx *m, h2_task *task)
+ {
+     apr_time_t now;
+
+@@ -975,7 +975,7 @@ static apr_status_t mplx_be_happy(h2_mplx *m)
+         m->limit_active = H2MIN(m->limit_active * 2, m->max_active);
+         m->last_mood_change = now;
+         m->irritations_since = 0;
+-        ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
++        ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c,
+                       "h2_mplx(%ld): mood update, increasing worker limit to %d",
+                       m->id, m->limit_active);
+     }
diff --git a/SOURCES/mod_http2-1.15.7-CVE-2020-9490.patch b/SOURCES/mod_http2-1.15.7-CVE-2020-9490.patch
new file mode 100644
index 0000000..e7f50b2
--- /dev/null
+++ b/SOURCES/mod_http2-1.15.7-CVE-2020-9490.patch
@@ -0,0 +1,391 @@
+From b8a8c5061eada0ce3339b24ba1d587134552bc0c Mon Sep 17 00:00:00 2001
+From: Stefan Eissing <stefan.eissing@greenbytes.de>
+Date: Wed, 29 Jul 2020 14:41:38 +0200
+Subject: [PATCH] * Removing support for abandoned draft of http-wg regarding
+ cache-digests.
+
+---
+
+diff --git a/mod_http2/h2_push.c b/mod_http2/h2_push.c
+index 4a70674..8ae0b49 100644
+--- a/mod_http2/h2_push.c
++++ b/mod_http2/h2_push.c
+@@ -464,33 +464,6 @@ apr_array_header_t *h2_push_collect(apr_pool_t *p, const h2_request *req,
+     return NULL;
+ }
+
+-/*******************************************************************************
+- * push diary
+- *
+- * - The push diary keeps track of resources already PUSHed via HTTP/2 on this
+- *   connection. It records a hash value from the absolute URL of the resource
+- *   pushed.
+- * - Lacking openssl, it uses 'apr_hashfunc_default' for the value
+- * - with openssl, it uses SHA256 to calculate the hash value
+- * - whatever the method to generate the hash, the diary keeps a maximum of 64
+- *   bits per hash, limiting the memory consumption to about
+- *      H2PushDiarySize * 8
+- *   bytes. Entries are sorted by most recently used and oldest entries are
+- *   forgotten first.
+- * - Clients can initialize/replace the push diary by sending a 'Cache-Digest'
+- *   header. Currently, this is the base64url encoded value of the cache digest
+- *   as specified in https://datatracker.ietf.org/doc/draft-kazuho-h2-cache-digest/
+- *   This draft can be expected to evolve and the definition of the header
+- *   will be added there and refined.
+- * - The cache digest header is a Golomb Coded Set of hash values, but it may
+- *   limit the amount of bits per hash value even further. For a good description
+- *   of GCS, read here:
+- *   http://giovanni.bajo.it/post/47119962313/golomb-coded-sets-smaller-than-bloom-filters
+- * - The means that the push diary might be initialized with hash values of much
+- *   less than 64 bits, leading to more false positives, but smaller digest size.
+- ******************************************************************************/
+-
+-
+ #define GCSLOG_LEVEL APLOG_TRACE1
+
+ typedef struct h2_push_diary_entry {
+@@ -618,38 +591,48 @@ static int h2_push_diary_find(h2_push_diary *diary, apr_uint64_t hash)
+     return -1;
+ }
+
+-static h2_push_diary_entry *move_to_last(h2_push_diary *diary, apr_size_t idx)
++static void move_to_last(h2_push_diary *diary, apr_size_t idx)
+ {
+     h2_push_diary_entry *entries = (h2_push_diary_entry*)diary->entries->elts;
+     h2_push_diary_entry e;
+-    apr_size_t lastidx = (apr_size_t)diary->entries->nelts;
++    int lastidx;
+
++    /* Move an existing entry to the last place */
++    if (diary->entries->nelts <= 0)
++        return;
++
+     /* move entry[idx] to the end */
+-    if (idx+1 < lastidx) {
++    lastidx = diary->entries->nelts - 1;
++    if (idx < lastidx) {
+         e = entries[idx];
+-        memmove(entries+idx, entries+idx+1, sizeof(e) * (lastidx - idx));
++        memmove(entries+idx, entries+idx+1, sizeof(h2_push_diary_entry) * (lastidx - idx));
+         entries[lastidx] = e;
+     }
+-    return &entries[lastidx];
+ }
+
+-static void h2_push_diary_append(h2_push_diary *diary, h2_push_diary_entry *e)
++static void remove_first(h2_push_diary *diary)
+ {
+-    h2_push_diary_entry *ne;
++    h2_push_diary_entry *entries = (h2_push_diary_entry*)diary->entries->elts;
++    int lastidx;
+
+-    if (diary->entries->nelts < diary->N) {
+-        /* append a new diary entry at the end */
+-        APR_ARRAY_PUSH(diary->entries, h2_push_diary_entry) = *e;
+-        ne = &APR_ARRAY_IDX(diary->entries, diary->entries->nelts-1, h2_push_diary_entry);
++    /* move remaining entries to index 0 */
++    lastidx = diary->entries->nelts - 1;
++    if (lastidx > 0) {
++        --diary->entries->nelts;
++        memmove(entries, entries+1, sizeof(h2_push_diary_entry) * diary->entries->nelts);
+     }
+-    else {
+-        /* replace content with new digest. keeps memory usage constant once diary is full */
+-        ne = move_to_last(diary, 0);
+-        *ne = *e;
++}
++
++static void h2_push_diary_append(h2_push_diary *diary, h2_push_diary_entry *e)
++{
++    while (diary->entries->nelts >= diary->N) {
++        remove_first(diary);
+     }
++    /* append a new diary entry at the end */
++    APR_ARRAY_PUSH(diary->entries, h2_push_diary_entry) = *e;
+     /* Intentional no APLOGNO */
+     ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, diary->entries->pool,
+-                  "push_diary_append: %"APR_UINT64_T_HEX_FMT, ne->hash);
++                  "push_diary_append: %"APR_UINT64_T_HEX_FMT, e->hash);
+ }
+
+ apr_array_header_t *h2_push_diary_update(h2_session *session, apr_array_header_t *pushes)
+@@ -692,30 +675,12 @@ apr_array_header_t *h2_push_collect_update(h2_stream *stream,
+                                            const struct h2_request *req,
+                                            const struct h2_headers *res)
+ {
+-    h2_session *session = stream->session;
+-    const char *cache_digest = apr_table_get(req->headers, "Cache-Digest");
+     apr_array_header_t *pushes;
+-    apr_status_t status;
+
+-    if (cache_digest && session->push_diary) {
+-        status = h2_push_diary_digest64_set(session->push_diary, req->authority,
+-                                            cache_digest, stream->pool);
+-        if (status != APR_SUCCESS) {
+-            ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c,
+-                          H2_SSSN_LOG(APLOGNO(03057), session,
+-                          "push diary set from Cache-Digest: %s"), cache_digest);
+-        }
+-    }
+     pushes = h2_push_collect(stream->pool, req, stream->push_policy, res);
+     return h2_push_diary_update(stream->session, pushes);
+ }
+
+-static apr_int32_t h2_log2inv(unsigned char log2)
+-{
+-    return log2? (1 << log2) : 1;
+-}
+-
+-
+ typedef struct {
+     h2_push_diary *diary;
+     unsigned char log2p;
+@@ -830,11 +795,6 @@ apr_status_t h2_push_diary_digest_get(h2_push_diary *diary, apr_pool_t *pool,
+     apr_size_t hash_count;
+
+     nelts = diary->entries->nelts;
+-
+-    if ((apr_uint32_t)nelts > APR_UINT32_MAX) {
+-        /* should not happen */
+-        return APR_ENOTIMPL;
+-    }
+     N = ceil_power_of_2(nelts);
+     log2n = h2_log2(N);
+
+@@ -896,166 +856,3 @@ apr_status_t h2_push_diary_digest_get(h2_push_diary *diary, apr_pool_t *pool,
+     return APR_SUCCESS;
+ }
+
+-typedef struct {
+-    h2_push_diary *diary;
+-    apr_pool_t *pool;
+-    unsigned char log2p;
+-    const unsigned char *data;
+-    apr_size_t datalen;
+-    apr_size_t offset;
+-    unsigned int bit;
+-    apr_uint64_t last_val;
+-} gset_decoder;
+-
+-static int gset_decode_next_bit(gset_decoder *decoder)
+-{
+-    if (++decoder->bit >= 8) {
+-        if (++decoder->offset >= decoder->datalen) {
+-            return -1;
+-        }
+-        decoder->bit = 0;
+-    }
+-    return (decoder->data[decoder->offset] & cbit_mask[decoder->bit])? 1 : 0;
+-}
+-
+-static apr_status_t gset_decode_next(gset_decoder *decoder, apr_uint64_t *phash)
+-{
+-    apr_uint64_t flex = 0, fixed = 0, delta;
+-    int i;
+-
+-    /* read 1 bits until we encounter 0, then read log2n(diary-P) bits.
+-     * On a malformed bit-string, this will not fail, but produce results
+-     * which are pbly too large. Luckily, the diary will modulo the hash.
+-     */
+-    while (1) {
+-        int bit = gset_decode_next_bit(decoder);
+-        if (bit == -1) {
+-            return APR_EINVAL;
+-        }
+-        if (!bit) {
+-            break;
+-        }
+-        ++flex;
+-    }
+-
+-    for (i = 0; i < decoder->log2p; ++i) {
+-        int bit = gset_decode_next_bit(decoder);
+-        if (bit == -1) {
+-            return APR_EINVAL;
+-        }
+-        fixed = (fixed << 1) | bit;
+-    }
+-
+-    delta = (flex << decoder->log2p) | fixed;
+-    *phash = delta + decoder->last_val;
+-    decoder->last_val = *phash;
+-
+-    /* Intentional no APLOGNO */
+-    ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, decoder->pool,
+-                  "h2_push_diary_digest_dec: val=%"APR_UINT64_T_HEX_FMT", delta=%"
+-                  APR_UINT64_T_HEX_FMT", flex=%d, fixed=%"APR_UINT64_T_HEX_FMT,
+-                  *phash, delta, (int)flex, fixed);
+-
+-    return APR_SUCCESS;
+-}
+-
+-/**
+- * Initialize the push diary by a cache digest as described in
+- * https://datatracker.ietf.org/doc/draft-kazuho-h2-cache-digest/
+- * .
+- * @param diary the diary to set the digest into
+- * @param data the binary cache digest
+- * @param len the length of the cache digest
+- * @return APR_EINVAL if digest was not successfully parsed
+- */
+-apr_status_t h2_push_diary_digest_set(h2_push_diary *diary, const char *authority,
+-                                      const char *data, apr_size_t len)
+-{
+-    gset_decoder decoder;
+-    unsigned char log2n, log2p;
+-    int N, i;
+-    apr_pool_t *pool = diary->entries->pool;
+-    h2_push_diary_entry e;
+-    apr_status_t status = APR_SUCCESS;
+-
+-    if (len < 2) {
+-        /* at least this should be there */
+-        return APR_EINVAL;
+-    }
+-    log2n = data[0];
+-    log2p = data[1];
+-    diary->mask_bits = log2n + log2p;
+-    if (diary->mask_bits > 64) {
+-        /* cannot handle */
+-        return APR_ENOTIMPL;
+-    }
+-
+-    /* whatever is in the digest, it replaces the diary entries */
+-    apr_array_clear(diary->entries);
+-    if (!authority || !strcmp("*", authority)) {
+-        diary->authority = NULL;
+-    }
+-    else if (!diary->authority || strcmp(diary->authority, authority)) {
+-        diary->authority = apr_pstrdup(diary->entries->pool, authority);
+-    }
+-
+-    N = h2_log2inv(log2n + log2p);
+-
+-    decoder.diary = diary;
+-    decoder.pool = pool;
+-    decoder.log2p = log2p;
+-    decoder.data = (const unsigned char*)data;
+-    decoder.datalen = len;
+-    decoder.offset = 1;
+-    decoder.bit = 8;
+-    decoder.last_val = 0;
+-
+-    diary->N = N;
+-    /* Determine effective N we use for storage */
+-    if (!N) {
+-        /* a totally empty cache digest. someone tells us that she has no
+-         * entries in the cache at all. Use our own preferences for N+mask
+-         */
+-        diary->N = diary->NMax;
+-        return APR_SUCCESS;
+-    }
+-    else if (N > diary->NMax) {
+-        /* Store not more than diary is configured to hold. We open us up
+-         * to DOS attacks otherwise. */
+-        diary->N = diary->NMax;
+-    }
+-
+-    /* Intentional no APLOGNO */
+-    ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, pool,
+-                  "h2_push_diary_digest_set: N=%d, log2n=%d, "
+-                  "diary->mask_bits=%d, dec.log2p=%d",
+-                  (int)diary->N, (int)log2n, diary->mask_bits,
+-                  (int)decoder.log2p);
+-
+-    for (i = 0; i < diary->N; ++i) {
+-        if (gset_decode_next(&decoder, &e.hash) != APR_SUCCESS) {
+-            /* the data may have less than N values */
+-            break;
+-        }
+-        h2_push_diary_append(diary, &e);
+-    }
+-
+-    /* Intentional no APLOGNO */
+-    ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, pool,
+-                  "h2_push_diary_digest_set: diary now with %d entries, mask_bits=%d",
+-                  (int)diary->entries->nelts, diary->mask_bits);
+-    return status;
+-}
+-
+-apr_status_t h2_push_diary_digest64_set(h2_push_diary *diary, const char *authority,
+-                                        const char *data64url, apr_pool_t *pool)
+-{
+-    const char *data;
+-    apr_size_t len = h2_util_base64url_decode(&data, data64url, pool);
+-    /* Intentional no APLOGNO */
+-    ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, pool,
+-                  "h2_push_diary_digest64_set: digest=%s, dlen=%d",
+-                  data64url, (int)len);
+-    return h2_push_diary_digest_set(diary, authority, data, len);
+-}
+-
+diff --git a/mod_http2/h2_push.h b/mod_http2/h2_push.h
+index 0533853..5dc189f 100644
+--- a/mod_http2/h2_push.h
++++ b/mod_http2/h2_push.h
+@@ -35,6 +35,44 @@ typedef enum {
+     H2_PUSH_DIGEST_SHA256
+ } h2_push_digest_type;
+
++/*******************************************************************************
++ * push diary
++ *
++ * - The push diary keeps track of resources already PUSHed via HTTP/2 on this
++ *   connection. It records a hash value from the absolute URL of the resource
++ *   pushed.
++ * - Lacking openssl,
++ * - with openssl, it uses SHA256 to calculate the hash value, otherwise it
++ *   falls back to apr_hashfunc_default()
++ * - whatever the method to generate the hash, the diary keeps a maximum of 64
++ *   bits per hash, limiting the memory consumption to about
++ *      H2PushDiarySize * 8
++ *   bytes. Entries are sorted by most recently used and oldest entries are
++ *   forgotten first.
++ * - While useful by itself to avoid duplicated PUSHes on the same connection,
++ *   the original idea was that clients provided a 'Cache-Digest' header with
++ *   the values of *their own* cached resources. This was described in
++ *   <https://datatracker.ietf.org/doc/draft-ietf-httpbis-cache-digest/>
++ *   and some subsequent revisions that tweaked values but kept the overall idea.
++ * - The draft was abandoned by the IETF http-wg, as support from major clients,
++ *   e.g. browsers, was lacking for various reasons.
++ * - For these reasons, mod_h2 abandoned its support for client supplied values
++ *   but keeps the diary. It seems to provide value for applications using PUSH,
++ *   is configurable in size and defaults to a very moderate amount of memory
++ *   used.
++ * - The cache digest header is a Golomb Coded Set of hash values, but it may
++ *   limit the amount of bits per hash value even further. For a good description
++ *   of GCS, read here:
++ *   <http://giovanni.bajo.it/post/47119962313/golomb-coded-sets-smaller-than-bloom-filters>
++ ******************************************************************************/
++
++
++/*
++ * The push diary is based on the abandoned draft
++ * <https://datatracker.ietf.org/doc/draft-kazuho-h2-cache-digest/>
++ * that describes how to use golomb filters.
++ */
++
+ typedef struct h2_push_diary h2_push_diary;
+
+ typedef void h2_push_digest_calc(h2_push_diary *diary, apr_uint64_t *phash, h2_push *push);
+@@ -101,20 +139,4 @@ apr_status_t h2_push_diary_digest_get(h2_push_diary *diary, apr_pool_t *p,
+                                       int maxP, const char *authority,
+                                       const char **pdata, apr_size_t *plen);
+
+-/**
+- * Initialize the push diary by a cache digest as described in
+- * https://datatracker.ietf.org/doc/draft-kazuho-h2-cache-digest/
+- * .
+- * @param diary the diary to set the digest into
+- * @param authority the authority to set the data for
+- * @param data the binary cache digest
+- * @param len the length of the cache digest
+- * @return APR_EINVAL if digest was not successfully parsed
+- */
+-apr_status_t h2_push_diary_digest_set(h2_push_diary *diary, const char *authority,
+-                                      const char *data, apr_size_t len);
+-
+-apr_status_t h2_push_diary_digest64_set(h2_push_diary *diary, const char *authority,
+-                                        const char *data64url, apr_pool_t *pool);
+-
+ #endif /* defined(__mod_h2__h2_push__) */
diff --git a/SPECS/mod_http2.spec b/SPECS/mod_http2.spec
index 7a0d7eb..aa3cfc3 100644
--- a/SPECS/mod_http2.spec
+++ b/SPECS/mod_http2.spec
@@ -3,12 +3,14 @@
 Name: mod_http2
 Version: 1.15.7
-Release: 1%{?dist}
+Release: 3%{?dist}
 Summary: module implementing HTTP/2 for Apache 2
 Group: System Environment/Daemons
 License: ASL 2.0
 URL: https://icing.github.io/mod_h2/
 Source0: https://github.com/icing/mod_h2/releases/download/v%{version}/mod_http2-%{version}.tar.gz
+Patch1: mod_http2-1.15.7-CVE-2020-9490.patch
+Patch2: mod_http2-1.15.7-CVE-2020-11993.patch
 BuildRequires: pkgconfig, httpd-devel >= 2.4.20, libnghttp2-devel >= 1.7.0, openssl-devel >= 1.0.2
 Requires: httpd-mmn = %{_httpd_mmn}
 Conflicts: httpd < 2.4.25-8
 
@@ -20,6 +22,8 @@ top of libnghttp2 for httpd 2.4 servers.
 
 %prep
 %setup -q
+%patch1 -p1 -b .CVE-2020-9490
+%patch2 -p1 -b .CVE-2020-11993
 
 %build
 %configure
@@ -46,6 +50,14 @@ make check
 %{_httpd_moddir}/mod_proxy_http2.so
 
 %changelog
+* Fri Oct 30 2020 Lubos Uhliarik <luhliari@redhat.com> - 1.15.7-3
+- Resolves: #1869077 - CVE-2020-11993 httpd:2.4/mod_http2: httpd:
+  mod_http2 concurrent pool usage
+
+* Mon Aug 17 2020 Lubos Uhliarik <luhliari@redhat.com> - 1.15.7-2
+- Resolves: #1869073 - CVE-2020-9490 httpd:2.4/mod_http2: httpd:
+  Push diary crash on specifically crafted HTTP/2 header
+
 * Tue Apr 14 2020 Lubos Uhliarik <luhliari@redhat.com> - 1.15.7-1
 - new version 1.15.7
 - Resolves: #1814236 - RFE: mod_http2 rebase
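The crux of the CVE-2020-9490 hunks above is the rewritten h2_push_diary_append(): instead of overwriting a slot via move_to_last() once the diary is full, it now evicts the oldest entry with remove_first() until fewer than diary->N entries remain, and only then appends the new hash, so the diary's memory stays bounded no matter how many hashes are fed in. What follows is a minimal standalone C sketch of that bounded-append behavior; diary_t, DIARY_N and diary_append are simplified, hypothetical stand-ins for the module's APR-based types, not mod_http2's actual API.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DIARY_N 4 /* illustrative stand-in for diary->N (H2PushDiarySize) */

typedef struct {
    uint64_t hashes[DIARY_N];
    size_t nelts;
} diary_t;

/* drop the oldest entry (index 0) and shift the rest down one slot,
 * mirroring the patch's remove_first() */
static void remove_first(diary_t *d)
{
    if (d->nelts > 1) {
        memmove(d->hashes, d->hashes + 1, sizeof(uint64_t) * (d->nelts - 1));
    }
    if (d->nelts > 0) {
        --d->nelts;
    }
}

/* evict oldest-first until there is room, then append,
 * mirroring the patched h2_push_diary_append() */
static void diary_append(diary_t *d, uint64_t hash)
{
    while (d->nelts >= DIARY_N) {
        remove_first(d);
    }
    d->hashes[d->nelts++] = hash;
}

int main(void)
{
    diary_t d = { {0}, 0 };
    /* feeding more than DIARY_N hashes never grows the diary past DIARY_N */
    for (uint64_t h = 1; h <= 7; ++h) {
        diary_append(&d, h);
    }
    for (size_t i = 0; i < d.nelts; ++i) {
        printf("%zu: %llu\n", i, (unsigned long long)d.hashes[i]);
    }
    return 0;
}

With DIARY_N at 4, the sketch prints only the four most recent hashes (4 through 7): older entries are forgotten first, mirroring how the patched diary keeps at most its configured number of entries regardless of input.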