diff --git a/.gitignore b/.gitignore
index 6e3f15e..cfbe234 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1 +1,2 @@
+SOURCES/apache-poweredby.png
 SOURCES/httpd-2.4.37.tar.bz2
diff --git a/.httpd.metadata b/.httpd.metadata
index e540f7e..06980e0 100644
--- a/.httpd.metadata
+++ b/.httpd.metadata
@@ -1 +1,2 @@
+3a7449d6cff00e5ccb3ed8571f34c0528555d38f SOURCES/apache-poweredby.png
 4a38471de821288b0300148016f2b03dfee8adf2 SOURCES/httpd-2.4.37.tar.bz2
diff --git a/SOURCES/httpd-2.4.37-CVE-2020-35452.patch b/SOURCES/httpd-2.4.37-CVE-2020-35452.patch
new file mode 100644
index 0000000..998c1e5
--- /dev/null
+++ b/SOURCES/httpd-2.4.37-CVE-2020-35452.patch
@@ -0,0 +1,21 @@
+diff --git a/modules/aaa/mod_auth_digest.c b/modules/aaa/mod_auth_digest.c
+index b760941..0825b1b 100644
+--- a/modules/aaa/mod_auth_digest.c
++++ b/modules/aaa/mod_auth_digest.c
+@@ -1422,9 +1422,14 @@ static int check_nonce(request_rec *r, digest_header_rec *resp,
+     time_rec nonce_time;
+     char tmp, hash[NONCE_HASH_LEN+1];
+ 
+-    if (strlen(resp->nonce) != NONCE_LEN) {
++    /* Since the time part of the nonce is a base64 encoding of an
++     * apr_time_t (8 bytes), it should end with a '=', fail early otherwise.
++     */
++    if (strlen(resp->nonce) != NONCE_LEN
++        || resp->nonce[NONCE_TIME_LEN - 1] != '=') {
+         ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01775)
+-                      "invalid nonce %s received - length is not %d",
++                      "invalid nonce '%s' received - length is not %d "
++                      "or time encoding is incorrect",
+                       resp->nonce, NONCE_LEN);
+         note_digest_auth_failure(r, conf, resp, 1);
+         return HTTP_UNAUTHORIZED;
diff --git a/SOURCES/httpd-2.4.37-CVE-2021-26690.patch b/SOURCES/httpd-2.4.37-CVE-2021-26690.patch
new file mode 100644
index 0000000..f606576
--- /dev/null
+++ b/SOURCES/httpd-2.4.37-CVE-2021-26690.patch
@@ -0,0 +1,14 @@
+diff --git a/modules/session/mod_session.c b/modules/session/mod_session.c
+index 7ee477c..049255d 100644
+--- a/modules/session/mod_session.c
++++ b/modules/session/mod_session.c
+@@ -404,8 +404,8 @@ static apr_status_t session_identity_decode(request_rec * r, session_rec * z)
+         char *plast = NULL;
+         const char *psep = "=";
+         char *key = apr_strtok(pair, psep, &plast);
+-        char *val = apr_strtok(NULL, psep, &plast);
+         if (key && *key) {
++            char *val = apr_strtok(NULL, sep, &plast);
+             if (!val || !*val) {
+                 apr_table_unset(z->entries, key);
+             }
diff --git a/SOURCES/httpd-2.4.37-CVE-2021-26691.patch b/SOURCES/httpd-2.4.37-CVE-2021-26691.patch
new file mode 100644
index 0000000..786aea2
--- /dev/null
+++ b/SOURCES/httpd-2.4.37-CVE-2021-26691.patch
@@ -0,0 +1,13 @@
+diff --git a/modules/session/mod_session.c b/modules/session/mod_session.c
+index 049255d..af70f6b 100644
+--- a/modules/session/mod_session.c
++++ b/modules/session/mod_session.c
+@@ -317,7 +317,7 @@ static apr_status_t ap_session_set(request_rec * r, session_rec * z,
+ static int identity_count(void *v, const char *key, const char *val)
+ {
+     int *count = v;
+-    *count += strlen(key) * 3 + strlen(val) * 3 + 1;
++    *count += strlen(key) * 3 + strlen(val) * 3 + 2;
+     return 1;
+ }
+ 
diff --git a/SOURCES/httpd-2.4.37-CVE-2021-30641.patch b/SOURCES/httpd-2.4.37-CVE-2021-30641.patch
new file mode 100644
index 0000000..5a34afb
--- /dev/null
+++ b/SOURCES/httpd-2.4.37-CVE-2021-30641.patch
@@ -0,0 +1,44 @@
+diff --git a/server/request.c b/server/request.c
+index d5c558a..18625af 100644
+--- a/server/request.c
++++ b/server/request.c
+@@ -1419,7 +1419,20 @@ AP_DECLARE(int) ap_location_walk(request_rec *r)
+ 
+     cache = prep_walk_cache(AP_NOTE_LOCATION_WALK, r);
+     cached = 
(cache->cached != NULL); +- entry_uri = r->uri; ++ ++ /* ++ * When merge_slashes is set to AP_CORE_CONFIG_OFF the slashes in r->uri ++ * have not been merged. But for Location walks we always go with merged ++ * slashes no matter what merge_slashes is set to. ++ */ ++ if (sconf->merge_slashes != AP_CORE_CONFIG_OFF) { ++ entry_uri = r->uri; ++ } ++ else { ++ char *uri = apr_pstrdup(r->pool, r->uri); ++ ap_no2slash(uri); ++ entry_uri = uri; ++ } + + /* If we have an cache->cached location that matches r->uri, + * and the vhost's list of locations hasn't changed, we can skip +@@ -1486,7 +1499,7 @@ AP_DECLARE(int) ap_location_walk(request_rec *r) + pmatch = apr_palloc(rxpool, nmatch*sizeof(ap_regmatch_t)); + } + +- if (ap_regexec(entry_core->r, entry_uri, nmatch, pmatch, 0)) { ++ if (ap_regexec(entry_core->r, r->uri, nmatch, pmatch, 0)) { + continue; + } + +@@ -1496,7 +1509,7 @@ AP_DECLARE(int) ap_location_walk(request_rec *r) + apr_table_setn(r->subprocess_env, + ((const char **)entry_core->refs->elts)[i], + apr_pstrndup(r->pool, +- entry_uri + pmatch[i].rm_so, ++ r->uri + pmatch[i].rm_so, + pmatch[i].rm_eo - pmatch[i].rm_so)); + } + } diff --git a/SOURCES/httpd-2.4.37-CVE-2021-33193.patch b/SOURCES/httpd-2.4.37-CVE-2021-33193.patch new file mode 100644 index 0000000..cec1c96 --- /dev/null +++ b/SOURCES/httpd-2.4.37-CVE-2021-33193.patch @@ -0,0 +1,706 @@ +diff --git a/include/http_core.h b/include/http_core.h +index 8e10988..3ba8069 100644 +--- a/include/http_core.h ++++ b/include/http_core.h +@@ -741,6 +741,7 @@ typedef struct { + #define AP_HTTP_METHODS_REGISTERED 2 + char http_methods; + unsigned int merge_slashes; ++ unsigned int strict_host_check; + } core_server_config; + + /* for AddOutputFiltersByType in core.c */ +@@ -769,6 +770,11 @@ AP_DECLARE(void) ap_set_server_protocol(server_rec* s, const char* proto); + typedef struct core_output_filter_ctx core_output_filter_ctx_t; + typedef struct core_filter_ctx core_ctx_t; + ++struct core_filter_ctx { ++ apr_bucket_brigade *b; ++ apr_bucket_brigade *tmpbb; ++}; ++ + typedef struct core_net_rec { + /** Connection to the client */ + apr_socket_t *client_socket; +diff --git a/include/http_protocol.h b/include/http_protocol.h +index 11c7b2d..e7abdd9 100644 +--- a/include/http_protocol.h ++++ b/include/http_protocol.h +@@ -53,6 +53,13 @@ AP_DECLARE_DATA extern ap_filter_rec_t *ap_old_write_func; + * or control the ones that eventually do. + */ + ++/** ++ * Read an empty request and set reasonable defaults. ++ * @param c The current connection ++ * @return The new request_rec ++ */ ++AP_DECLARE(request_rec *) ap_create_request(conn_rec *c); ++ + /** + * Read a request and fill in the fields. + * @param c The current connection +@@ -60,6 +67,20 @@ AP_DECLARE_DATA extern ap_filter_rec_t *ap_old_write_func; + */ + request_rec *ap_read_request(conn_rec *c); + ++/** ++ * Parse and validate the request line. ++ * @param r The current request ++ * @return 1 on success, 0 on failure ++ */ ++AP_DECLARE(int) ap_parse_request_line(request_rec *r); ++ ++/** ++ * Validate the request header and select vhost. ++ * @param r The current request ++ * @return 1 on success, 0 on failure ++ */ ++AP_DECLARE(int) ap_check_request_header(request_rec *r); ++ + /** + * Read the mime-encoded headers. 
+ * @param r The current request +diff --git a/include/http_vhost.h b/include/http_vhost.h +index 473c9c7..d2d9c97 100644 +--- a/include/http_vhost.h ++++ b/include/http_vhost.h +@@ -99,6 +99,19 @@ AP_DECLARE(void) ap_update_vhost_given_ip(conn_rec *conn); + */ + AP_DECLARE(void) ap_update_vhost_from_headers(request_rec *r); + ++/** ++ * Updates r->server with the best name-based virtual host match, within ++ * the chain of matching virtual hosts selected by ap_update_vhost_given_ip. ++ * @param r The current request ++ * @param require_match 1 to return an HTTP error if the requested hostname is ++ * not explicitly matched to a VirtualHost. ++ * @return return HTTP_OK unless require_match was specified and the requested ++ * hostname did not match any ServerName, ServerAlias, or VirtualHost ++ * address-spec. ++ */ ++AP_DECLARE(int) ap_update_vhost_from_headers_ex(request_rec *r, int require_match); ++ ++ + /** + * Match the host in the header with the hostname of the server for this + * request. +diff --git a/server/core.c b/server/core.c +index 84e80f2..23abf57 100644 +--- a/server/core.c ++++ b/server/core.c +@@ -498,6 +498,8 @@ static void *create_core_server_config(apr_pool_t *a, server_rec *s) + conf->protocols = apr_array_make(a, 5, sizeof(const char *)); + conf->protocols_honor_order = -1; + ++ conf->strict_host_check= AP_CORE_CONFIG_UNSET; ++ + return (void *)conf; + } + +@@ -565,6 +567,12 @@ static void *merge_core_server_configs(apr_pool_t *p, void *basev, void *virtv) + + AP_CORE_MERGE_FLAG(merge_slashes, conf, base, virt); + ++ conf->strict_host_check = (virt->strict_host_check != AP_CORE_CONFIG_UNSET) ++ ? virt->strict_host_check ++ : base->strict_host_check; ++ ++ AP_CORE_MERGE_FLAG(strict_host_check, conf, base, virt); ++ + return conf; + } + +@@ -4546,7 +4554,10 @@ AP_INIT_TAKE2("CGIVar", set_cgi_var, NULL, OR_FILEINFO, + AP_INIT_FLAG("QualifyRedirectURL", set_qualify_redirect_url, NULL, OR_FILEINFO, + "Controls whether HTTP authorization headers, normally hidden, will " + "be passed to scripts"), +- ++AP_INIT_FLAG("StrictHostCheck", set_core_server_flag, ++ (void *)APR_OFFSETOF(core_server_config, strict_host_check), ++ RSRC_CONF, ++ "Controls whether a hostname match is required"), + AP_INIT_TAKE1("ForceType", ap_set_string_slot_lower, + (void *)APR_OFFSETOF(core_dir_config, mime_type), OR_FILEINFO, + "a mime type that overrides other configured type"), +@@ -5581,4 +5592,3 @@ AP_DECLARE_MODULE(core) = { + core_cmds, /* command apr_table_t */ + register_hooks /* register hooks */ + }; +- +diff --git a/server/core_filters.c b/server/core_filters.c +index a6c2bd6..e08801f 100644 +--- a/server/core_filters.c ++++ b/server/core_filters.c +@@ -84,11 +84,6 @@ struct core_output_filter_ctx { + apr_size_t bytes_written; + }; + +-struct core_filter_ctx { +- apr_bucket_brigade *b; +- apr_bucket_brigade *tmpbb; +-}; +- + + apr_status_t ap_core_input_filter(ap_filter_t *f, apr_bucket_brigade *b, + ap_input_mode_t mode, apr_read_type_e block, +diff --git a/server/protocol.c b/server/protocol.c +index 8d1fdd2..430d91e 100644 +--- a/server/protocol.c ++++ b/server/protocol.c +@@ -609,8 +609,15 @@ AP_CORE_DECLARE(void) ap_parse_uri(request_rec *r, const char *uri) + } + + r->args = r->parsed_uri.query; +- r->uri = r->parsed_uri.path ? 
r->parsed_uri.path +- : apr_pstrdup(r->pool, "/"); ++ if (r->parsed_uri.path) { ++ r->uri = r->parsed_uri.path; ++ } ++ else if (r->method_number == M_OPTIONS) { ++ r->uri = apr_pstrdup(r->pool, "*"); ++ } ++ else { ++ r->uri = apr_pstrdup(r->pool, "/"); ++ } + + #if defined(OS2) || defined(WIN32) + /* Handle path translations for OS/2 and plug security hole. +@@ -645,13 +652,6 @@ static int field_name_len(const char *field) + + static int read_request_line(request_rec *r, apr_bucket_brigade *bb) + { +- enum { +- rrl_none, rrl_badmethod, rrl_badwhitespace, rrl_excesswhitespace, +- rrl_missinguri, rrl_baduri, rrl_badprotocol, rrl_trailingtext, +- rrl_badmethod09, rrl_reject09 +- } deferred_error = rrl_none; +- char *ll; +- char *uri; + apr_size_t len; + int num_blank_lines = DEFAULT_LIMIT_BLANK_LINES; + core_server_config *conf = ap_get_core_module_config(r->server->module_config); +@@ -711,6 +711,20 @@ static int read_request_line(request_rec *r, apr_bucket_brigade *bb) + } + + r->request_time = apr_time_now(); ++ return 1; ++} ++ ++AP_DECLARE(int) ap_parse_request_line(request_rec *r) ++{ ++ core_server_config *conf = ap_get_core_module_config(r->server->module_config); ++ int strict = (conf->http_conformance != AP_HTTP_CONFORMANCE_UNSAFE); ++ enum { ++ rrl_none, rrl_badmethod, rrl_badwhitespace, rrl_excesswhitespace, ++ rrl_missinguri, rrl_baduri, rrl_badprotocol, rrl_trailingtext, ++ rrl_badmethod09, rrl_reject09 ++ } deferred_error = rrl_none; ++ apr_size_t len = 0; ++ char *uri, *ll; + + r->method = r->the_request; + +@@ -742,7 +756,6 @@ static int read_request_line(request_rec *r, apr_bucket_brigade *bb) + if (deferred_error == rrl_none) + deferred_error = rrl_missinguri; + r->protocol = uri = ""; +- len = 0; + goto rrl_done; + } + else if (strict && ll[0] && apr_isspace(ll[1]) +@@ -773,7 +786,6 @@ static int read_request_line(request_rec *r, apr_bucket_brigade *bb) + /* Verify URI terminated with a single SP, or mark as specific error */ + if (!ll) { + r->protocol = ""; +- len = 0; + goto rrl_done; + } + else if (strict && ll[0] && apr_isspace(ll[1]) +@@ -866,6 +878,14 @@ rrl_done: + r->header_only = 1; + + ap_parse_uri(r, uri); ++ if (r->status == HTTP_OK ++ && (r->parsed_uri.path != NULL) ++ && (r->parsed_uri.path[0] != '/') ++ && (r->method_number != M_OPTIONS ++ || strcmp(r->parsed_uri.path, "*") != 0)) { ++ /* Invalid request-target per RFC 7230 section 5.3 */ ++ r->status = HTTP_BAD_REQUEST; ++ } + + /* With the request understood, we can consider HTTP/0.9 specific errors */ + if (r->proto_num == HTTP_VERSION(0, 9) && deferred_error == rrl_none) { +@@ -973,6 +993,79 @@ rrl_failed: + return 0; + } + ++AP_DECLARE(int) ap_check_request_header(request_rec *r) ++{ ++ core_server_config *conf; ++ int strict_host_check; ++ const char *expect; ++ int access_status; ++ ++ conf = ap_get_core_module_config(r->server->module_config); ++ ++ /* update what we think the virtual host is based on the headers we've ++ * now read. may update status. 
++ */ ++ strict_host_check = (conf->strict_host_check == AP_CORE_CONFIG_ON); ++ access_status = ap_update_vhost_from_headers_ex(r, strict_host_check); ++ if (strict_host_check && access_status != HTTP_OK) { ++ if (r->server == ap_server_conf) { ++ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(10156) ++ "Requested hostname '%s' did not match any ServerName/ServerAlias " ++ "in the global server configuration ", r->hostname); ++ } ++ else { ++ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(10157) ++ "Requested hostname '%s' did not match any ServerName/ServerAlias " ++ "in the matching virtual host (default vhost for " ++ "current connection is %s:%u)", ++ r->hostname, r->server->defn_name, r->server->defn_line_number); ++ } ++ r->status = access_status; ++ } ++ if (r->status != HTTP_OK) { ++ return 0; ++ } ++ ++ if ((!r->hostname && (r->proto_num >= HTTP_VERSION(1, 1))) ++ || ((r->proto_num == HTTP_VERSION(1, 1)) ++ && !apr_table_get(r->headers_in, "Host"))) { ++ /* ++ * Client sent us an HTTP/1.1 or later request without telling us the ++ * hostname, either with a full URL or a Host: header. We therefore ++ * need to (as per the 1.1 spec) send an error. As a special case, ++ * HTTP/1.1 mentions twice (S9, S14.23) that a request MUST contain ++ * a Host: header, and the server MUST respond with 400 if it doesn't. ++ */ ++ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(00569) ++ "client sent HTTP/1.1 request without hostname " ++ "(see RFC2616 section 14.23): %s", r->uri); ++ r->status = HTTP_BAD_REQUEST; ++ return 0; ++ } ++ ++ if (((expect = apr_table_get(r->headers_in, "Expect")) != NULL) ++ && (expect[0] != '\0')) { ++ /* ++ * The Expect header field was added to HTTP/1.1 after RFC 2068 ++ * as a means to signal when a 100 response is desired and, ++ * unfortunately, to signal a poor man's mandatory extension that ++ * the server must understand or return 417 Expectation Failed. ++ */ ++ if (ap_cstr_casecmp(expect, "100-continue") == 0) { ++ r->expecting_100 = 1; ++ } ++ else { ++ ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(00570) ++ "client sent an unrecognized expectation value " ++ "of Expect: %s", expect); ++ r->status = HTTP_EXPECTATION_FAILED; ++ return 0; ++ } ++ } ++ ++ return 1; ++} ++ + static int table_do_fn_check_lengths(void *r_, const char *key, + const char *value) + { +@@ -1256,16 +1349,10 @@ AP_DECLARE(void) ap_get_mime_headers(request_rec *r) + apr_brigade_destroy(tmp_bb); + } + +-request_rec *ap_read_request(conn_rec *conn) ++AP_DECLARE(request_rec *) ap_create_request(conn_rec *conn) + { + request_rec *r; + apr_pool_t *p; +- const char *expect; +- int access_status; +- apr_bucket_brigade *tmp_bb; +- apr_socket_t *csd; +- apr_interval_time_t cur_timeout; +- + + apr_pool_create(&p, conn->pool); + apr_pool_tag(p, "request"); +@@ -1304,6 +1391,7 @@ request_rec *ap_read_request(conn_rec *conn) + r->read_body = REQUEST_NO_BODY; + + r->status = HTTP_OK; /* Until further notice */ ++ r->header_only = 0; + r->the_request = NULL; + + /* Begin by presuming any module can make its own path_info assumptions, +@@ -1314,12 +1402,33 @@ request_rec *ap_read_request(conn_rec *conn) + r->useragent_addr = conn->client_addr; + r->useragent_ip = conn->client_ip; + ++ return r; ++} ++ ++/* Apply the server's timeout/config to the connection/request. 
*/ ++static void apply_server_config(request_rec *r) ++{ ++ apr_socket_t *csd; ++ ++ csd = ap_get_conn_socket(r->connection); ++ apr_socket_timeout_set(csd, r->server->timeout); ++ ++ r->per_dir_config = r->server->lookup_defaults; ++} ++ ++request_rec *ap_read_request(conn_rec *conn) ++{ ++ int access_status; ++ apr_bucket_brigade *tmp_bb; ++ ++ request_rec *r = ap_create_request(conn); + tmp_bb = apr_brigade_create(r->pool, r->connection->bucket_alloc); + + ap_run_pre_read_request(r, conn); + + /* Get the request... */ +- if (!read_request_line(r, tmp_bb)) { ++ if (!read_request_line(r, tmp_bb) || !ap_parse_request_line(r)) { ++ apr_brigade_cleanup(tmp_bb); + switch (r->status) { + case HTTP_REQUEST_URI_TOO_LARGE: + case HTTP_BAD_REQUEST: +@@ -1335,49 +1444,38 @@ request_rec *ap_read_request(conn_rec *conn) + "request failed: malformed request line"); + } + access_status = r->status; +- r->status = HTTP_OK; +- ap_die(access_status, r); +- ap_update_child_status(conn->sbh, SERVER_BUSY_LOG, r); +- ap_run_log_transaction(r); +- r = NULL; +- apr_brigade_destroy(tmp_bb); +- goto traceout; ++ goto die_unusable_input; ++ + case HTTP_REQUEST_TIME_OUT: ++ /* Just log, no further action on this connection. */ + ap_update_child_status(conn->sbh, SERVER_BUSY_LOG, NULL); + if (!r->connection->keepalives) + ap_run_log_transaction(r); +- apr_brigade_destroy(tmp_bb); +- goto traceout; +- default: +- apr_brigade_destroy(tmp_bb); +- r = NULL; +- goto traceout; ++ break; + } ++ /* Not worth dying with. */ ++ conn->keepalive = AP_CONN_CLOSE; ++ apr_pool_destroy(r->pool); ++ goto ignore; + } ++ apr_brigade_cleanup(tmp_bb); + + /* We may have been in keep_alive_timeout mode, so toggle back + * to the normal timeout mode as we fetch the header lines, + * as necessary. + */ +- csd = ap_get_conn_socket(conn); +- apr_socket_timeout_get(csd, &cur_timeout); +- if (cur_timeout != conn->base_server->timeout) { +- apr_socket_timeout_set(csd, conn->base_server->timeout); +- cur_timeout = conn->base_server->timeout; +- } ++ apply_server_config(r); + + if (!r->assbackwards) { + const char *tenc; + + ap_get_mime_headers_core(r, tmp_bb); ++ apr_brigade_cleanup(tmp_bb); + if (r->status != HTTP_OK) { + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(00567) + "request failed: error reading the headers"); +- ap_send_error_response(r, 0); +- ap_update_child_status(conn->sbh, SERVER_BUSY_LOG, r); +- ap_run_log_transaction(r); +- apr_brigade_destroy(tmp_bb); +- goto traceout; ++ access_status = r->status; ++ goto die_unusable_input; + } + + tenc = apr_table_get(r->headers_in, "Transfer-Encoding"); +@@ -1393,13 +1491,8 @@ request_rec *ap_read_request(conn_rec *conn) + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02539) + "client sent unknown Transfer-Encoding " + "(%s): %s", tenc, r->uri); +- r->status = HTTP_BAD_REQUEST; +- conn->keepalive = AP_CONN_CLOSE; +- ap_send_error_response(r, 0); +- ap_update_child_status(conn->sbh, SERVER_BUSY_LOG, r); +- ap_run_log_transaction(r); +- apr_brigade_destroy(tmp_bb); +- goto traceout; ++ access_status = HTTP_BAD_REQUEST; ++ goto die_unusable_input; + } + + /* http://tools.ietf.org/html/draft-ietf-httpbis-p1-messaging-23 +@@ -1412,88 +1505,81 @@ request_rec *ap_read_request(conn_rec *conn) + } + } + +- apr_brigade_destroy(tmp_bb); +- +- /* update what we think the virtual host is based on the headers we've +- * now read. may update status. 
+- */ +- ap_update_vhost_from_headers(r); +- access_status = r->status; +- +- /* Toggle to the Host:-based vhost's timeout mode to fetch the +- * request body and send the response body, if needed. +- */ +- if (cur_timeout != r->server->timeout) { +- apr_socket_timeout_set(csd, r->server->timeout); +- cur_timeout = r->server->timeout; +- } +- +- /* we may have switched to another server */ +- r->per_dir_config = r->server->lookup_defaults; +- +- if ((!r->hostname && (r->proto_num >= HTTP_VERSION(1, 1))) +- || ((r->proto_num == HTTP_VERSION(1, 1)) +- && !apr_table_get(r->headers_in, "Host"))) { +- /* +- * Client sent us an HTTP/1.1 or later request without telling us the +- * hostname, either with a full URL or a Host: header. We therefore +- * need to (as per the 1.1 spec) send an error. As a special case, +- * HTTP/1.1 mentions twice (S9, S14.23) that a request MUST contain +- * a Host: header, and the server MUST respond with 400 if it doesn't. +- */ +- access_status = HTTP_BAD_REQUEST; +- ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(00569) +- "client sent HTTP/1.1 request without hostname " +- "(see RFC2616 section 14.23): %s", r->uri); +- } +- + /* + * Add the HTTP_IN filter here to ensure that ap_discard_request_body + * called by ap_die and by ap_send_error_response works correctly on + * status codes that do not cause the connection to be dropped and + * in situations where the connection should be kept alive. + */ +- + ap_add_input_filter_handle(ap_http_input_filter_handle, + NULL, r, r->connection); + +- if (access_status != HTTP_OK +- || (access_status = ap_run_post_read_request(r))) { +- ap_die(access_status, r); +- ap_update_child_status(conn->sbh, SERVER_BUSY_LOG, r); +- ap_run_log_transaction(r); +- r = NULL; +- goto traceout; ++ /* Validate Host/Expect headers and select vhost. */ ++ if (!ap_check_request_header(r)) { ++ /* we may have switched to another server still */ ++ apply_server_config(r); ++ access_status = r->status; ++ goto die_before_hooks; + } + +- if (((expect = apr_table_get(r->headers_in, "Expect")) != NULL) +- && (expect[0] != '\0')) { +- /* +- * The Expect header field was added to HTTP/1.1 after RFC 2068 +- * as a means to signal when a 100 response is desired and, +- * unfortunately, to signal a poor man's mandatory extension that +- * the server must understand or return 417 Expectation Failed. +- */ +- if (strcasecmp(expect, "100-continue") == 0) { +- r->expecting_100 = 1; +- } +- else { +- r->status = HTTP_EXPECTATION_FAILED; +- ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(00570) +- "client sent an unrecognized expectation value of " +- "Expect: %s", expect); +- ap_send_error_response(r, 0); +- ap_update_child_status(conn->sbh, SERVER_BUSY_LOG, r); +- ap_run_log_transaction(r); +- goto traceout; +- } ++ /* we may have switched to another server */ ++ apply_server_config(r); ++ ++ if ((access_status = ap_run_post_read_request(r))) { ++ goto die; + } + +- AP_READ_REQUEST_SUCCESS((uintptr_t)r, (char *)r->method, (char *)r->uri, (char *)r->server->defn_name, r->status); ++ AP_READ_REQUEST_SUCCESS((uintptr_t)r, (char *)r->method, ++ (char *)r->uri, (char *)r->server->defn_name, ++ r->status); ++ + return r; +- traceout: ++ ++ /* Everything falls through on failure */ ++ ++die_unusable_input: ++ /* Input filters are in an undeterminate state, cleanup (including ++ * CORE_IN's socket) such that any further attempt to read is EOF. 
++ */ ++ { ++ ap_filter_t *f = conn->input_filters; ++ while (f) { ++ if (f->frec == ap_core_input_filter_handle) { ++ core_net_rec *net = f->ctx; ++ apr_brigade_cleanup(net->in_ctx->b); ++ break; ++ } ++ ap_remove_input_filter(f); ++ f = f->next; ++ } ++ conn->input_filters = r->input_filters = f; ++ conn->keepalive = AP_CONN_CLOSE; ++ } ++ ++die_before_hooks: ++ /* First call to ap_die() (non recursive) */ ++ r->status = HTTP_OK; ++ ++die: ++ ap_die(access_status, r); ++ ++ /* ap_die() sent the response through the output filters, we must now ++ * end the request with an EOR bucket for stream/pipeline accounting. ++ */ ++ { ++ apr_bucket_brigade *eor_bb; ++ eor_bb = apr_brigade_create(conn->pool, conn->bucket_alloc); ++ APR_BRIGADE_INSERT_TAIL(eor_bb, ++ ap_bucket_eor_create(conn->bucket_alloc, r)); ++ ap_pass_brigade(conn->output_filters, eor_bb); ++ apr_brigade_cleanup(eor_bb); ++ } ++ ++ignore: ++ r = NULL; ++ + AP_READ_REQUEST_FAILURE((uintptr_t)r); +- return r; ++ return NULL; + } + + /* if a request with a body creates a subrequest, remove original request's +diff --git a/server/vhost.c b/server/vhost.c +index b23b2dd..6e233b5 100644 +--- a/server/vhost.c ++++ b/server/vhost.c +@@ -34,6 +34,7 @@ + #include "http_vhost.h" + #include "http_protocol.h" + #include "http_core.h" ++#include "http_main.h" + + #if APR_HAVE_ARPA_INET_H + #include +@@ -973,7 +974,13 @@ AP_DECLARE(int) ap_matches_request_vhost(request_rec *r, const char *host, + } + + +-static void check_hostalias(request_rec *r) ++/* ++ * Updates r->server from ServerName/ServerAlias. Per the interaction ++ * of ip and name-based vhosts, it only looks in the best match from the ++ * connection-level ip-based matching. ++ * Returns HTTP_BAD_REQUEST if there was no match. ++ */ ++static int update_server_from_aliases(request_rec *r) + { + /* + * Even if the request has a Host: header containing a port we ignore +@@ -1050,11 +1057,18 @@ static void check_hostalias(request_rec *r) + goto found; + } + +- return; ++ if (!r->connection->vhost_lookup_data) { ++ if (matches_aliases(r->server, host)) { ++ s = r->server; ++ goto found; ++ } ++ } ++ return HTTP_BAD_REQUEST; + + found: + /* s is the first matching server, we're done */ + r->server = s; ++ return HTTP_OK; + } + + +@@ -1071,7 +1085,7 @@ static void check_serverpath(request_rec *r) + * This is in conjunction with the ServerPath code in http_core, so we + * get the right host attached to a non- Host-sending request. + * +- * See the comment in check_hostalias about how each vhost can be ++ * See the comment in update_server_from_aliases about how each vhost can be + * listed multiple times. 
+ */ + +@@ -1134,11 +1148,17 @@ static APR_INLINE const char *construct_host_header(request_rec *r, + } + + AP_DECLARE(void) ap_update_vhost_from_headers(request_rec *r) ++{ ++ ap_update_vhost_from_headers_ex(r, 0); ++} ++ ++AP_DECLARE(int) ap_update_vhost_from_headers_ex(request_rec *r, int require_match) + { + core_server_config *conf = ap_get_core_module_config(r->server->module_config); + const char *host_header = apr_table_get(r->headers_in, "Host"); + int is_v6literal = 0; + int have_hostname_from_url = 0; ++ int rc = HTTP_OK; + + if (r->hostname) { + /* +@@ -1151,8 +1171,8 @@ AP_DECLARE(void) ap_update_vhost_from_headers(request_rec *r) + else if (host_header != NULL) { + is_v6literal = fix_hostname(r, host_header, conf->http_conformance); + } +- if (r->status != HTTP_OK) +- return; ++ if (!require_match && r->status != HTTP_OK) ++ return HTTP_OK; + + if (conf->http_conformance != AP_HTTP_CONFORMANCE_UNSAFE) { + /* +@@ -1173,10 +1193,16 @@ AP_DECLARE(void) ap_update_vhost_from_headers(request_rec *r) + /* check if we tucked away a name_chain */ + if (r->connection->vhost_lookup_data) { + if (r->hostname) +- check_hostalias(r); ++ rc = update_server_from_aliases(r); + else + check_serverpath(r); + } ++ else if (require_match && r->hostname) { ++ /* check the base server config */ ++ rc = update_server_from_aliases(r); ++ } ++ ++ return rc; + } + + /** diff --git a/SOURCES/httpd-2.4.37-CVE-2021-34798.patch b/SOURCES/httpd-2.4.37-CVE-2021-34798.patch new file mode 100644 index 0000000..4a03341 --- /dev/null +++ b/SOURCES/httpd-2.4.37-CVE-2021-34798.patch @@ -0,0 +1,13 @@ +diff --git a/server/scoreboard.c b/server/scoreboard.c +index 23e3d70..7b01bdf 100644 +--- a/server/scoreboard.c ++++ b/server/scoreboard.c +@@ -376,7 +376,7 @@ AP_DECLARE(void) ap_increment_counts(ap_sb_handle_t *sb, request_rec *r) + if (pfn_ap_logio_get_last_bytes != NULL) { + bytes = pfn_ap_logio_get_last_bytes(r->connection); + } +- else if (r->method_number == M_GET && r->method[0] == 'H') { ++ else if (r->method_number == M_GET && r->method && r->method[0] == 'H') { + bytes = 0; + } + else { diff --git a/SOURCES/httpd-2.4.37-CVE-2021-36160.patch b/SOURCES/httpd-2.4.37-CVE-2021-36160.patch new file mode 100644 index 0000000..f67a391 --- /dev/null +++ b/SOURCES/httpd-2.4.37-CVE-2021-36160.patch @@ -0,0 +1,45 @@ +diff --git a/modules/proxy/mod_proxy_uwsgi.c b/modules/proxy/mod_proxy_uwsgi.c +index 792d35e..9dcbed1 100644 +--- a/modules/proxy/mod_proxy_uwsgi.c ++++ b/modules/proxy/mod_proxy_uwsgi.c +@@ -453,11 +453,8 @@ static int uwsgi_handler(request_rec *r, proxy_worker * worker, + const char *proxyname, apr_port_t proxyport) + { + int status; +- int delta = 0; +- int decode_status; + proxy_conn_rec *backend = NULL; + apr_pool_t *p = r->pool; +- size_t w_len; + char server_portstr[32]; + char *u_path_info; + apr_uri_t *uri; +@@ -469,23 +466,14 @@ static int uwsgi_handler(request_rec *r, proxy_worker * worker, + + uri = apr_palloc(r->pool, sizeof(*uri)); + +- /* ADD PATH_INFO */ +-#if AP_MODULE_MAGIC_AT_LEAST(20111130,0) +- w_len = strlen(worker->s->name); +-#else +- w_len = strlen(worker->name); +-#endif +- u_path_info = r->filename + 6 + w_len; +- if (u_path_info[0] != '/') { +- delta = 1; +- } +- decode_status = ap_unescape_url(url + w_len - delta); +- if (decode_status) { ++ /* ADD PATH_INFO (unescaped) */ ++ u_path_info = ap_strchr(url + sizeof(UWSGI_SCHEME) + 2, '/'); ++ if (!u_path_info || ap_unescape_url(u_path_info) != OK) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10100) +- "unable to decode 
uri: %s", url + w_len - delta); ++ "unable to decode uwsgi uri: %s", url); + return HTTP_INTERNAL_SERVER_ERROR; + } +- apr_table_add(r->subprocess_env, "PATH_INFO", url + w_len - delta); ++ apr_table_add(r->subprocess_env, "PATH_INFO", u_path_info); + + + /* Create space for state information */ diff --git a/SOURCES/httpd-2.4.37-CVE-2021-39275.patch b/SOURCES/httpd-2.4.37-CVE-2021-39275.patch new file mode 100644 index 0000000..590268f --- /dev/null +++ b/SOURCES/httpd-2.4.37-CVE-2021-39275.patch @@ -0,0 +1,21 @@ +diff --git a/server/util.c b/server/util.c +index e0c558c..2a5dd04 100644 +--- a/server/util.c ++++ b/server/util.c +@@ -2460,13 +2460,12 @@ AP_DECLARE(char *) ap_escape_quotes(apr_pool_t *p, const char *instring) + * in front of every " that doesn't already have one. + */ + while (*inchr != '\0') { +- if ((*inchr == '\\') && (inchr[1] != '\0')) { +- *outchr++ = *inchr++; +- *outchr++ = *inchr++; +- } + if (*inchr == '"') { + *outchr++ = '\\'; + } ++ if ((*inchr == '\\') && (inchr[1] != '\0')) { ++ *outchr++ = *inchr++; ++ } + if (*inchr != '\0') { + *outchr++ = *inchr++; + } diff --git a/SOURCES/httpd-2.4.37-CVE-2021-40438.patch b/SOURCES/httpd-2.4.37-CVE-2021-40438.patch new file mode 100644 index 0000000..39758c7 --- /dev/null +++ b/SOURCES/httpd-2.4.37-CVE-2021-40438.patch @@ -0,0 +1,126 @@ +diff --git a/modules/mappers/mod_rewrite.c b/modules/mappers/mod_rewrite.c +index fb897a9..38dbb24 100644 +--- a/modules/mappers/mod_rewrite.c ++++ b/modules/mappers/mod_rewrite.c +@@ -619,6 +619,13 @@ static unsigned is_absolute_uri(char *uri, int *supportsqs) + return 6; + } + break; ++ ++ case 'u': ++ case 'U': ++ if (!ap_cstr_casecmpn(uri, "nix:", 4)) { /* unix: */ ++ *sqs = 1; ++ return (uri[4] == '/' && uri[5] == '/') ? 7 : 5; ++ } + } + + return 0; +diff --git a/modules/proxy/mod_proxy.c b/modules/proxy/mod_proxy.c +index f383996..6a9ef55 100644 +--- a/modules/proxy/mod_proxy.c ++++ b/modules/proxy/mod_proxy.c +@@ -1717,7 +1717,8 @@ PROXY_DECLARE(const char *) ap_proxy_de_socketfy(apr_pool_t *p, const char *url) + * the UDS path... ignore it + */ + if (!strncasecmp(url, "unix:", 5) && +- ((ptr = ap_strchr_c(url, '|')) != NULL)) { ++ ((ptr = ap_strchr_c(url + 5, '|')) != NULL)) { ++ + /* move past the 'unix:...|' UDS path info */ + const char *ret, *c; + +diff --git a/modules/proxy/proxy_util.c b/modules/proxy/proxy_util.c +index 7714b6c..3dd570c 100644 +--- a/modules/proxy/proxy_util.c ++++ b/modules/proxy/proxy_util.c +@@ -2084,33 +2084,45 @@ static int ap_proxy_retry_worker(const char *proxy_function, proxy_worker *worke + * were passed a UDS url (eg: from mod_proxy) and adjust uds_path + * as required. 
+ */ +-static void fix_uds_filename(request_rec *r, char **url) ++static int fix_uds_filename(request_rec *r, char **url) + { +- char *ptr, *ptr2; +- if (!r || !r->filename) return; ++ char *uds_url = r->filename + 6, *origin_url; + + if (!strncmp(r->filename, "proxy:", 6) && +- (ptr2 = ap_strcasestr(r->filename, "unix:")) && +- (ptr = ap_strchr(ptr2, '|'))) { ++ !ap_cstr_casecmpn(uds_url, "unix:", 5) && ++ (origin_url = ap_strchr(uds_url + 5, '|'))) { ++ char *uds_path = NULL; ++ apr_size_t url_len; + apr_uri_t urisock; + apr_status_t rv; +- *ptr = '\0'; +- rv = apr_uri_parse(r->pool, ptr2, &urisock); +- if (rv == APR_SUCCESS) { +- char *rurl = ptr+1; +- char *sockpath = ap_runtime_dir_relative(r->pool, urisock.path); +- apr_table_setn(r->notes, "uds_path", sockpath); +- *url = apr_pstrdup(r->pool, rurl); /* so we get the scheme for the uds */ +- /* r->filename starts w/ "proxy:", so add after that */ +- memmove(r->filename+6, rurl, strlen(rurl)+1); +- ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, +- "*: rewrite of url due to UDS(%s): %s (%s)", +- sockpath, *url, r->filename); +- } +- else { +- *ptr = '|'; +- } +- } ++ ++ *origin_url = '\0'; ++ rv = apr_uri_parse(r->pool, uds_url, &urisock); ++ *origin_url++ = '|'; ++ ++ if (rv == APR_SUCCESS && urisock.path && (!urisock.hostname ++ || !urisock.hostname[0])) { ++ uds_path = ap_runtime_dir_relative(r->pool, urisock.path); ++ } ++ ++ if (!uds_path) { ++ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10292) ++ "Invalid proxy UDS filename (%s)", r->filename); ++ return 0; ++ } ++ apr_table_setn(r->notes, "uds_path", uds_path); ++ ++ /* Remove the UDS path from *url and r->filename */ ++ url_len = strlen(origin_url); ++ *url = apr_pstrmemdup(r->pool, origin_url, url_len); ++ memcpy(uds_url, *url, url_len + 1); ++ ++ ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, ++ "*: rewrite of url due to UDS(%s): %s (%s)", ++ uds_path, *url, r->filename); ++ } ++ ++ return 1; + } + + PROXY_DECLARE(int) ap_proxy_pre_request(proxy_worker **worker, +@@ -2128,7 +2140,9 @@ PROXY_DECLARE(int) ap_proxy_pre_request(proxy_worker **worker, + "%s: found worker %s for %s", + (*worker)->s->scheme, (*worker)->s->name, *url); + *balancer = NULL; +- fix_uds_filename(r, url); ++ if (!fix_uds_filename(r, url)) { ++ return HTTP_INTERNAL_SERVER_ERROR; ++ } + access_status = OK; + } + else if (r->proxyreq == PROXYREQ_PROXY) { +@@ -2159,7 +2173,9 @@ PROXY_DECLARE(int) ap_proxy_pre_request(proxy_worker **worker, + * regarding the Connection header in the request. + */ + apr_table_setn(r->subprocess_env, "proxy-nokeepalive", "1"); +- fix_uds_filename(r, url); ++ if (!fix_uds_filename(r, url)) { ++ return HTTP_INTERNAL_SERVER_ERROR; ++ } + } + } + } diff --git a/SOURCES/httpd-2.4.37-CVE-2021-44224.patch b/SOURCES/httpd-2.4.37-CVE-2021-44224.patch new file mode 100644 index 0000000..d3633fc --- /dev/null +++ b/SOURCES/httpd-2.4.37-CVE-2021-44224.patch @@ -0,0 +1,315 @@ +diff --git a/include/http_protocol.h b/include/http_protocol.h +index e7abdd9..e1572dc 100644 +--- a/include/http_protocol.h ++++ b/include/http_protocol.h +@@ -96,6 +96,13 @@ AP_DECLARE(void) ap_get_mime_headers(request_rec *r); + AP_DECLARE(void) ap_get_mime_headers_core(request_rec *r, + apr_bucket_brigade *bb); + ++/** ++ * Run post_read_request hook and validate. ++ * @param r The current request ++ * @return OK or HTTP_... 
++ */ ++AP_DECLARE(int) ap_post_read_request(request_rec *r); ++ + /* Finish up stuff after a request */ + + /** +diff --git a/modules/http/http_request.c b/modules/http/http_request.c +index 9e7c4db..e873aab 100644 +--- a/modules/http/http_request.c ++++ b/modules/http/http_request.c +@@ -681,7 +681,7 @@ static request_rec *internal_internal_redirect(const char *new_uri, + * to do their thing on internal redirects as well. Perhaps this is a + * misnamed function. + */ +- if ((access_status = ap_run_post_read_request(new))) { ++ if ((access_status = ap_post_read_request(new))) { + ap_die(access_status, new); + return NULL; + } +diff --git a/modules/proxy/mod_proxy.c b/modules/proxy/mod_proxy.c +index 6a9ef55..a6df1b8 100644 +--- a/modules/proxy/mod_proxy.c ++++ b/modules/proxy/mod_proxy.c +@@ -584,11 +584,12 @@ static int proxy_detect(request_rec *r) + + if (conf->req && r->parsed_uri.scheme) { + /* but it might be something vhosted */ +- if (!(r->parsed_uri.hostname +- && !strcasecmp(r->parsed_uri.scheme, ap_http_scheme(r)) +- && ap_matches_request_vhost(r, r->parsed_uri.hostname, +- (apr_port_t)(r->parsed_uri.port_str ? r->parsed_uri.port +- : ap_default_port(r))))) { ++ if (!r->parsed_uri.hostname ++ || ap_cstr_casecmp(r->parsed_uri.scheme, ap_http_scheme(r)) != 0 ++ || !ap_matches_request_vhost(r, r->parsed_uri.hostname, ++ (apr_port_t)(r->parsed_uri.port_str ++ ? r->parsed_uri.port ++ : ap_default_port(r)))) { + r->proxyreq = PROXYREQ_PROXY; + r->uri = r->unparsed_uri; + r->filename = apr_pstrcat(r->pool, "proxy:", r->uri, NULL); +@@ -1750,6 +1751,7 @@ static const char * + struct proxy_alias *new; + char *f = cmd->path; + char *r = NULL; ++ const char *real; + char *word; + apr_table_t *params = apr_table_make(cmd->pool, 5); + const apr_array_header_t *arr; +@@ -1815,6 +1817,10 @@ static const char * + if (r == NULL) { + return "ProxyPass|ProxyPassMatch needs a path when not defined in a location"; + } ++ if (!(real = ap_proxy_de_socketfy(cmd->temp_pool, r))) { ++ return "ProxyPass|ProxyPassMatch uses an invalid \"unix:\" URL"; ++ } ++ + + /* if per directory, save away the single alias */ + if (cmd->path) { +@@ -1831,7 +1837,7 @@ static const char * + } + + new->fake = apr_pstrdup(cmd->pool, f); +- new->real = apr_pstrdup(cmd->pool, ap_proxy_de_socketfy(cmd->pool, r)); ++ new->real = apr_pstrdup(cmd->pool, real); + new->flags = flags; + if (use_regex) { + new->regex = ap_pregcomp(cmd->pool, f, AP_REG_EXTENDED); +@@ -2316,6 +2322,7 @@ static const char *add_member(cmd_parms *cmd, void *dummy, const char *arg) + proxy_worker *worker; + char *path = cmd->path; + char *name = NULL; ++ const char *real; + char *word; + apr_table_t *params = apr_table_make(cmd->pool, 5); + const apr_array_header_t *arr; +@@ -2356,6 +2363,9 @@ static const char *add_member(cmd_parms *cmd, void *dummy, const char *arg) + return "BalancerMember must define balancer name when outside section"; + if (!name) + return "BalancerMember must define remote proxy server"; ++ if (!(real = ap_proxy_de_socketfy(cmd->temp_pool, name))) { ++ return "BalancerMember uses an invalid \"unix:\" URL"; ++ } + + ap_str_tolower(path); /* lowercase scheme://hostname */ + +@@ -2368,7 +2378,7 @@ static const char *add_member(cmd_parms *cmd, void *dummy, const char *arg) + } + + /* Try to find existing worker */ +- worker = ap_proxy_get_worker(cmd->temp_pool, balancer, conf, ap_proxy_de_socketfy(cmd->temp_pool, name)); ++ worker = ap_proxy_get_worker(cmd->temp_pool, balancer, conf, real); + if (!worker) { + ap_log_error(APLOG_MARK, 
APLOG_DEBUG, 0, cmd->server, APLOGNO(01147) + "Defining worker '%s' for balancer '%s'", +@@ -2457,7 +2467,13 @@ static const char * + } + } + else { +- worker = ap_proxy_get_worker(cmd->temp_pool, NULL, conf, ap_proxy_de_socketfy(cmd->temp_pool, name)); ++ const char *real; ++ ++ if (!(real = ap_proxy_de_socketfy(cmd->temp_pool, name))) { ++ return "ProxySet uses an invalid \"unix:\" URL"; ++ } ++ ++ worker = ap_proxy_get_worker(cmd->temp_pool, NULL, conf, real); + if (!worker) { + if (in_proxy_section) { + err = ap_proxy_define_worker(cmd->pool, &worker, NULL, +@@ -2599,8 +2615,14 @@ static const char *proxysection(cmd_parms *cmd, void *mconfig, const char *arg) + } + } + else { ++ const char *real; ++ ++ if (!(real = ap_proxy_de_socketfy(cmd->temp_pool, conf->p))) { ++ return " uses an invalid \"unix:\" URL"; ++ } ++ + worker = ap_proxy_get_worker(cmd->temp_pool, NULL, sconf, +- ap_proxy_de_socketfy(cmd->temp_pool, (char*)conf->p)); ++ real); + if (!worker) { + err = ap_proxy_define_worker(cmd->pool, &worker, NULL, + sconf, conf->p, 0); +diff --git a/modules/proxy/mod_proxy.h b/modules/proxy/mod_proxy.h +index fbbd508..dca6f69 100644 +--- a/modules/proxy/mod_proxy.h ++++ b/modules/proxy/mod_proxy.h +@@ -713,6 +713,8 @@ typedef __declspec(dllimport) const char * + proxy_dir_conf *, const char *); + #endif + ++#define AP_PROXY_WORKER_NO_UDS (1u << 3) ++ + + /* Connection pool API */ + /** +@@ -725,6 +727,24 @@ typedef __declspec(dllimport) const char * + PROXY_DECLARE(char *) ap_proxy_worker_name(apr_pool_t *p, + proxy_worker *worker); + ++ ++/** ++ * Get the worker from proxy configuration, looking for either PREFIXED or ++ * MATCHED or both types of workers according to given mask ++ * @param p memory pool used for finding worker ++ * @param balancer the balancer that the worker belongs to ++ * @param conf current proxy server configuration ++ * @param url url to find the worker from ++ * @param mask bitmask of AP_PROXY_WORKER_IS_* ++ * @return proxy_worker or NULL if not found ++ */ ++PROXY_DECLARE(proxy_worker *) ap_proxy_get_worker_ex(apr_pool_t *p, ++ proxy_balancer *balancer, ++ proxy_server_conf *conf, ++ const char *url, ++ unsigned int mask); ++ ++ + /** + * Get the worker from proxy configuration + * @param p memory pool used for finding worker +@@ -737,6 +757,8 @@ PROXY_DECLARE(proxy_worker *) ap_proxy_get_worker(apr_pool_t *p, + proxy_balancer *balancer, + proxy_server_conf *conf, + const char *url); ++ ++ + /** + * Define and Allocate space for the worker to proxy configuration + * @param p memory pool to allocate worker from +diff --git a/modules/proxy/proxy_util.c b/modules/proxy/proxy_util.c +index 032e0c4..3d5b220 100644 +--- a/modules/proxy/proxy_util.c ++++ b/modules/proxy/proxy_util.c +@@ -1643,10 +1643,11 @@ PROXY_DECLARE(char *) ap_proxy_worker_name(apr_pool_t *p, + return apr_pstrcat(p, "unix:", worker->s->uds_path, "|", worker->s->name, NULL); + } + +-PROXY_DECLARE(proxy_worker *) ap_proxy_get_worker(apr_pool_t *p, +- proxy_balancer *balancer, +- proxy_server_conf *conf, +- const char *url) ++PROXY_DECLARE(proxy_worker *) ap_proxy_get_worker_ex(apr_pool_t *p, ++ proxy_balancer *balancer, ++ proxy_server_conf *conf, ++ const char *url, ++ unsigned int mask) + { + proxy_worker *worker; + proxy_worker *max_worker = NULL; +@@ -1662,7 +1663,12 @@ PROXY_DECLARE(proxy_worker *) ap_proxy_get_worker(apr_pool_t *p, + return NULL; + } + +- url = ap_proxy_de_socketfy(p, url); ++ if (!(mask & AP_PROXY_WORKER_NO_UDS)) { ++ url = ap_proxy_de_socketfy(p, url); ++ if (!url) { ++ 
return NULL; ++ } ++ } + + c = ap_strchr_c(url, ':'); + if (c == NULL || c[1] != '/' || c[2] != '/' || c[3] == '\0') { +@@ -1727,6 +1733,14 @@ PROXY_DECLARE(proxy_worker *) ap_proxy_get_worker(apr_pool_t *p, + return max_worker; + } + ++PROXY_DECLARE(proxy_worker *) ap_proxy_get_worker(apr_pool_t *p, ++ proxy_balancer *balancer, ++ proxy_server_conf *conf, ++ const char *url) ++{ ++ return ap_proxy_get_worker_ex(p, balancer, conf, url, 0); ++} ++ + /* + * To create a worker from scratch first we define the + * specifics of the worker; this is all local data. +@@ -2134,22 +2148,22 @@ PROXY_DECLARE(int) ap_proxy_pre_request(proxy_worker **worker, + + access_status = proxy_run_pre_request(worker, balancer, r, conf, url); + if (access_status == DECLINED && *balancer == NULL) { +- *worker = ap_proxy_get_worker(r->pool, NULL, conf, *url); ++ const int forward = (r->proxyreq == PROXYREQ_PROXY); ++ *worker = ap_proxy_get_worker_ex(r->pool, NULL, conf, *url, ++ forward ? AP_PROXY_WORKER_NO_UDS : 0); + if (*worker) { + ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, + "%s: found worker %s for %s", + (*worker)->s->scheme, (*worker)->s->name, *url); +- *balancer = NULL; +- if (!fix_uds_filename(r, url)) { ++ if (!forward && !fix_uds_filename(r, url)) { + return HTTP_INTERNAL_SERVER_ERROR; + } + access_status = OK; + } +- else if (r->proxyreq == PROXYREQ_PROXY) { ++ else if (forward) { + if (conf->forward) { + ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, + "*: found forward proxy worker for %s", *url); +- *balancer = NULL; + *worker = conf->forward; + access_status = OK; + /* +@@ -2163,8 +2177,8 @@ PROXY_DECLARE(int) ap_proxy_pre_request(proxy_worker **worker, + else if (r->proxyreq == PROXYREQ_REVERSE) { + if (conf->reverse) { + ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, +- "*: using default reverse proxy worker for %s (no keepalive)", *url); +- *balancer = NULL; ++ "*: using default reverse proxy worker for %s " ++ "(no keepalive)", *url); + *worker = conf->reverse; + access_status = OK; + /* +diff --git a/server/protocol.c b/server/protocol.c +index 430d91e..a2aa081 100644 +--- a/server/protocol.c ++++ b/server/protocol.c +@@ -1525,7 +1525,7 @@ request_rec *ap_read_request(conn_rec *conn) + /* we may have switched to another server */ + apply_server_config(r); + +- if ((access_status = ap_run_post_read_request(r))) { ++ if ((access_status = ap_post_read_request(r))) { + goto die; + } + +@@ -1582,6 +1582,27 @@ ignore: + return NULL; + } + ++AP_DECLARE(int) ap_post_read_request(request_rec *r) ++{ ++ int status; ++ ++ if ((status = ap_run_post_read_request(r))) { ++ return status; ++ } ++ ++ /* Enforce http(s) only scheme for non-forward-proxy requests */ ++ if (!r->proxyreq ++ && r->parsed_uri.scheme ++ && (ap_cstr_casecmpn(r->parsed_uri.scheme, "http", 4) != 0 ++ || (r->parsed_uri.scheme[4] != '\0' ++ && (apr_tolower(r->parsed_uri.scheme[4]) != 's' ++ || r->parsed_uri.scheme[5] != '\0')))) { ++ return HTTP_BAD_REQUEST; ++ } ++ ++ return OK; ++} ++ + /* if a request with a body creates a subrequest, remove original request's + * input headers which pertain to the body which has already been read. + * out-of-line helper function for ap_set_sub_req_protocol. 
diff --git a/SOURCES/httpd-2.4.37-CVE-2021-44790.patch b/SOURCES/httpd-2.4.37-CVE-2021-44790.patch new file mode 100644 index 0000000..4f244a8 --- /dev/null +++ b/SOURCES/httpd-2.4.37-CVE-2021-44790.patch @@ -0,0 +1,12 @@ +diff --git a/modules/lua/lua_request.c b/modules/lua/lua_request.c +index 77a88b4..1d8be2e 100644 +--- a/modules/lua/lua_request.c ++++ b/modules/lua/lua_request.c +@@ -376,6 +376,7 @@ static int req_parsebody(lua_State *L) + if (end == NULL) break; + key = (char *) apr_pcalloc(r->pool, 256); + filename = (char *) apr_pcalloc(r->pool, 256); ++ if (end - crlf <= 8) break; + vlen = end - crlf - 8; + buffer = (char *) apr_pcalloc(r->pool, vlen+1); + memcpy(buffer, crlf + 4, vlen); diff --git a/SOURCES/httpd-2.4.37-CVE-2022-22720.patch b/SOURCES/httpd-2.4.37-CVE-2022-22720.patch new file mode 100644 index 0000000..e4abca2 --- /dev/null +++ b/SOURCES/httpd-2.4.37-CVE-2022-22720.patch @@ -0,0 +1,154 @@ +diff --git a/modules/http/http_filters.c b/modules/http/http_filters.c +index 9828cdf..6bedcac 100644 +--- a/modules/http/http_filters.c ++++ b/modules/http/http_filters.c +@@ -1605,9 +1605,9 @@ AP_DECLARE(int) ap_map_http_request_error(apr_status_t rv, int status) + */ + AP_DECLARE(int) ap_discard_request_body(request_rec *r) + { ++ int rc = OK; ++ conn_rec *c = r->connection; + apr_bucket_brigade *bb; +- int seen_eos; +- apr_status_t rv; + + /* Sometimes we'll get in a state where the input handling has + * detected an error where we want to drop the connection, so if +@@ -1616,54 +1616,57 @@ AP_DECLARE(int) ap_discard_request_body(request_rec *r) + * + * This function is also a no-op on a subrequest. + */ +- if (r->main || r->connection->keepalive == AP_CONN_CLOSE || +- ap_status_drops_connection(r->status)) { ++ if (r->main || c->keepalive == AP_CONN_CLOSE) { ++ return OK; ++ } ++ if (ap_status_drops_connection(r->status)) { ++ c->keepalive = AP_CONN_CLOSE; + return OK; + } + + bb = apr_brigade_create(r->pool, r->connection->bucket_alloc); +- seen_eos = 0; +- do { +- apr_bucket *bucket; ++ for (;;) { ++ apr_status_t rv; + + rv = ap_get_brigade(r->input_filters, bb, AP_MODE_READBYTES, + APR_BLOCK_READ, HUGE_STRING_LEN); +- + if (rv != APR_SUCCESS) { +- apr_brigade_destroy(bb); +- return ap_map_http_request_error(rv, HTTP_BAD_REQUEST); ++ rc = ap_map_http_request_error(rv, HTTP_BAD_REQUEST); ++ goto cleanup; + } + +- for (bucket = APR_BRIGADE_FIRST(bb); +- bucket != APR_BRIGADE_SENTINEL(bb); +- bucket = APR_BUCKET_NEXT(bucket)) +- { +- const char *data; +- apr_size_t len; ++ while (!APR_BRIGADE_EMPTY(bb)) { ++ apr_bucket *b = APR_BRIGADE_FIRST(bb); + +- if (APR_BUCKET_IS_EOS(bucket)) { +- seen_eos = 1; +- break; ++ if (APR_BUCKET_IS_EOS(b)) { ++ goto cleanup; + } + +- /* These are metadata buckets. */ +- if (bucket->length == 0) { +- continue; +- } +- +- /* We MUST read because in case we have an unknown-length +- * bucket or one that morphs, we want to exhaust it. ++ /* There is no need to read empty or metadata buckets or ++ * buckets of known length, but we MUST read buckets of ++ * unknown length in order to exhaust them. 
+ */ +- rv = apr_bucket_read(bucket, &data, &len, APR_BLOCK_READ); ++ if (b->length == (apr_size_t)-1) { ++ apr_size_t len; ++ const char *data; ++ ++ rv = apr_bucket_read(b, &data, &len, APR_BLOCK_READ); + if (rv != APR_SUCCESS) { +- apr_brigade_destroy(bb); +- return HTTP_BAD_REQUEST; ++ rc = HTTP_BAD_REQUEST; ++ goto cleanup; + } + } +- apr_brigade_cleanup(bb); +- } while (!seen_eos); + +- return OK; ++ apr_bucket_delete(b); ++ } ++ } ++ ++cleanup: ++ apr_brigade_cleanup(bb); ++ if (rc != OK) { ++ c->keepalive = AP_CONN_CLOSE; ++ } ++ return rc; + } + + /* Here we deal with getting the request message body from the client. +diff --git a/server/protocol.c b/server/protocol.c +index a2aa081..a554970 100644 +--- a/server/protocol.c ++++ b/server/protocol.c +@@ -1666,23 +1666,29 @@ AP_DECLARE(void) ap_set_sub_req_protocol(request_rec *rnew, + rnew->main = (request_rec *) r; + } + +-static void end_output_stream(request_rec *r) ++static void end_output_stream(request_rec *r, int status) + { + conn_rec *c = r->connection; + apr_bucket_brigade *bb; + apr_bucket *b; + + bb = apr_brigade_create(r->pool, c->bucket_alloc); ++ if (status != OK) { ++ b = ap_bucket_error_create(status, NULL, r->pool, c->bucket_alloc); ++ APR_BRIGADE_INSERT_TAIL(bb, b); ++ } + b = apr_bucket_eos_create(c->bucket_alloc); + APR_BRIGADE_INSERT_TAIL(bb, b); ++ + ap_pass_brigade(r->output_filters, bb); ++ apr_brigade_cleanup(bb); + } + + AP_DECLARE(void) ap_finalize_sub_req_protocol(request_rec *sub) + { + /* tell the filter chain there is no more content coming */ + if (!sub->eos_sent) { +- end_output_stream(sub); ++ end_output_stream(sub, OK); + } + } + +@@ -1693,11 +1699,11 @@ AP_DECLARE(void) ap_finalize_sub_req_protocol(request_rec *sub) + */ + AP_DECLARE(void) ap_finalize_request_protocol(request_rec *r) + { +- (void) ap_discard_request_body(r); ++ int status = ap_discard_request_body(r); + + /* tell the filter chain there is no more content coming */ + if (!r->eos_sent) { +- end_output_stream(r); ++ end_output_stream(r, status); + } + } + diff --git a/SOURCES/httpd-2.4.37-hcheck-mem-issues.patch b/SOURCES/httpd-2.4.37-hcheck-mem-issues.patch new file mode 100644 index 0000000..810b9f2 --- /dev/null +++ b/SOURCES/httpd-2.4.37-hcheck-mem-issues.patch @@ -0,0 +1,199 @@ +diff --git a/modules/proxy/mod_proxy_hcheck.c b/modules/proxy/mod_proxy_hcheck.c +index bd89779..d7c0a68 100644 +--- a/modules/proxy/mod_proxy_hcheck.c ++++ b/modules/proxy/mod_proxy_hcheck.c +@@ -33,7 +33,6 @@ module AP_MODULE_DECLARE_DATA proxy_hcheck_module; + #endif + #else + #define HC_USE_THREADS 0 +-typedef void apr_thread_pool_t; + #endif + + typedef struct { +@@ -73,7 +72,7 @@ typedef struct { + proxy_balancer *balancer; + proxy_worker *worker; + proxy_worker *hc; +- apr_time_t now; ++ apr_time_t *now; + } baton_t; + + static void *hc_create_config(apr_pool_t *p, server_rec *s) +@@ -89,7 +88,10 @@ static void *hc_create_config(apr_pool_t *p, server_rec *s) + } + + static ap_watchdog_t *watchdog; +-static int tpsize = HC_THREADPOOL_SIZE; ++#if HC_USE_THREADS ++static apr_thread_pool_t *hctp; ++static int tpsize; ++#endif + + /* + * This serves double duty by not only validating (and creating) +@@ -825,29 +827,28 @@ static void * APR_THREAD_FUNC hc_check(apr_thread_t *thread, void *b) + server_rec *s = baton->ctx->s; + proxy_worker *worker = baton->worker; + proxy_worker *hc = baton->hc; +- apr_time_t now = baton->now; ++ apr_time_t now; + apr_status_t rv; + + ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(03256) + "%sHealth checking %s", 
(thread ? "Threaded " : ""), + worker->s->name); + +- worker->s->updated = now; + if (hc->s->method == TCP) { + rv = hc_check_tcp(baton); + } + else { + rv = hc_check_http(baton); + } ++ ++ now = apr_time_now(); + if (rv == APR_ENOTIMPL) { + ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(03257) + "Somehow tried to use unimplemented hcheck method: %d", + (int)hc->s->method); +- apr_pool_destroy(baton->ptemp); +- return NULL; + } + /* what state are we in ? */ +- if (PROXY_WORKER_IS_HCFAILED(worker)) { ++ else if (PROXY_WORKER_IS_HCFAILED(worker)) { + if (rv == APR_SUCCESS) { + worker->s->pcount += 1; + if (worker->s->pcount >= worker->s->passes) { +@@ -860,7 +861,8 @@ static void * APR_THREAD_FUNC hc_check(apr_thread_t *thread, void *b) + + } + } +- } else { ++ } ++ else { + if (rv != APR_SUCCESS) { + worker->s->error_time = now; + worker->s->fcount += 1; +@@ -873,7 +875,12 @@ static void * APR_THREAD_FUNC hc_check(apr_thread_t *thread, void *b) + } + } + } ++ if (baton->now) { ++ *baton->now = now; ++ } + apr_pool_destroy(baton->ptemp); ++ worker->s->updated = now; ++ + return NULL; + } + +@@ -881,12 +888,10 @@ static apr_status_t hc_watchdog_callback(int state, void *data, + apr_pool_t *pool) + { + apr_status_t rv = APR_SUCCESS; +- apr_time_t now = apr_time_now(); + proxy_balancer *balancer; + sctx_t *ctx = (sctx_t *)data; + server_rec *s = ctx->s; + proxy_server_conf *conf; +- static apr_thread_pool_t *hctp = NULL; + + switch (state) { + case AP_WATCHDOG_STATE_STARTING: +@@ -913,7 +918,6 @@ static apr_status_t hc_watchdog_callback(int state, void *data, + "Skipping apr_thread_pool_create()"); + hctp = NULL; + } +- + #endif + break; + +@@ -929,45 +933,53 @@ static apr_status_t hc_watchdog_callback(int state, void *data, + ctx->s = s; + for (i = 0; i < conf->balancers->nelts; i++, balancer++) { + int n; ++ apr_time_t now; + proxy_worker **workers; + proxy_worker *worker; + /* Have any new balancers or workers been added dynamically? 
*/ + ap_proxy_sync_balancer(balancer, s, conf); + workers = (proxy_worker **)balancer->workers->elts; ++ now = apr_time_now(); + for (n = 0; n < balancer->workers->nelts; n++) { + worker = *workers; + if (!PROXY_WORKER_IS(worker, PROXY_WORKER_STOPPED) && +- (worker->s->method != NONE) && +- (now > worker->s->updated + worker->s->interval)) { ++ (worker->s->method != NONE) && ++ (worker->s->updated != 0) && ++ (now > worker->s->updated + worker->s->interval)) { + baton_t *baton; + apr_pool_t *ptemp; ++ + ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, s, + "Checking %s worker: %s [%d] (%pp)", balancer->s->name, + worker->s->name, worker->s->method, worker); + + if ((rv = hc_init_worker(ctx, worker)) != APR_SUCCESS) { ++ worker->s->updated = now; + return rv; + } +- /* This pool must last the lifetime of the (possible) thread */ ++ worker->s->updated = 0; ++ ++ /* This pool has the lifetime of the check */ + apr_pool_create(&ptemp, ctx->p); + apr_pool_tag(ptemp, "hc_request"); +- baton = apr_palloc(ptemp, sizeof(baton_t)); ++ baton = apr_pcalloc(ptemp, sizeof(baton_t)); + baton->ctx = ctx; +- baton->now = now; + baton->balancer = balancer; + baton->worker = worker; + baton->ptemp = ptemp; + baton->hc = hc_get_hcworker(ctx, worker, ptemp); +- +- if (!hctp) { +- hc_check(NULL, baton); +- } + #if HC_USE_THREADS +- else { +- rv = apr_thread_pool_push(hctp, hc_check, (void *)baton, +- APR_THREAD_TASK_PRIORITY_NORMAL, NULL); ++ if (hctp) { ++ apr_thread_pool_push(hctp, hc_check, (void *)baton, ++ APR_THREAD_TASK_PRIORITY_NORMAL, ++ NULL); + } ++ else + #endif ++ { ++ baton->now = &now; ++ hc_check(NULL, baton); ++ } + } + workers++; + } +@@ -986,9 +998,9 @@ static apr_status_t hc_watchdog_callback(int state, void *data, + ap_log_error(APLOG_MARK, APLOG_INFO, rv, s, APLOGNO(03315) + "apr_thread_pool_destroy() failed"); + } ++ hctp = NULL; + } + #endif +- hctp = NULL; + break; + } + return rv; +@@ -996,7 +1008,10 @@ static apr_status_t hc_watchdog_callback(int state, void *data, + static int hc_pre_config(apr_pool_t *pconf, apr_pool_t *plog, + apr_pool_t *ptemp) + { ++#if HC_USE_THREADS ++ hctp = NULL; + tpsize = HC_THREADPOOL_SIZE; ++#endif + return OK; + } + static int hc_post_config(apr_pool_t *p, apr_pool_t *plog, diff --git a/SOURCES/httpd-2.4.37-r1845768+.patch b/SOURCES/httpd-2.4.37-r1845768+.patch new file mode 100644 index 0000000..a51934f --- /dev/null +++ b/SOURCES/httpd-2.4.37-r1845768+.patch @@ -0,0 +1,48 @@ +diff --git a/modules/ssl/ssl_engine_init.c b/modules/ssl/ssl_engine_init.c +index 70d151e..e4f5fc8 100644 +--- a/modules/ssl/ssl_engine_init.c ++++ b/modules/ssl/ssl_engine_init.c +@@ -1095,7 +1095,9 @@ static apr_status_t ssl_init_ctx_crl(server_rec *s, + /* + * Read a file that optionally contains the server certificate in PEM + * format, possibly followed by a sequence of CA certificates that +- * should be sent to the peer in the SSL Certificate message. ++ * should be sent to the peer in the SSL Certificate message. Returns ++ * 0 on success, otherwise the OpenSSL error stack contents should be ++ * reported. 
+ */ + static int use_certificate_chain( + SSL_CTX *ctx, char *file, int skipfirst, pem_password_cb *cb) +@@ -1128,8 +1130,10 @@ static int use_certificate_chain( + ctx->extra_certs = NULL; + } + #endif ++ + /* create new extra chain by loading the certs */ + n = 0; ++ ERR_clear_error(); + while ((x509 = PEM_read_bio_X509(bio, NULL, cb, NULL)) != NULL) { + if (!SSL_CTX_add_extra_chain_cert(ctx, x509)) { + X509_free(x509); +@@ -1190,6 +1194,7 @@ static apr_status_t ssl_init_ctx_cert_chain(server_rec *s, + if (n < 0) { + ap_log_error(APLOG_MARK, APLOG_EMERG, 0, s, APLOGNO(01903) + "Failed to configure CA certificate chain!"); ++ ssl_log_ssl_error(SSLLOG_MARK, APLOG_EMERG, s); + return ssl_die(s); + } + +diff --git a/modules/ssl/ssl_util_ocsp.c b/modules/ssl/ssl_util_ocsp.c +index b11a6e9..b66e151 100644 +--- a/modules/ssl/ssl_util_ocsp.c ++++ b/modules/ssl/ssl_util_ocsp.c +@@ -363,7 +363,9 @@ static STACK_OF(X509) *modssl_read_ocsp_certificates(const char *file) + BIO_free(bio); + return NULL; + } ++ + /* create new extra chain by loading the certs */ ++ ERR_clear_error(); + while ((x509 = PEM_read_bio_X509(bio, NULL, NULL, NULL)) != NULL) { + if (!other_certs) { + other_certs = sk_X509_new_null(); diff --git a/SOURCES/httpd-2.4.37-r1862410.patch b/SOURCES/httpd-2.4.37-r1862410.patch new file mode 100644 index 0000000..a21d24d --- /dev/null +++ b/SOURCES/httpd-2.4.37-r1862410.patch @@ -0,0 +1,108 @@ +--- a/modules/dav/main/mod_dav.c ++++ b/modules/dav/main/mod_dav.c +@@ -557,6 +557,7 @@ + dav_begin_multistatus(bb, r, status, namespaces); + + apr_pool_create(&subpool, r->pool); ++ apr_pool_tag(subpool, "mod_dav-multistatus"); + + for (; first != NULL; first = first->next) { + apr_pool_clear(subpool); +@@ -1980,8 +1981,9 @@ + ** Note: we cast to lose the "const". The propdb won't try to change + ** the resource, however, since we are opening readonly. + */ +- err = dav_open_propdb(ctx->r, ctx->w.lockdb, wres->resource, 1, +- ctx->doc ? ctx->doc->namespaces : NULL, &propdb); ++ err = dav_popen_propdb(ctx->scratchpool, ++ ctx->r, ctx->w.lockdb, wres->resource, 1, ++ ctx->doc ? ctx->doc->namespaces : NULL, &propdb); + if (err != NULL) { + /* ### do something with err! */ + +--- a/modules/dav/main/mod_dav.h ++++ b/modules/dav/main/mod_dav.h +@@ -1590,6 +1590,16 @@ + apr_array_header_t *ns_xlate, + dav_propdb **propdb); + ++DAV_DECLARE(dav_error *) dav_popen_propdb( ++ apr_pool_t *p, ++ request_rec *r, ++ dav_lockdb *lockdb, ++ const dav_resource *resource, ++ int ro, ++ apr_array_header_t *ns_xlate, ++ dav_propdb **propdb); ++ ++ + DAV_DECLARE(void) dav_close_propdb(dav_propdb *db); + + DAV_DECLARE(dav_get_props_result) dav_get_props( +--- a/modules/dav/main/props.c ++++ b/modules/dav/main/props.c +@@ -323,7 +323,7 @@ + { + /* need to escape the uri that's in the resource struct because during + * the property walker it's not encoded. 
*/ +- const char *e_uri = ap_escape_uri(propdb->resource->pool, ++ const char *e_uri = ap_escape_uri(propdb->p, + propdb->resource->uri); + + /* perform a "GET" on the resource's URI (note that the resource +@@ -524,8 +524,21 @@ + apr_array_header_t * ns_xlate, + dav_propdb **p_propdb) + { +- dav_propdb *propdb = apr_pcalloc(r->pool, sizeof(*propdb)); ++ return dav_popen_propdb(r->pool, r, lockdb, resource, ro, ns_xlate, p_propdb); ++} + ++DAV_DECLARE(dav_error *)dav_popen_propdb(apr_pool_t *p, ++ request_rec *r, dav_lockdb *lockdb, ++ const dav_resource *resource, ++ int ro, ++ apr_array_header_t * ns_xlate, ++ dav_propdb **p_propdb) ++{ ++ dav_propdb *propdb = NULL; ++ ++ propdb = apr_pcalloc(p, sizeof(*propdb)); ++ propdb->p = p; ++ + *p_propdb = NULL; + + #if DAV_DEBUG +@@ -537,7 +550,6 @@ + #endif + + propdb->r = r; +- apr_pool_create(&propdb->p, r->pool); + propdb->resource = resource; + propdb->ns_xlate = ns_xlate; + +@@ -562,10 +574,10 @@ + (*propdb->db_hooks->close)(propdb->db); + } + +- /* Currently, mod_dav's pool usage doesn't allow clearing this pool. */ +-#if 0 +- apr_pool_destroy(propdb->p); +-#endif ++ if (propdb->subreq) { ++ ap_destroy_sub_req(propdb->subreq); ++ propdb->subreq = NULL; ++ } + } + + DAV_DECLARE(dav_get_props_result) dav_get_allprops(dav_propdb *propdb, +@@ -739,7 +751,8 @@ + */ + + if (elem->priv == NULL) { +- elem->priv = apr_pcalloc(propdb->p, sizeof(*priv)); ++ /* elem->priv outlives propdb->p. Hence use the request pool */ ++ elem->priv = apr_pcalloc(propdb->r->pool, sizeof(*priv)); + } + priv = elem->priv; + diff --git a/SOURCES/httpd-2.4.37-reply-two-tls-rec.patch b/SOURCES/httpd-2.4.37-reply-two-tls-rec.patch new file mode 100644 index 0000000..a4a3835 --- /dev/null +++ b/SOURCES/httpd-2.4.37-reply-two-tls-rec.patch @@ -0,0 +1,188 @@ +diff --git a/modules/ssl/ssl_engine_io.c b/modules/ssl/ssl_engine_io.c +index 018b667..4e3875a 100644 +--- a/modules/ssl/ssl_engine_io.c ++++ b/modules/ssl/ssl_engine_io.c +@@ -1598,18 +1598,32 @@ static apr_status_t ssl_io_filter_input(ap_filter_t *f, + } + + +-/* ssl_io_filter_output() produces one SSL/TLS message per bucket ++/* ssl_io_filter_output() produces one SSL/TLS record per bucket + * passed down the output filter stack. This results in a high +- * overhead (network packets) for any output comprising many small +- * buckets. SSI page applied through the HTTP chunk filter, for +- * example, may produce many brigades containing small buckets - +- * [chunk-size CRLF] [chunk-data] [CRLF]. ++ * overhead (more network packets & TLS processing) for any output ++ * comprising many small buckets. SSI output passed through the HTTP ++ * chunk filter, for example, may produce many brigades containing ++ * small buckets - [chunk-size CRLF] [chunk-data] [CRLF]. + * +- * The coalescing filter merges many small buckets into larger buckets +- * where possible, allowing the SSL I/O output filter to handle them +- * more efficiently. */ ++ * Sending HTTP response headers as a separate TLS record to the ++ * response body also reveals information to a network observer (the ++ * size of headers) which can be significant. ++ * ++ * The coalescing filter merges data buckets with the aim of producing ++ * fewer, larger TLS records - without copying/buffering all content ++ * and introducing unnecessary overhead. ++ * ++ * ### This buffering could be probably be done more comprehensively ++ * ### in ssl_io_filter_output itself. 
++ * ++ * ### Another possible performance optimisation in particular for the ++ * ### [HEAP] [FILE] HTTP response case is using a brigade rather than ++ * ### a char array to buffer; using apr_brigade_write() to append ++ * ### will use already-allocated memory from the HEAP, reducing # of ++ * ### copies. ++ */ + +-#define COALESCE_BYTES (2048) ++#define COALESCE_BYTES (AP_IOBUFSIZE) + + struct coalesce_ctx { + char buffer[COALESCE_BYTES]; +@@ -1622,11 +1636,12 @@ static apr_status_t ssl_io_filter_coalesce(ap_filter_t *f, + apr_bucket *e, *upto; + apr_size_t bytes = 0; + struct coalesce_ctx *ctx = f->ctx; ++ apr_size_t buffered = ctx ? ctx->bytes : 0; /* space used on entry */ + unsigned count = 0; + + /* The brigade consists of zero-or-more small data buckets which +- * can be coalesced (the prefix), followed by the remainder of the +- * brigade. ++ * can be coalesced (referred to as the "prefix"), followed by the ++ * remainder of the brigade. + * + * Find the last bucket - if any - of that prefix. count gives + * the number of buckets in the prefix. The "prefix" must contain +@@ -1641,24 +1656,97 @@ static apr_status_t ssl_io_filter_coalesce(ap_filter_t *f, + e != APR_BRIGADE_SENTINEL(bb) + && !APR_BUCKET_IS_METADATA(e) + && e->length != (apr_size_t)-1 +- && e->length < COALESCE_BYTES +- && (bytes + e->length) < COALESCE_BYTES +- && (ctx == NULL +- || bytes + ctx->bytes + e->length < COALESCE_BYTES); ++ && e->length <= COALESCE_BYTES ++ && (buffered + bytes + e->length) <= COALESCE_BYTES; + e = APR_BUCKET_NEXT(e)) { + if (e->length) count++; /* don't count zero-length buckets */ + bytes += e->length; + } ++ ++ /* If there is room remaining and the next bucket is a data ++ * bucket, try to include it in the prefix to coalesce. For a ++ * typical [HEAP] [FILE] HTTP response brigade, this handles ++ * merging the headers and the start of the body into a single TLS ++ * record. */ ++ if (bytes + buffered > 0 ++ && bytes + buffered < COALESCE_BYTES ++ && e != APR_BRIGADE_SENTINEL(bb) ++ && !APR_BUCKET_IS_METADATA(e)) { ++ apr_status_t rv = APR_SUCCESS; ++ ++ /* For an indeterminate length bucket (PIPE/CGI/...), try a ++ * non-blocking read to have it morph into a HEAP. If the ++ * read fails with EAGAIN, it is harmless to try a split ++ * anyway, split is ENOTIMPL for most PIPE-like buckets. */ ++ if (e->length == (apr_size_t)-1) { ++ const char *discard; ++ apr_size_t ignore; ++ ++ rv = apr_bucket_read(e, &discard, &ignore, APR_NONBLOCK_READ); ++ if (rv != APR_SUCCESS && !APR_STATUS_IS_EAGAIN(rv)) { ++ ap_log_cerror(APLOG_MARK, APLOG_ERR, rv, f->c, APLOGNO(10232) ++ "coalesce failed to read from %s bucket", ++ e->type->name); ++ return AP_FILTER_ERROR; ++ } ++ } ++ ++ if (rv == APR_SUCCESS) { ++ /* If the read above made the bucket morph, it may now fit ++ * entirely within the buffer. Otherwise, split it so it does ++ * fit. */ ++ if (e->length > COALESCE_BYTES ++ || e->length + buffered + bytes > COALESCE_BYTES) { ++ rv = apr_bucket_split(e, COALESCE_BYTES - (buffered + bytes)); ++ } ++ ++ if (rv == APR_SUCCESS && e->length == 0) { ++ /* As above, don't count in the prefix if the bucket is ++ * now zero-length. 
*/ ++ } ++ else if (rv == APR_SUCCESS) { ++ ap_log_cerror(APLOG_MARK, APLOG_TRACE4, 0, f->c, ++ "coalesce: adding %" APR_SIZE_T_FMT " bytes " ++ "from split %s bucket, total %" APR_SIZE_T_FMT, ++ e->length, e->type->name, bytes + buffered); ++ ++ count++; ++ bytes += e->length; ++ e = APR_BUCKET_NEXT(e); ++ } ++ else if (rv != APR_ENOTIMPL) { ++ ap_log_cerror(APLOG_MARK, APLOG_ERR, rv, f->c, APLOGNO(10233) ++ "coalesce: failed to split data bucket"); ++ return AP_FILTER_ERROR; ++ } ++ } ++ } ++ ++ /* The prefix is zero or more buckets. upto now points to the ++ * bucket AFTER the end of the prefix, which may be the brigade ++ * sentinel. */ + upto = e; + +- /* Coalesce the prefix, if: +- * a) more than one bucket is found to coalesce, or +- * b) the brigade contains only a single data bucket, or +- * c) the data bucket is not last but we have buffered data already. ++ /* Coalesce the prefix, if any of the following are true: ++ * ++ * a) the prefix is more than one bucket ++ * OR ++ * b) the prefix is the entire brigade, which is a single bucket ++ * AND the prefix length is smaller than the buffer size, ++ * OR ++ * c) the prefix is a single bucket ++ * AND there is buffered data from a previous pass. ++ * ++ * The aim with (b) is to buffer a small bucket so it can be ++ * coalesced with future invocations of this filter. e.g. three ++ * calls each with a single 100 byte HEAP bucket should get ++ * coalesced together. But an invocation with a 8192 byte HEAP ++ * should pass through untouched. + */ + if (bytes > 0 + && (count > 1 +- || (upto == APR_BRIGADE_SENTINEL(bb)) ++ || (upto == APR_BRIGADE_SENTINEL(bb) ++ && bytes < COALESCE_BYTES) + || (ctx && ctx->bytes > 0))) { + /* If coalescing some bytes, ensure a context has been + * created. */ +@@ -1669,7 +1757,8 @@ static apr_status_t ssl_io_filter_coalesce(ap_filter_t *f, + + ap_log_cerror(APLOG_MARK, APLOG_TRACE4, 0, f->c, + "coalesce: have %" APR_SIZE_T_FMT " bytes, " +- "adding %" APR_SIZE_T_FMT " more", ctx->bytes, bytes); ++ "adding %" APR_SIZE_T_FMT " more (buckets=%u)", ++ ctx->bytes, bytes, count); + + /* Iterate through the prefix segment. For non-fatal errors + * in this loop it is safe to break out and fall back to the +@@ -1684,7 +1773,8 @@ static apr_status_t ssl_io_filter_coalesce(ap_filter_t *f, + if (APR_BUCKET_IS_METADATA(e) + || e->length == (apr_size_t)-1) { + ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, f->c, APLOGNO(02012) +- "unexpected bucket type during coalesce"); ++ "unexpected %s bucket during coalesce", ++ e->type->name); + break; /* non-fatal error; break out */ + } + diff --git a/SOURCES/httpd-2.4.37-usertrack-samesite.patch b/SOURCES/httpd-2.4.37-usertrack-samesite.patch new file mode 100644 index 0000000..592616e --- /dev/null +++ b/SOURCES/httpd-2.4.37-usertrack-samesite.patch @@ -0,0 +1,178 @@ +diff --git a/docs/manual/mod/mod_usertrack.html.en b/docs/manual/mod/mod_usertrack.html.en +index b212747..d2da9b9 100644 +--- a/docs/manual/mod/mod_usertrack.html.en ++++ b/docs/manual/mod/mod_usertrack.html.en +@@ -47,7 +47,10 @@ + +@@ -127,6 +130,22 @@ CustomLog "logs/clickstream.log" usertrack +
CookieExpires "3 weeks"
+ + ++ ++
top
++

CookieHTTPOnly Directive

++ ++ ++ ++ ++ ++ ++ ++ ++
Description:Adds the 'HTTPOnly' attribute to the cookie
Syntax:CookieHTTPOnly on|off
Default:CookieHTTPOnly off
Context:server config, virtual host, directory, .htaccess
Override:FileInfo
Status:Extension
Module:mod_usertrack
++

When set to 'ON', the 'HTTPOnly' cookie attribute is added to this ++ modules tracking cookie. This attribute instructs browsers to block javascript ++ from reading the value of the cookie.

++ +
+
top
+

CookieName Directive

+@@ -150,6 +169,45 @@ CustomLog "logs/clickstream.log" usertrack +
CookieName clicktrack
+ + ++
++
top
++

CookieSameSite Directive

++ ++ ++ ++ ++ ++ ++ ++ ++
Description:Adds the 'SameSite' attribute to the cookie
Syntax:CookieSameSite None|Lax|Strict
Default:unset
Context:server config, virtual host, directory, .htaccess
Override:FileInfo
Status:Extension
Module:mod_usertrack
++

When set to 'None', 'Lax', or 'Strict', the 'SameSite' cookie attribute ++ is added to this modules tracking cookie with the corresponding value. ++ This attribute instructs browser on how to treat the cookie when it is ++ requested in a cross-site context.

++ ++
++

A value of 'None' sets 'SameSite=None', which is the most liberal setting. To ++ omit this attribute, omit the directive entirely.

++
++ ++ ++
++
top
++

CookieSecure Directive

++ ++ ++ ++ ++ ++ ++ ++ ++
Description:Adds the 'Secure' attribute to the cookie
Syntax:CookieSecure on|off
Default:CookieSecure off
Context:server config, virtual host, directory, .htaccess
Override:FileInfo
Status:Extension
Module:mod_usertrack
++

When set to 'ON', the 'Secure' cookie attribute is added to this ++ modules tracking cookie. This attribute instructs browsers to only ++ transmit the cookie over HTTPS.

++ +
+
top
+

CookieStyle Directive

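For reference, a minimal configuration sketch (not part of the patch) showing how the three backported directives documented above could be combined. The virtual host, cookie name, and log path are illustrative only; httpd matches directive names case-insensitively, so the CookieHttpOnly spelling registered in mod_usertrack.c below behaves the same as CookieHTTPOnly:

    LoadModule usertrack_module modules/mod_usertrack.so

    <VirtualHost *:443>
        # Enable the tracking cookie and give it a name and lifetime
        CookieTracking on
        CookieName clicktrack
        CookieExpires "3 weeks"
        # Attributes added by httpd-2.4.37-usertrack-samesite.patch
        CookieSameSite Lax
        CookieSecure on
        CookieHTTPOnly on
        CustomLog "logs/clickstream.log" usertrack
    </VirtualHost>

With settings along these lines, make_cookie() in the hunk below appends the new attributes after the existing ones, producing a header roughly of the form:

    Set-Cookie: clicktrack=<id>; ...; SameSite=Lax; Secure; HttpOnly
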
+diff --git a/modules/metadata/mod_usertrack.c b/modules/metadata/mod_usertrack.c +index 73a9f45..65759c2 100644 +--- a/modules/metadata/mod_usertrack.c ++++ b/modules/metadata/mod_usertrack.c +@@ -86,6 +86,9 @@ typedef struct { + const char *cookie_domain; + char *regexp_string; /* used to compile regexp; save for debugging */ + ap_regex_t *regexp; /* used to find usertrack cookie in cookie header */ ++ int is_secure; ++ int is_httponly; ++ const char *samesite; + } cookie_dir_rec; + + /* Make Cookie: Now we have to generate something that is going to be +@@ -143,6 +146,21 @@ static void make_cookie(request_rec *r) + : ""), + NULL); + } ++ if (dcfg->samesite != NULL) { ++ new_cookie = apr_pstrcat(r->pool, new_cookie, "; ", ++ dcfg->samesite, ++ NULL); ++ } ++ if (dcfg->is_secure) { ++ new_cookie = apr_pstrcat(r->pool, new_cookie, "; Secure", ++ NULL); ++ } ++ if (dcfg->is_httponly) { ++ new_cookie = apr_pstrcat(r->pool, new_cookie, "; HttpOnly", ++ NULL); ++ } ++ ++ + + apr_table_addn(r->err_headers_out, + (dcfg->style == CT_COOKIE2 ? "Set-Cookie2" : "Set-Cookie"), +@@ -269,6 +287,7 @@ static void *make_cookie_dir(apr_pool_t *p, char *d) + dcfg->cookie_domain = NULL; + dcfg->style = CT_UNSET; + dcfg->enabled = 0; ++ /* calloc'ed to disabled: samesite, is_secure, is_httponly */ + + /* In case the user does not use the CookieName directive, + * we need to compile the regexp for the default cookie name. */ +@@ -429,6 +448,31 @@ static const char *set_cookie_style(cmd_parms *cmd, void *mconfig, + return NULL; + } + ++/* ++ * SameSite enabled disabled ++ */ ++ ++static const char *set_samesite_value(cmd_parms *cmd, void *mconfig, ++ const char *name) ++{ ++ cookie_dir_rec *dcfg; ++ ++ dcfg = (cookie_dir_rec *) mconfig; ++ ++ if (strcasecmp(name, "strict") == 0) { ++ dcfg->samesite = "SameSite=Strict"; ++ } else if (strcasecmp(name, "lax") == 0) { ++ dcfg->samesite = "SameSite=Lax"; ++ } else if (strcasecmp(name, "none") == 0) { ++ dcfg->samesite = "SameSite=None"; ++ } else { ++ return "CookieSameSite accepts 'Strict', 'Lax', or 'None'"; ++ } ++ ++ ++ return NULL; ++} ++ + static const command_rec cookie_log_cmds[] = { + AP_INIT_TAKE1("CookieExpires", set_cookie_exp, NULL, OR_FILEINFO, + "an expiry date code"), +@@ -440,6 +484,17 @@ static const command_rec cookie_log_cmds[] = { + "whether or not to enable cookies"), + AP_INIT_TAKE1("CookieName", set_cookie_name, NULL, OR_FILEINFO, + "name of the tracking cookie"), ++ AP_INIT_FLAG("CookieTracking", set_cookie_enable, NULL, OR_FILEINFO, ++ "whether or not to enable cookies"), ++ AP_INIT_TAKE1("CookieSameSite", set_samesite_value, NULL, OR_FILEINFO, ++ "SameSite setting"), ++ AP_INIT_FLAG("CookieSecure", ap_set_flag_slot, ++ (void *)APR_OFFSETOF(cookie_dir_rec, is_secure), OR_FILEINFO, ++ "is cookie secure"), ++ AP_INIT_FLAG("CookieHttpOnly", ap_set_flag_slot, ++ (void *)APR_OFFSETOF(cookie_dir_rec, is_httponly),OR_FILEINFO, ++ "is cookie http only"), ++ + {NULL} + }; + diff --git a/SOURCES/welcome.conf b/SOURCES/welcome.conf index 5158e8b..37b7394 100644 --- a/SOURCES/welcome.conf +++ b/SOURCES/welcome.conf @@ -16,4 +16,4 @@ Alias /.noindex.html /usr/share/httpd/noindex/index.html -Alias /poweredby.png /usr/share/httpd/icons/apache_pb2.png \ No newline at end of file +Alias /poweredby.png /usr/share/httpd/icons/apache_pb3.png \ No newline at end of file diff --git a/SPECS/httpd.spec b/SPECS/httpd.spec index 65dfd79..daa6a90 100644 --- a/SPECS/httpd.spec +++ b/SPECS/httpd.spec @@ -13,7 +13,7 @@ Summary: Apache HTTP Server Name: httpd Version: 
2.4.37 -Release: 40%{?dist} +Release: 47%{?dist}.1 URL: https://httpd.apache.org/ Source0: https://www.apache.org/dist/httpd/httpd-%{version}.tar.bz2 Source2: httpd.logrotate @@ -54,6 +54,7 @@ Source42: httpd-init.service Source43: httpd-ssl-gencerts Source44: httpd@.service Source45: config.layout +Source46: apache-poweredby.png # build/scripts patches # http://bugzilla.redhat.com/show_bug.cgi?id=1231924 @@ -101,6 +102,8 @@ Patch38: httpd-2.4.37-pr37355.patch Patch39: httpd-2.4.37-proxy-ws-idle-timeout.patch # https://bugzilla.redhat.com/show_bug.cgi?id=1883648 Patch40: httpd-2.4.37-ssl-proxy-chains.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1935742 +Patch41: httpd-2.4.37-usertrack-samesite.patch # Bug fixes # https://bugzilla.redhat.com/show_bug.cgi?id=1397243 @@ -152,6 +155,14 @@ Patch84: httpd-2.4.37-r1878280.patch Patch85: httpd-2.4.37-htcacheclean-dont-break.patch # https://bugzilla.redhat.com/show_bug.cgi?id=1937334 Patch86: httpd-2.4.37-r1873907.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1680111 +Patch87: httpd-2.4.37-reply-two-tls-rec.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1905613 +Patch88: httpd-2.4.37-r1845768+.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=2001046 +Patch89: httpd-2.4.37-r1862410.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1984828 +Patch90: httpd-2.4.37-hcheck-mem-issues.patch # Security fixes Patch200: httpd-2.4.37-r1851471.patch @@ -181,6 +192,30 @@ Patch209: httpd-2.4.37-CVE-2020-1934.patch Patch210: httpd-2.4.37-CVE-2018-17199.patch # https://bugzilla.redhat.com/show_bug.cgi?id=1866563 Patch211: httpd-2.4.37-CVE-2020-11984.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1972500 +Patch212: httpd-2.4.37-CVE-2021-30641.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1968307 +Patch213: httpd-2.4.37-CVE-2021-26690.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=2005117 +Patch214: httpd-2.4.37-CVE-2021-40438.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1966732 +Patch215: httpd-2.4.37-CVE-2021-26691.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1968278 +Patch216: httpd-2.4.37-CVE-2020-35452.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=2005128 +Patch217: httpd-2.4.37-CVE-2021-34798.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=2005119 +Patch218: httpd-2.4.37-CVE-2021-39275.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=2005124 +Patch219: httpd-2.4.37-CVE-2021-36160.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1966728 +Patch220: httpd-2.4.37-CVE-2021-33193.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=2034674 +Patch221: httpd-2.4.37-CVE-2021-44790.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=2034672 +Patch222: httpd-2.4.37-CVE-2021-44224.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=2064321 +Patch223: httpd-2.4.37-CVE-2022-22720.patch License: ASL 2.0 Group: System Environment/Daemons @@ -190,7 +225,8 @@ BuildRequires: zlib-devel, libselinux-devel, lua-devel, brotli-devel BuildRequires: apr-devel >= 1.5.0, apr-util-devel >= 1.5.0, pcre-devel >= 5.0 BuildRequires: systemd-devel # web server testpage added to redhat-logos in 82.0 (rhbz1896319) -Requires: /etc/mime.types, system-logos-httpd >= 82.0 +# new logo requires new footer copyring which was added in rhbz1934800 +Requires: /etc/mime.types, system-logos(httpd-logo-ng) Obsoletes: httpd-suexec Provides: webserver Provides: mod_dav = %{version}-%{release}, httpd-suexec = %{version}-%{release} @@ -330,6 +366,7 @@ interface for storing and accessing per-user session data. 
%patch38 -p1 -b .pr37355 %patch39 -p1 -b .proxy-ws-idle-timeout %patch40 -p1 -b .ssl-proxy-chains +%patch41 -p1 -b .usertrack-samesite %patch61 -p1 -b .r1738878 %patch62 -p1 -b .r1633085 @@ -355,6 +392,10 @@ interface for storing and accessing per-user session data. %patch84 -p1 -b .r1878280 %patch85 -p1 -b .htcacheclean-dont-break %patch86 -p1 -b .r1873907 +%patch87 -p1 -b .reply-two-tls-rec +%patch88 -p1 -b .r1845768+ +%patch89 -p1 -b .r1862410 +%patch90 -p1 -b .hcheck-mem-issues %patch200 -p1 -b .r1851471 %patch201 -p1 -b .CVE-2019-0211 @@ -368,6 +409,18 @@ interface for storing and accessing per-user session data. %patch209 -p1 -b .CVE-2020-1934 %patch210 -p1 -b .CVE-2018-17199 %patch211 -p1 -b .CVE-2020-11984 +%patch212 -p1 -b .CVE-2021-30641 +%patch213 -p1 -b .CVE-2021-26690 +%patch214 -p1 -b .CVE-2021-40438 +%patch215 -p1 -b .CVE-2021-26691 +%patch216 -p1 -b .CVE-2020-35452 +%patch217 -p1 -b .CVE-2021-34798 +%patch218 -p1 -b .CVE-2021-39275 +%patch219 -p1 -b .CVE-2021-36160 +%patch220 -p1 -b .CVE-2021-33193 +%patch221 -p1 -b .CVE-2021-44790 +%patch222 -p1 -b .CVE-2021-44224 +%patch223 -p1 -b .CVE-2022-22720 # Patch in the vendor string sed -i '/^#define PLATFORM/s/Unix/%{vstring}/' os/unix/os.h @@ -394,6 +447,9 @@ if test "x${vmmn}" != "x%{mmn}"; then exit 1 fi +# A new logo which comes together with a new test page +cp %{SOURCE46} ./docs/icons/apache_pb3.png + # Provide default layout cp $RPM_SOURCE_DIR/config.layout . @@ -870,6 +926,55 @@ rm -rf $RPM_BUILD_ROOT %{_rpmconfigdir}/macros.d/macros.httpd %changelog +* Mon Mar 21 2022 Luboš Uhliarik - 2.4.37-47.1 +- Resolves: #2065248 - CVE-2022-22720 httpd:2.4/httpd: HTTP request smuggling + vulnerability in Apache HTTP Server 2.4.52 and earlier + +* Thu Jan 20 2022 Luboš Uhliarik - 2.4.37-47 +- Resolves: #2035030 - CVE-2021-44224 httpd:2.4/httpd: possible NULL dereference + or SSRF in forward proxy configurations + +* Mon Jan 10 2022 Luboš Uhliarik - 2.4.37-46 +- Resolves: #2035063 - CVE-2021-44790 httpd:2.4/httpd: mod_lua: possible buffer + overflow when parsing multipart content + +* Thu Jan 06 2022 Luboš Uhliarik - 2.4.37-45 +- Resolves: #2007199 - CVE-2021-36160 httpd:2.4/httpd: mod_proxy_uwsgi: + out-of-bounds read via a crafted request uri-path +- Resolves: #1972491 - CVE-2021-33193 httpd:2.4/mod_http2: Request splitting via + HTTP/2 method injection and mod_proxy + +* Mon Nov 29 2021 Luboš Uhliarik - 2.4.37-44 +- Resolves: #1968278 - CVE-2020-35452 httpd:2.4/httpd: Single zero byte stack + overflow in mod_auth_digest +- Resolves: #2001046 - Apache httpd OOME with mod_dav in RHEL 8 +- Resolves: #2005128 (CVE-2021-34798) - CVE-2021-34798 httpd: NULL pointer + dereference via malformed requests +- Resolves: #1984828 - mod_proxy_hcheck piles up health checks leading to high + memory consumption +- Resolves: #2005119 - CVE-2021-39275 httpd: out-of-bounds write in + ap_escape_quotes() via malicious input + +* Tue Oct 26 2021 Luboš Uhliarik - 2.4.37-43 +- Related: #2007236 - CVE-2021-40438 httpd:2.4/httpd: mod_proxy: SSRF via + a crafted request uri-path + +* Thu Sep 30 2021 Luboš Uhliarik - 2.4.37-42 +- Resolves: #2007236 - CVE-2021-40438 httpd:2.4/httpd: mod_proxy: SSRF via + a crafted request uri-path +- Resolves: #1969229 - CVE-2021-26691 httpd:2.4/httpd: Heap overflow in + mod_session + +* Fri Jul 09 2021 Luboš Uhliarik - 2.4.37-41 +- Resolves: #1680111 - httpd sends reply to HTTPS GET using two TLS records +- Resolves: #1905613 - mod_ssl does not like valid certificate chain +- Resolves: #1935742 - [RFE] backport 
samesite/httponly/secure flags for + usertrack +- Resolves: #1972500 - CVE-2021-30641 httpd:2.4/httpd: MergeSlashes regression +- Resolves: #1968307 - CVE-2021-26690 httpd:2.4/httpd: mod_session NULL pointer + dereference in parser +- Resolves: #1934741 - Apache trademark update - new logo + * Fri May 14 2021 Lubos Uhliarik - 2.4.37-40 - Resolves: #1952557 - mod_proxy_wstunnel.html is a malformed XML - Resolves: #1937334 - SSLProtocol with based virtual hosts