diff --git a/.gitignore b/.gitignore index 4c70af4..6e3f15e 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1 @@ -SOURCES/htcacheclean.service.xml SOURCES/httpd-2.4.37.tar.bz2 -SOURCES/httpd.conf.xml -SOURCES/httpd.service.xml -SOURCES/centos-noindex-8.0.tar.gz diff --git a/.httpd.metadata b/.httpd.metadata index 000b59a..e540f7e 100644 --- a/.httpd.metadata +++ b/.httpd.metadata @@ -1,5 +1 @@ -a34c31169efbe6140496c37801489610461bdf9b SOURCES/htcacheclean.service.xml 4a38471de821288b0300148016f2b03dfee8adf2 SOURCES/httpd-2.4.37.tar.bz2 -fa18caadd0afbddc2c7a7fc404bf4f2b41867148 SOURCES/httpd.conf.xml -888df830bdc465de3bced6f075c33380018e544f SOURCES/httpd.service.xml -6aa65f45c247226fc922c455e0187abd90c839e8 SOURCES/centos-noindex-8.0.tar.gz diff --git a/README.debrand b/README.debrand deleted file mode 100644 index 01c46d2..0000000 --- a/README.debrand +++ /dev/null @@ -1,2 +0,0 @@ -Warning: This package was configured for automatic debranding, but the changes -failed to apply. diff --git a/SOURCES/htcacheclean.service.xml b/SOURCES/htcacheclean.service.xml new file mode 100644 index 0000000..01b68e4 --- /dev/null +++ b/SOURCES/htcacheclean.service.xml @@ -0,0 +1,123 @@ + + + + + + htcacheclean systemd unit + httpd + AuthorOrtonJoejorton@redhat.com + + + + htcacheclean.service + 8 + + + + htcacheclean.service + htcacheclean unit file for systemd + + + + + /usr/lib/systemd/system/htcacheclean.service + + + + + Description + + This manual page describes the systemd + unit file for the htcacheclean daemon. This + unit file provides a service which runs + htcacheclean in daemon mode, + periodically cleaning the disk cache root to ensure disk space + usage is within configured limits. + + + + + Options + + The service is configured by configuration file + /etc/sysconfig/htcacheclean. The following + variables are used, following standard systemd + EnvironmentFile= syntax: + + + + INTERVAL= + + Sets the interval between cache clean runs, in + minutes. By default this is configured as + 15. + + + + CACHE_ROOT= + + Sets the directory name used for the cache + root. By default this is configured as + /var/cache/httpd/proxy. + + + + LIMIT= + + Sets the total disk cache space limit, in + bytes. Use a K or M + suffix to signify kilobytes or megabytes. By default this is + set to 100M. + + + + OPTIONS= + + Any other options to pass to + htcacheclean. + + + + + + Files + + /usr/lib/systemd/system/htcacheclean.service, + /etc/sysconfig/htcacheclean + + + + See also + + + htcacheclean8, + httpd8, + httpd.service8, + systemd.exec8 + + + + + + diff --git a/SOURCES/httpd-2.4.37-CVE-2019-10092.patch b/SOURCES/httpd-2.4.37-CVE-2019-10092.patch new file mode 100644 index 0000000..a06d9c2 --- /dev/null +++ b/SOURCES/httpd-2.4.37-CVE-2019-10092.patch @@ -0,0 +1,192 @@ +diff --git a/modules/http/http_protocol.c b/modules/http/http_protocol.c +index e419eb6..dcafa9c 100644 +--- a/modules/http/http_protocol.c ++++ b/modules/http/http_protocol.c +@@ -1132,13 +1132,10 @@ static const char *get_canned_error_string(int status, + "\">here.

\n", + NULL)); + case HTTP_USE_PROXY: +- return(apr_pstrcat(p, +- "

This resource is only accessible " +- "through the proxy\n", +- ap_escape_html(r->pool, location), +- "
\nYou will need to configure " +- "your client to use that proxy.

\n", +- NULL)); ++ return("

This resource is only accessible " ++ "through the proxy\n" ++ "
\nYou will need to configure " ++ "your client to use that proxy.

\n"); + case HTTP_PROXY_AUTHENTICATION_REQUIRED: + case HTTP_UNAUTHORIZED: + return("

This server could not verify that you\n" +@@ -1154,34 +1151,20 @@ static const char *get_canned_error_string(int status, + "error-notes", + "

\n")); + case HTTP_FORBIDDEN: +- s1 = apr_pstrcat(p, +- "

You don't have permission to access ", +- ap_escape_html(r->pool, r->uri), +- "\non this server.
\n", +- NULL); +- return(add_optional_notes(r, s1, "error-notes", "

\n")); ++ return(add_optional_notes(r, "

You don't have permission to access this resource.", "error-notes", "

\n")); + case HTTP_NOT_FOUND: +- return(apr_pstrcat(p, +- "

The requested URL ", +- ap_escape_html(r->pool, r->uri), +- " was not found on this server.

\n", +- NULL)); ++ return("

The requested URL was not found on this server.

\n"); + case HTTP_METHOD_NOT_ALLOWED: + return(apr_pstrcat(p, + "

The requested method ", + ap_escape_html(r->pool, r->method), +- " is not allowed for the URL ", +- ap_escape_html(r->pool, r->uri), +- ".

\n", ++ " is not allowed for this URL.

\n", + NULL)); + case HTTP_NOT_ACCEPTABLE: +- s1 = apr_pstrcat(p, +- "

An appropriate representation of the " +- "requested resource ", +- ap_escape_html(r->pool, r->uri), +- " could not be found on this server.

\n", +- NULL); +- return(add_optional_notes(r, s1, "variant-list", "")); ++ return(add_optional_notes(r, ++ "

An appropriate representation of the requested resource " ++ "could not be found on this server.

\n", ++ "variant-list", "")); + case HTTP_MULTIPLE_CHOICES: + return(add_optional_notes(r, "", "variant-list", "")); + case HTTP_LENGTH_REQUIRED: +@@ -1192,18 +1175,13 @@ static const char *get_canned_error_string(int status, + NULL); + return(add_optional_notes(r, s1, "error-notes", "

\n")); + case HTTP_PRECONDITION_FAILED: +- return(apr_pstrcat(p, +- "

The precondition on the request " +- "for the URL ", +- ap_escape_html(r->pool, r->uri), +- " evaluated to false.

\n", +- NULL)); ++ return("

The precondition on the request " ++ "for this URL evaluated to false.

\n"); + case HTTP_NOT_IMPLEMENTED: + s1 = apr_pstrcat(p, + "

", +- ap_escape_html(r->pool, r->method), " to ", +- ap_escape_html(r->pool, r->uri), +- " not supported.
\n", ++ ap_escape_html(r->pool, r->method), " ", ++ " not supported for current URL.
\n", + NULL); + return(add_optional_notes(r, s1, "error-notes", "

\n")); + case HTTP_BAD_GATEWAY: +@@ -1211,29 +1189,19 @@ static const char *get_canned_error_string(int status, + "response from an upstream server.
" CRLF; + return(add_optional_notes(r, s1, "error-notes", "

\n")); + case HTTP_VARIANT_ALSO_VARIES: +- return(apr_pstrcat(p, +- "

A variant for the requested " +- "resource\n

\n",
+-                           ap_escape_html(r->pool, r->uri),
+-                           "\n
\nis itself a negotiable resource. " +- "This indicates a configuration error.

\n", +- NULL)); ++ return("

A variant for the requested " ++ "resource\n

\n"
++               "\n
\nis itself a negotiable resource. " ++ "This indicates a configuration error.

\n"); + case HTTP_REQUEST_TIME_OUT: + return("

Server timeout waiting for the HTTP request from the client.

\n"); + case HTTP_GONE: +- return(apr_pstrcat(p, +- "

The requested resource
", +- ap_escape_html(r->pool, r->uri), +- "
\nis no longer available on this server " +- "and there is no forwarding address.\n" +- "Please remove all references to this " +- "resource.

\n", +- NULL)); ++ return("

The requested resource is no longer available on this server" ++ " and there is no forwarding address.\n" ++ "Please remove all references to this resource.

\n"); + case HTTP_REQUEST_ENTITY_TOO_LARGE: + return(apr_pstrcat(p, +- "The requested resource
", +- ap_escape_html(r->pool, r->uri), "
\n", +- "does not allow request data with ", ++ "The requested resource does not allow request data with ", + ap_escape_html(r->pool, r->method), + " requests, or the amount of data provided in\n" + "the request exceeds the capacity limit.\n", +@@ -1317,11 +1285,9 @@ static const char *get_canned_error_string(int status, + "the Server Name Indication (SNI) in use for this\n" + "connection.

\n"); + case HTTP_UNAVAILABLE_FOR_LEGAL_REASONS: +- s1 = apr_pstrcat(p, +- "

Access to ", ap_escape_html(r->pool, r->uri), +- "\nhas been denied for legal reasons.
\n", +- NULL); +- return(add_optional_notes(r, s1, "error-notes", "

\n")); ++ return(add_optional_notes(r, ++ "

Access to this URL has been denied for legal reasons.
\n", ++ "error-notes", "

+diff --git a/modules/proxy/mod_proxy.c b/modules/proxy/mod_proxy.c
+index 800ede1..de48735 100644
+--- a/modules/proxy/mod_proxy.c
++++ b/modules/proxy/mod_proxy.c
+@@ -1055,9 +1055,10 @@ static int proxy_handler(request_rec *r)
+             char *end;
+             maxfwd = apr_strtoi64(str, &end, 10);
+             if (maxfwd < 0 || maxfwd == APR_INT64_MAX || *end) {
+-                return ap_proxyerror(r, HTTP_BAD_REQUEST,
+-                                     apr_psprintf(r->pool,
+-                                         "Max-Forwards value '%s' could not be parsed", str));
++                ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO()
++                              "Max-Forwards value '%s' could not be parsed", str);
++                return ap_proxyerror(r, HTTP_BAD_REQUEST,
++                                     "Max-Forwards request header could not be parsed");
+             }
+             else if (maxfwd == 0) {
+                 switch (r->method_number) {
+diff --git a/modules/proxy/mod_proxy_ftp.c b/modules/proxy/mod_proxy_ftp.c
+index 4a10987..8f6f853 100644
+--- a/modules/proxy/mod_proxy_ftp.c
++++ b/modules/proxy/mod_proxy_ftp.c
+@@ -1024,8 +1024,9 @@ static int proxy_ftp_handler(request_rec *r, proxy_worker *worker,
+     /* We break the URL into host, port, path-search */
+     if (r->parsed_uri.hostname == NULL) {
+         if (APR_SUCCESS != apr_uri_parse(p, url, &uri)) {
+-            return ap_proxyerror(r, HTTP_BAD_REQUEST,
+-                                 apr_psprintf(p, "URI cannot be parsed: %s", url));
++            ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO()
++                          "URI cannot be parsed: %s", url);
++            return ap_proxyerror(r, HTTP_BAD_REQUEST, "URI cannot be parsed");
+         }
+         connectname = uri.hostname;
+         connectport = uri.port;
+diff --git a/modules/proxy/proxy_util.c b/modules/proxy/proxy_util.c
+index 6501c68..0bbfa59 100644
+--- a/modules/proxy/proxy_util.c
++++ b/modules/proxy/proxy_util.c
+@@ -368,12 +368,9 @@ PROXY_DECLARE(char *)
+ 
+ PROXY_DECLARE(int) ap_proxyerror(request_rec *r, int statuscode, const char *message)
+ {
+-    const char *uri = ap_escape_html(r->pool, r->uri);
+     apr_table_setn(r->notes, "error-notes",
+                    apr_pstrcat(r->pool,
+-                              "The proxy server could not handle the request <em><a href=\"",
+-                              uri, "\">", ap_escape_html(r->pool, r->method), "&nbsp;", uri,
+-                              "</a></em>.<p>\n"
++                              "The proxy server could not handle the request<p>"
+                               "Reason: <strong>", ap_escape_html(r->pool, message),
+                               "</strong></p>
", + NULL)); diff --git a/SOURCES/httpd-2.4.37-CVE-2019-10097.patch b/SOURCES/httpd-2.4.37-CVE-2019-10097.patch new file mode 100644 index 0000000..b0132a9 --- /dev/null +++ b/SOURCES/httpd-2.4.37-CVE-2019-10097.patch @@ -0,0 +1,66 @@ +diff --git a/modules/metadata/mod_remoteip.c b/modules/metadata/mod_remoteip.c +index 4572ce1..a0cbc0f 100644 +--- a/modules/metadata/mod_remoteip.c ++++ b/modules/metadata/mod_remoteip.c +@@ -987,15 +987,13 @@ static remoteip_parse_status_t remoteip_process_v2_header(conn_rec *c, + return HDR_ERROR; + #endif + default: +- /* unsupported protocol, keep local connection address */ +- return HDR_DONE; ++ /* unsupported protocol */ ++ ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, c, APLOGNO(10183) ++ "RemoteIPProxyProtocol: unsupported protocol %.2hx", ++ (unsigned short)hdr->v2.fam); ++ return HDR_ERROR; + } + break; /* we got a sockaddr now */ +- +- case 0x00: /* LOCAL command */ +- /* keep local connection address for LOCAL */ +- return HDR_DONE; +- + default: + /* not a supported command */ + ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, c, APLOGNO(03507) +@@ -1087,11 +1085,24 @@ static apr_status_t remoteip_input_filter(ap_filter_t *f, + /* try to read a header's worth of data */ + while (!ctx->done) { + if (APR_BRIGADE_EMPTY(ctx->bb)) { +- ret = ap_get_brigade(f->next, ctx->bb, ctx->mode, block, +- ctx->need - ctx->rcvd); ++ apr_off_t got, want = ctx->need - ctx->rcvd; ++ ++ ret = ap_get_brigade(f->next, ctx->bb, ctx->mode, block, want); + if (ret != APR_SUCCESS) { ++ ap_log_cerror(APLOG_MARK, APLOG_ERR, ret, f->c, APLOGNO(10184) ++ "failed reading input"); + return ret; + } ++ ++ ret = apr_brigade_length(ctx->bb, 1, &got); ++ if (ret || got > want) { ++ ap_log_cerror(APLOG_MARK, APLOG_ERR, ret, f->c, APLOGNO(10185) ++ "RemoteIPProxyProtocol header too long, " ++ "got %" APR_OFF_T_FMT " expected %" APR_OFF_T_FMT, ++ got, want); ++ f->c->aborted = 1; ++ return APR_ECONNABORTED; ++ } + } + if (APR_BRIGADE_EMPTY(ctx->bb)) { + return block == APR_NONBLOCK_READ ? 
APR_SUCCESS : APR_EOF; +@@ -1139,6 +1150,13 @@ static apr_status_t remoteip_input_filter(ap_filter_t *f, + if (ctx->rcvd >= MIN_V2_HDR_LEN) { + ctx->need = MIN_V2_HDR_LEN + + remoteip_get_v2_len((proxy_header *) ctx->header); ++ if (ctx->need > sizeof(proxy_v2)) { ++ ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, f->c, APLOGNO(10186) ++ "RemoteIPProxyProtocol protocol header length too long"); ++ f->c->aborted = 1; ++ apr_brigade_destroy(ctx->bb); ++ return APR_ECONNABORTED; ++ } + } + if (ctx->rcvd >= ctx->need) { + psts = remoteip_process_v2_header(f->c, conn_conf, diff --git a/SOURCES/httpd-2.4.37-CVE-2019-10098.patch b/SOURCES/httpd-2.4.37-CVE-2019-10098.patch new file mode 100644 index 0000000..c3a559f --- /dev/null +++ b/SOURCES/httpd-2.4.37-CVE-2019-10098.patch @@ -0,0 +1,91 @@ +diff --git a/include/ap_regex.h b/include/ap_regex.h +index 7d8df79..7af2f99 100644 +--- a/include/ap_regex.h ++++ b/include/ap_regex.h +@@ -84,7 +84,11 @@ extern "C" { + + #define AP_REG_DOLLAR_ENDONLY 0x200 /* '$' matches at end of subject string only */ + +-#define AP_REG_MATCH "MATCH_" /** suggested prefix for ap_regname */ ++#define AP_REG_NO_DEFAULT 0x400 /**< Don't implicitely add AP_REG_DEFAULT options */ ++ ++#define AP_REG_MATCH "MATCH_" /**< suggested prefix for ap_regname */ ++ ++#define AP_REG_DEFAULT (AP_REG_DOTALL|AP_REG_DOLLAR_ENDONLY) + + /* Error values: */ + enum { +diff --git a/modules/filters/mod_substitute.c b/modules/filters/mod_substitute.c +index b7d5296..e976c51 100644 +--- a/modules/filters/mod_substitute.c ++++ b/modules/filters/mod_substitute.c +@@ -667,8 +667,10 @@ static const char *set_pattern(cmd_parms *cmd, void *cfg, const char *line) + + /* first see if we can compile the regex */ + if (!is_pattern) { +- r = ap_pregcomp(cmd->pool, from, AP_REG_EXTENDED | +- (ignore_case ? AP_REG_ICASE : 0)); ++ int flags = AP_REG_NO_DEFAULT ++ | (ap_regcomp_get_default_cflags() & AP_REG_DOLLAR_ENDONLY) ++ | (ignore_case ? 
AP_REG_ICASE : 0); ++ r = ap_pregcomp(cmd->pool, from, flags); + if (!r) + return "Substitute could not compile regex"; + } +diff --git a/server/core.c b/server/core.c +index 76432ce..6d00777 100644 +--- a/server/core.c ++++ b/server/core.c +@@ -4973,7 +4973,7 @@ static int core_pre_config(apr_pool_t *pconf, apr_pool_t *plog, apr_pool_t *ptem + init_config_defines(pconf); + apr_pool_cleanup_register(pconf, NULL, reset_config, apr_pool_cleanup_null); + +- ap_regcomp_set_default_cflags(AP_REG_DOLLAR_ENDONLY); ++ ap_regcomp_set_default_cflags(AP_REG_DEFAULT); + + mpm_common_pre_config(pconf); + +diff --git a/server/util_pcre.c b/server/util_pcre.c +index f2cb1bb..2a665c8 100644 +--- a/server/util_pcre.c ++++ b/server/util_pcre.c +@@ -120,7 +120,7 @@ AP_DECLARE(void) ap_regfree(ap_regex_t *preg) + * Compile a regular expression * + *************************************************/ + +-static int default_cflags = AP_REG_DOLLAR_ENDONLY; ++static int default_cflags = AP_REG_DEFAULT; + + AP_DECLARE(int) ap_regcomp_get_default_cflags(void) + { +@@ -168,7 +168,8 @@ AP_DECLARE(int) ap_regcomp(ap_regex_t * preg, const char *pattern, int cflags) + int errcode = 0; + int options = PCRE_DUPNAMES; + +- cflags |= default_cflags; ++ if ((cflags & AP_REG_NO_DEFAULT) == 0) ++ cflags |= default_cflags; + if ((cflags & AP_REG_ICASE) != 0) + options |= PCRE_CASELESS; + if ((cflags & AP_REG_NEWLINE) != 0) +diff --git a/server/util_regex.c b/server/util_regex.c +index 2a30d68..5405f8d 100644 +--- a/server/util_regex.c ++++ b/server/util_regex.c +@@ -94,6 +94,7 @@ AP_DECLARE(ap_rxplus_t*) ap_rxplus_compile(apr_pool_t *pool, + } + + /* anything after the current delimiter is flags */ ++ ret->flags = ap_regcomp_get_default_cflags() & AP_REG_DOLLAR_ENDONLY; + while (*++endp) { + switch (*endp) { + case 'i': ret->flags |= AP_REG_ICASE; break; +@@ -106,7 +107,7 @@ AP_DECLARE(ap_rxplus_t*) ap_rxplus_compile(apr_pool_t *pool, + default: break; /* we should probably be stricter here */ + } + } +- if (ap_regcomp(&ret->rx, rxstr, ret->flags) == 0) { ++ if (ap_regcomp(&ret->rx, rxstr, AP_REG_NO_DEFAULT | ret->flags) == 0) { + apr_pool_cleanup_register(pool, &ret->rx, rxplus_cleanup, + apr_pool_cleanup_null); + } diff --git a/SOURCES/httpd-2.4.37-CVE-2020-1934.patch b/SOURCES/httpd-2.4.37-CVE-2020-1934.patch new file mode 100644 index 0000000..69088b9 --- /dev/null +++ b/SOURCES/httpd-2.4.37-CVE-2020-1934.patch @@ -0,0 +1,68 @@ +--- a/modules/proxy/mod_proxy_ftp.c 2020/02/07 17:01:07 1873744 ++++ b/modules/proxy/mod_proxy_ftp.c 2020/02/07 17:04:45 1873745 +@@ -218,7 +218,7 @@ + * (EBCDIC) machines either. 
+ */ + static apr_status_t ftp_string_read(conn_rec *c, apr_bucket_brigade *bb, +- char *buff, apr_size_t bufflen, int *eos) ++ char *buff, apr_size_t bufflen, int *eos, apr_size_t *outlen) + { + apr_bucket *e; + apr_status_t rv; +@@ -230,6 +230,7 @@ + /* start with an empty string */ + buff[0] = 0; + *eos = 0; ++ *outlen = 0; + + /* loop through each brigade */ + while (!found) { +@@ -273,6 +274,7 @@ + if (len > 0) { + memcpy(pos, response, len); + pos += len; ++ *outlen += len; + } + } + apr_bucket_delete(e); +@@ -385,28 +387,36 @@ + char buff[5]; + char *mb = msgbuf, *me = &msgbuf[msglen]; + apr_status_t rv; ++ apr_size_t nread; ++ + int eos; + +- if (APR_SUCCESS != (rv = ftp_string_read(ftp_ctrl, bb, response, sizeof(response), &eos))) { ++ if (APR_SUCCESS != (rv = ftp_string_read(ftp_ctrl, bb, response, sizeof(response), &eos, &nread))) { + return -1; + } + /* + ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, NULL, APLOGNO(03233) + "<%s", response); + */ ++ if (nread < 4) { ++ ap_log_error(APLOG_MARK, APLOG_INFO, 0, NULL, APLOGNO(10229) "Malformed FTP response '%s'", response); ++ *mb = '\0'; ++ return -1; ++ } ++ + if (!apr_isdigit(response[0]) || !apr_isdigit(response[1]) || +- !apr_isdigit(response[2]) || (response[3] != ' ' && response[3] != '-')) ++ !apr_isdigit(response[2]) || (response[3] != ' ' && response[3] != '-')) + status = 0; + else + status = 100 * response[0] + 10 * response[1] + response[2] - 111 * '0'; + + mb = apr_cpystrn(mb, response + 4, me - mb); + +- if (response[3] == '-') { ++ if (response[3] == '-') { /* multi-line reply "123-foo\nbar\n123 baz" */ + memcpy(buff, response, 3); + buff[3] = ' '; + do { +- if (APR_SUCCESS != (rv = ftp_string_read(ftp_ctrl, bb, response, sizeof(response), &eos))) { ++ if (APR_SUCCESS != (rv = ftp_string_read(ftp_ctrl, bb, response, sizeof(response), &eos, &nread))) { + return -1; + } + mb = apr_cpystrn(mb, response + (' ' == response[0] ? 1 : 4), me - mb); diff --git a/SOURCES/httpd-2.4.37-balancer-failover.patch b/SOURCES/httpd-2.4.37-balancer-failover.patch new file mode 100644 index 0000000..ca691f7 --- /dev/null +++ b/SOURCES/httpd-2.4.37-balancer-failover.patch @@ -0,0 +1,225 @@ +diff --git a/modules/proxy/mod_proxy_http.c b/modules/proxy/mod_proxy_http.c +index ec1e042..2c0500f 100644 +--- a/modules/proxy/mod_proxy_http.c ++++ b/modules/proxy/mod_proxy_http.c +@@ -310,16 +310,18 @@ static int stream_reqbody_read(proxy_http_req_t *req, apr_bucket_brigade *bb, + return OK; + } + +-static int stream_reqbody(proxy_http_req_t *req, rb_methods rb_method) ++static int stream_reqbody(proxy_http_req_t *req) + { + request_rec *r = req->r; + int seen_eos = 0, rv = OK; + apr_size_t hdr_len; + char chunk_hdr[20]; /* must be here due to transient bucket. */ ++ conn_rec *origin = req->origin; + proxy_conn_rec *p_conn = req->backend; + apr_bucket_alloc_t *bucket_alloc = req->bucket_alloc; + apr_bucket_brigade *header_brigade = req->header_brigade; + apr_bucket_brigade *input_brigade = req->input_brigade; ++ rb_methods rb_method = req->rb_method; + apr_off_t bytes, bytes_streamed = 0; + apr_bucket *e; + +@@ -333,7 +335,7 @@ static int stream_reqbody(proxy_http_req_t *req, rb_methods rb_method) + } + + if (!APR_BRIGADE_EMPTY(input_brigade)) { +- /* If this brigade contains EOS, either stop or remove it. */ ++ /* If this brigade contains EOS, remove it and be done. 
*/ + if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) { + seen_eos = 1; + +@@ -375,7 +377,8 @@ static int stream_reqbody(proxy_http_req_t *req, rb_methods rb_method) + APR_BRIGADE_INSERT_TAIL(input_brigade, e); + } + } +- else if (bytes_streamed > req->cl_val) { ++ else if (rb_method == RB_STREAM_CL ++ && bytes_streamed > req->cl_val) { + /* C-L < bytes streamed?!? + * We will error out after the body is completely + * consumed, but we can't stream more bytes at the +@@ -407,7 +410,7 @@ static int stream_reqbody(proxy_http_req_t *req, rb_methods rb_method) + APR_BRIGADE_PREPEND(input_brigade, header_brigade); + + /* Flush here on EOS because we won't stream_reqbody_read() again */ +- rv = ap_proxy_pass_brigade(bucket_alloc, r, p_conn, req->origin, ++ rv = ap_proxy_pass_brigade(bucket_alloc, r, p_conn, origin, + input_brigade, seen_eos); + if (rv != OK) { + return rv; +@@ -454,10 +457,6 @@ static int spool_reqbody_cl(proxy_http_req_t *req, apr_off_t *bytes_spooled) + /* If this brigade contains EOS, either stop or remove it. */ + if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) { + seen_eos = 1; +- +- /* We can't pass this EOS to the output_filters. */ +- e = APR_BRIGADE_LAST(input_brigade); +- apr_bucket_delete(e); + } + + apr_brigade_length(input_brigade, 1, &bytes); +@@ -644,7 +643,18 @@ static int ap_proxy_http_prefetch(proxy_http_req_t *req, + */ + temp_brigade = apr_brigade_create(p, bucket_alloc); + block = req->prefetch_nonblocking ? APR_NONBLOCK_READ : APR_BLOCK_READ; +- do { ++ ++ /* Account for saved input, if any. */ ++ apr_brigade_length(input_brigade, 0, &bytes_read); ++ ++ /* Ensure we don't hit a wall where we have a buffer too small ++ * for ap_get_brigade's filters to fetch us another bucket, ++ * surrender once we hit 80 bytes less than MAX_MEM_SPOOL ++ * (an arbitrary value). ++ */ ++ while (bytes_read < MAX_MEM_SPOOL - 80 ++ && (APR_BRIGADE_EMPTY(input_brigade) ++ || !APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade)))) { + status = ap_get_brigade(r->input_filters, temp_brigade, + AP_MODE_READBYTES, block, + MAX_MEM_SPOOL - bytes_read); +@@ -686,15 +696,7 @@ static int ap_proxy_http_prefetch(proxy_http_req_t *req, + c->client_ip, c->remote_host ? c->remote_host: ""); + return HTTP_INTERNAL_SERVER_ERROR; + } +- +- /* Ensure we don't hit a wall where we have a buffer too small +- * for ap_get_brigade's filters to fetch us another bucket, +- * surrender once we hit 80 bytes less than MAX_MEM_SPOOL +- * (an arbitrary value.) +- */ +- } while ((bytes_read < MAX_MEM_SPOOL - 80) +- && !APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade)) +- && !req->prefetch_nonblocking); ++ } + + /* Use chunked request body encoding or send a content-length body? + * +@@ -838,35 +840,21 @@ static int ap_proxy_http_request(proxy_http_req_t *req) + { + int rv; + request_rec *r = req->r; +- apr_bucket_alloc_t *bucket_alloc = req->bucket_alloc; +- apr_bucket_brigade *header_brigade = req->header_brigade; +- apr_bucket_brigade *input_brigade = req->input_brigade; + + /* send the request header/body, if any. 
*/ + switch (req->rb_method) { ++ case RB_SPOOL_CL: + case RB_STREAM_CL: + case RB_STREAM_CHUNKED: + if (req->do_100_continue) { +- rv = ap_proxy_pass_brigade(bucket_alloc, r, req->backend, +- req->origin, header_brigade, 1); ++ rv = ap_proxy_pass_brigade(req->bucket_alloc, r, req->backend, ++ req->origin, req->header_brigade, 1); + } + else { +- rv = stream_reqbody(req, req->rb_method); ++ rv = stream_reqbody(req); + } + break; + +- case RB_SPOOL_CL: +- /* Prefetch has built the header and spooled the whole body; +- * if we don't expect 100-continue we can flush both all at once, +- * otherwise flush the header only. +- */ +- if (!req->do_100_continue) { +- APR_BRIGADE_CONCAT(header_brigade, input_brigade); +- } +- rv = ap_proxy_pass_brigade(bucket_alloc, r, req->backend, +- req->origin, header_brigade, 1); +- break; +- + default: + /* shouldn't be possible */ + rv = HTTP_INTERNAL_SERVER_ERROR; +@@ -1577,15 +1565,10 @@ int ap_proxy_http_process_response(proxy_http_req_t *req) + + /* Send the request body (fully). */ + switch(req->rb_method) { ++ case RB_SPOOL_CL: + case RB_STREAM_CL: + case RB_STREAM_CHUNKED: +- status = stream_reqbody(req, req->rb_method); +- break; +- case RB_SPOOL_CL: +- /* Prefetch has spooled the whole body, flush it. */ +- status = ap_proxy_pass_brigade(req->bucket_alloc, r, +- backend, origin, +- req->input_brigade, 1); ++ status = stream_reqbody(req); + break; + default: + /* Shouldn't happen */ +@@ -1940,6 +1923,7 @@ static int proxy_http_handler(request_rec *r, proxy_worker *worker, + const char *u; + proxy_http_req_t *req = NULL; + proxy_conn_rec *backend = NULL; ++ apr_bucket_brigade *input_brigade = NULL; + int is_ssl = 0; + conn_rec *c = r->connection; + proxy_dir_conf *dconf; +@@ -2005,8 +1989,20 @@ static int proxy_http_handler(request_rec *r, proxy_worker *worker, + + dconf = ap_get_module_config(r->per_dir_config, &proxy_module); + ++ /* We possibly reuse input data prefetched in previous call(s), e.g. for a ++ * balancer fallback scenario, and in this case the 100 continue settings ++ * should be consistent between balancer members. If not, we need to ignore ++ * Proxy100Continue on=>off once we tried to prefetch already, otherwise ++ * the HTTP_IN filter won't send 100 Continue for us anymore, and we might ++ * deadlock with the client waiting for each other. Note that off=>on is ++ * not an issue because in this case r->expecting_100 is false (the 100 ++ * Continue is out already), but we make sure that prefetch will be ++ * nonblocking to avoid passing more time there. ++ */ ++ apr_pool_userdata_get((void **)&input_brigade, "proxy-req-input", p); ++ + /* Should we handle end-to-end or ping 100-continue? */ +- if ((r->expecting_100 && dconf->forward_100_continue) ++ if ((r->expecting_100 && (dconf->forward_100_continue || input_brigade)) + || PROXY_DO_100_CONTINUE(worker, r)) { + /* We need to reset r->expecting_100 or prefetching will cause + * ap_http_filter() to send "100 Continue" response by itself. So +@@ -2023,7 +2019,8 @@ static int proxy_http_handler(request_rec *r, proxy_worker *worker, + /* Should we block while prefetching the body or try nonblocking and flush + * data to the backend ASAP? 
+ */ +- else if (apr_table_get(r->subprocess_env, "proxy-prefetch-nonblocking")) { ++ else if (input_brigade || apr_table_get(r->subprocess_env, ++ "proxy-prefetch-nonblocking")) { + req->prefetch_nonblocking = 1; + } + +@@ -2048,6 +2045,17 @@ static int proxy_http_handler(request_rec *r, proxy_worker *worker, + sizeof(req->server_portstr)))) + goto cleanup; + ++ /* The header is always (re-)built since it depends on worker settings, ++ * but the body can be fetched only once (even partially), so it's saved ++ * in between proxy_http_handler() calls should we come back here. ++ */ ++ req->header_brigade = apr_brigade_create(p, req->bucket_alloc); ++ if (input_brigade == NULL) { ++ input_brigade = apr_brigade_create(p, req->bucket_alloc); ++ apr_pool_userdata_setn(input_brigade, "proxy-req-input", NULL, p); ++ } ++ req->input_brigade = input_brigade; ++ + /* Prefetch (nonlocking) the request body so to increase the chance to get + * the whole (or enough) body and determine Content-Length vs chunked or + * spooled. By doing this before connecting or reusing the backend, we want +@@ -2058,8 +2066,6 @@ static int proxy_http_handler(request_rec *r, proxy_worker *worker, + * to reduce to the minimum the unavoidable local is_socket_connected() vs + * remote keepalive race condition. + */ +- req->input_brigade = apr_brigade_create(p, req->bucket_alloc); +- req->header_brigade = apr_brigade_create(p, req->bucket_alloc); + if ((status = ap_proxy_http_prefetch(req, uri, locurl)) != OK) + goto cleanup; + diff --git a/SOURCES/httpd-2.4.37-logjournal.patch b/SOURCES/httpd-2.4.37-logjournal.patch new file mode 100644 index 0000000..721911c --- /dev/null +++ b/SOURCES/httpd-2.4.37-logjournal.patch @@ -0,0 +1,87 @@ +diff --git a/modules/loggers/config.m4 b/modules/loggers/config.m4 +index 762e773e94..0848d2e377 100644 +--- a/modules/loggers/config.m4 ++++ b/modules/loggers/config.m4 +@@ -5,6 +5,8 @@ dnl APACHE_MODULE(name, helptext[, objects[, structname[, default[, config]]]]) + APACHE_MODPATH_INIT(loggers) + + APACHE_MODULE(log_config, logging configuration. You won't be able to log requests to the server without this module., , , yes) ++APR_ADDTO(MOD_LOG_CONFIG_LDADD, [$SYSTEMD_LIBS]) ++ + APACHE_MODULE(log_debug, configurable debug logging, , , most) + APACHE_MODULE(log_forensic, forensic logging) + +diff --git a/modules/loggers/mod_log_config.c b/modules/loggers/mod_log_config.c +index 996c09cf49..50a056a2f8 100644 +--- a/modules/loggers/mod_log_config.c ++++ b/modules/loggers/mod_log_config.c +@@ -172,6 +172,10 @@ + #include + #endif + ++#ifdef HAVE_SYSTEMD ++#include ++#endif ++ + #define DEFAULT_LOG_FORMAT "%h %l %u %t \"%r\" %>s %b" + + module AP_MODULE_DECLARE_DATA log_config_module; +@@ -1638,6 +1642,25 @@ static apr_status_t ap_default_log_writer( request_rec *r, + + return rv; + } ++ ++static apr_status_t wrap_journal_stream(apr_pool_t *p, apr_file_t **outfd, ++ int priority) ++{ ++#ifdef HAVE_SYSTEMD ++ int fd; ++ ++ fd = sd_journal_stream_fd("httpd", priority, 0); ++ if (fd < 0) return fd; ++ ++ /* This is an AF_UNIX socket fd so is more pipe-like than ++ * file-like (the fd is neither seekable or readable), and use of ++ * apr_os_pipe_put_ex() allows cleanup registration. 
*/ ++ return apr_os_pipe_put_ex(outfd, &fd, 1, p); ++#else ++ return APR_ENOTIMPL; ++#endif ++} ++ + static void *ap_default_log_writer_init(apr_pool_t *p, server_rec *s, + const char* name) + { +@@ -1650,6 +1673,32 @@ static void *ap_default_log_writer_init(apr_pool_t *p, server_rec *s, + } + return ap_piped_log_write_fd(pl); + } ++ else if (strncasecmp(name, "journald:", 9) == 0) { ++ int priority; ++ const char *err = ap_parse_log_level(name + 9, &priority); ++ apr_status_t rv; ++ apr_file_t *fd; ++ ++ if (err == NULL && priority > LOG_DEBUG) { ++ err = "TRACE level debugging not supported with journald"; ++ } ++ ++ if (err) { ++ ap_log_error(APLOG_MARK, APLOG_ERR, APR_EBADPATH, s, ++ "invalid journald log priority name %s: %s", ++ name, err); ++ return NULL; ++ } ++ ++ rv = wrap_journal_stream(p, &fd, priority); ++ if (rv) { ++ ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, ++ "could not open journald log stream"); ++ return NULL; ++ } ++ ++ return fd; ++ } + else { + const char *fname = ap_server_root_relative(p, name); + apr_file_t *fd; diff --git a/SOURCES/httpd-2.4.37-proxy-continue.patch b/SOURCES/httpd-2.4.37-proxy-continue.patch new file mode 100644 index 0000000..932b043 --- /dev/null +++ b/SOURCES/httpd-2.4.37-proxy-continue.patch @@ -0,0 +1,1713 @@ +diff --git a/modules/proxy/mod_proxy.c b/modules/proxy/mod_proxy.c +index de48735..d13c249 100644 +--- a/modules/proxy/mod_proxy.c ++++ b/modules/proxy/mod_proxy.c +@@ -1574,6 +1574,8 @@ static void *create_proxy_dir_config(apr_pool_t *p, char *dummy) + new->error_override_set = 0; + new->add_forwarded_headers = 1; + new->add_forwarded_headers_set = 0; ++ new->forward_100_continue = 1; ++ new->forward_100_continue_set = 0; + + return (void *) new; + } +@@ -1610,6 +1612,11 @@ static void *merge_proxy_dir_config(apr_pool_t *p, void *basev, void *addv) + : add->add_forwarded_headers; + new->add_forwarded_headers_set = add->add_forwarded_headers_set + || base->add_forwarded_headers_set; ++ new->forward_100_continue = ++ (add->forward_100_continue_set == 0) ? 
base->forward_100_continue ++ : add->forward_100_continue; ++ new->forward_100_continue_set = add->forward_100_continue_set ++ || base->forward_100_continue_set; + + return new; + } +@@ -2110,6 +2117,14 @@ static const char * + conf->preserve_host_set = 1; + return NULL; + } ++static const char * ++ forward_100_continue(cmd_parms *parms, void *dconf, int flag) ++{ ++ proxy_dir_conf *conf = dconf; ++ conf->forward_100_continue = flag; ++ conf->forward_100_continue_set = 1; ++ return NULL; ++} + + static const char * + set_recv_buffer_size(cmd_parms *parms, void *dummy, const char *arg) +@@ -2683,6 +2698,9 @@ static const command_rec proxy_cmds[] = + "Configure local source IP used for request forward"), + AP_INIT_FLAG("ProxyAddHeaders", add_proxy_http_headers, NULL, RSRC_CONF|ACCESS_CONF, + "on if X-Forwarded-* headers should be added or completed"), ++ AP_INIT_FLAG("Proxy100Continue", forward_100_continue, NULL, RSRC_CONF|ACCESS_CONF, ++ "on if 100-Continue should be forwarded to the origin server, off if the " ++ "proxy should handle it by itself"), + {NULL} + }; + +diff --git a/modules/proxy/mod_proxy.h b/modules/proxy/mod_proxy.h +index 3419023..288c5d4 100644 +--- a/modules/proxy/mod_proxy.h ++++ b/modules/proxy/mod_proxy.h +@@ -240,6 +240,8 @@ typedef struct { + /** Named back references */ + apr_array_header_t *refs; + ++ unsigned int forward_100_continue:1; ++ unsigned int forward_100_continue_set:1; + } proxy_dir_conf; + + /* if we interpolate env vars per-request, we'll need a per-request +@@ -380,6 +382,12 @@ do { \ + (w)->s->io_buffer_size_set = (c)->io_buffer_size_set; \ + } while (0) + ++#define PROXY_DO_100_CONTINUE(w, r) \ ++((w)->s->ping_timeout_set \ ++ && (PROXYREQ_REVERSE == (r)->proxyreq) \ ++ && !(apr_table_get((r)->subprocess_env, "force-proxy-request-1.0")) \ ++ && ap_request_has_body((r))) ++ + /* use 2 hashes */ + typedef struct { + unsigned int def; +diff --git a/modules/proxy/mod_proxy_ftp.c b/modules/proxy/mod_proxy_ftp.c +index 8f6f853..8d66b4a 100644 +--- a/modules/proxy/mod_proxy_ftp.c ++++ b/modules/proxy/mod_proxy_ftp.c +@@ -1181,12 +1181,10 @@ static int proxy_ftp_handler(request_rec *r, proxy_worker *worker, + return HTTP_SERVICE_UNAVAILABLE; + } + +- if (!backend->connection) { +- status = ap_proxy_connection_create_ex("FTP", backend, r); +- if (status != OK) { +- proxy_ftp_cleanup(r, backend); +- return status; +- } ++ status = ap_proxy_connection_create_ex("FTP", backend, r); ++ if (status != OK) { ++ proxy_ftp_cleanup(r, backend); ++ return status; + } + + /* Use old naming */ +diff --git a/modules/proxy/mod_proxy_hcheck.c b/modules/proxy/mod_proxy_hcheck.c +index 2783a58..dd8e407 100644 +--- a/modules/proxy/mod_proxy_hcheck.c ++++ b/modules/proxy/mod_proxy_hcheck.c +@@ -762,10 +762,8 @@ static apr_status_t hc_check_http(baton_t *baton) + } + + r = create_request_rec(ptemp, ctx->s, baton->balancer, wctx->method); +- if (!backend->connection) { +- if ((status = ap_proxy_connection_create_ex("HCOH", backend, r)) != OK) { +- return backend_cleanup("HCOH", backend, ctx->s, status); +- } ++ if ((status = ap_proxy_connection_create_ex("HCOH", backend, r)) != OK) { ++ return backend_cleanup("HCOH", backend, ctx->s, status); + } + set_request_connection(r, backend->connection); + +diff --git a/modules/proxy/mod_proxy_http.c b/modules/proxy/mod_proxy_http.c +index 56af9a8..f007ad6 100644 +--- a/modules/proxy/mod_proxy_http.c ++++ b/modules/proxy/mod_proxy_http.c +@@ -216,8 +216,12 @@ static void add_cl(apr_pool_t *p, + APR_BRIGADE_INSERT_TAIL(header_brigade, 
e); + } + +-#define ASCII_CRLF "\015\012" +-#define ASCII_ZERO "\060" ++#ifndef CRLF_ASCII ++#define CRLF_ASCII "\015\012" ++#endif ++#ifndef ZERO_ASCII ++#define ZERO_ASCII "\060" ++#endif + + static void terminate_headers(apr_bucket_alloc_t *bucket_alloc, + apr_bucket_brigade *header_brigade) +@@ -225,304 +229,228 @@ static void terminate_headers(apr_bucket_alloc_t *bucket_alloc, + apr_bucket *e; + + /* add empty line at the end of the headers */ +- e = apr_bucket_immortal_create(ASCII_CRLF, 2, bucket_alloc); ++ e = apr_bucket_immortal_create(CRLF_ASCII, 2, bucket_alloc); + APR_BRIGADE_INSERT_TAIL(header_brigade, e); + } + + + #define MAX_MEM_SPOOL 16384 + +-static int stream_reqbody_chunked(apr_pool_t *p, +- request_rec *r, +- proxy_conn_rec *p_conn, +- conn_rec *origin, +- apr_bucket_brigade *header_brigade, +- apr_bucket_brigade *input_brigade) +-{ +- int seen_eos = 0, rv = OK; +- apr_size_t hdr_len; +- apr_off_t bytes; +- apr_status_t status; +- apr_bucket_alloc_t *bucket_alloc = r->connection->bucket_alloc; +- apr_bucket_brigade *bb; +- apr_bucket *e; +- +- add_te_chunked(p, bucket_alloc, header_brigade); +- terminate_headers(bucket_alloc, header_brigade); ++typedef enum { ++ RB_INIT = 0, ++ RB_STREAM_CL, ++ RB_STREAM_CHUNKED, ++ RB_SPOOL_CL ++} rb_methods; + +- while (!APR_BUCKET_IS_EOS(APR_BRIGADE_FIRST(input_brigade))) +- { +- char chunk_hdr[20]; /* must be here due to transient bucket. */ ++typedef struct { ++ apr_pool_t *p; ++ request_rec *r; ++ proxy_worker *worker; ++ proxy_server_conf *sconf; + +- /* If this brigade contains EOS, either stop or remove it. */ +- if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) { +- seen_eos = 1; +- +- /* We can't pass this EOS to the output_filters. */ +- e = APR_BRIGADE_LAST(input_brigade); +- apr_bucket_delete(e); +- } +- +- apr_brigade_length(input_brigade, 1, &bytes); ++ char server_portstr[32]; ++ proxy_conn_rec *backend; ++ conn_rec *origin; + +- hdr_len = apr_snprintf(chunk_hdr, sizeof(chunk_hdr), +- "%" APR_UINT64_T_HEX_FMT CRLF, +- (apr_uint64_t)bytes); ++ apr_bucket_alloc_t *bucket_alloc; ++ apr_bucket_brigade *header_brigade; ++ apr_bucket_brigade *input_brigade; ++ char *old_cl_val, *old_te_val; ++ apr_off_t cl_val; + +- ap_xlate_proto_to_ascii(chunk_hdr, hdr_len); +- e = apr_bucket_transient_create(chunk_hdr, hdr_len, +- bucket_alloc); +- APR_BRIGADE_INSERT_HEAD(input_brigade, e); ++ rb_methods rb_method; + +- /* +- * Append the end-of-chunk CRLF +- */ +- e = apr_bucket_immortal_create(ASCII_CRLF, 2, bucket_alloc); +- APR_BRIGADE_INSERT_TAIL(input_brigade, e); ++ int expecting_100; ++ unsigned int do_100_continue:1, ++ prefetch_nonblocking:1; ++} proxy_http_req_t; + +- if (header_brigade) { +- /* we never sent the header brigade, so go ahead and +- * take care of that now +- */ +- bb = header_brigade; +- +- /* +- * Save input_brigade in bb brigade. (At least) in the SSL case +- * input_brigade contains transient buckets whose data would get +- * overwritten during the next call of ap_get_brigade in the loop. +- * ap_save_brigade ensures these buckets to be set aside. +- * Calling ap_save_brigade with NULL as filter is OK, because +- * bb brigade already has been created and does not need to get +- * created by ap_save_brigade. +- */ +- status = ap_save_brigade(NULL, &bb, &input_brigade, p); +- if (status != APR_SUCCESS) { +- return HTTP_INTERNAL_SERVER_ERROR; +- } +- +- header_brigade = NULL; +- } +- else { +- bb = input_brigade; ++/* Read what's in the client pipe. 
If nonblocking is set and read is EAGAIN, ++ * pass a FLUSH bucket to the backend and read again in blocking mode. ++ */ ++static int stream_reqbody_read(proxy_http_req_t *req, apr_bucket_brigade *bb, ++ int nonblocking) ++{ ++ request_rec *r = req->r; ++ proxy_conn_rec *p_conn = req->backend; ++ apr_bucket_alloc_t *bucket_alloc = req->bucket_alloc; ++ apr_read_type_e block = nonblocking ? APR_NONBLOCK_READ : APR_BLOCK_READ; ++ apr_status_t status; ++ int rv; ++ ++ for (;;) { ++ status = ap_get_brigade(r->input_filters, bb, AP_MODE_READBYTES, ++ block, HUGE_STRING_LEN); ++ if (block == APR_BLOCK_READ ++ || (!APR_STATUS_IS_EAGAIN(status) ++ && (status != APR_SUCCESS || !APR_BRIGADE_EMPTY(bb)))) { ++ break; + } + +- /* The request is flushed below this loop with chunk EOS header */ +- rv = ap_proxy_pass_brigade(bucket_alloc, r, p_conn, origin, bb, 0); ++ /* Flush and retry (blocking) */ ++ apr_brigade_cleanup(bb); ++ rv = ap_proxy_pass_brigade(bucket_alloc, r, p_conn, req->origin, bb, 1); + if (rv != OK) { + return rv; + } +- +- if (seen_eos) { +- break; +- } +- +- status = ap_get_brigade(r->input_filters, input_brigade, +- AP_MODE_READBYTES, APR_BLOCK_READ, +- HUGE_STRING_LEN); +- +- if (status != APR_SUCCESS) { +- conn_rec *c = r->connection; +- ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(02608) +- "read request body failed to %pI (%s)" +- " from %s (%s)", p_conn->addr, +- p_conn->hostname ? p_conn->hostname: "", +- c->client_ip, c->remote_host ? c->remote_host: ""); +- return ap_map_http_request_error(status, HTTP_BAD_REQUEST); +- } ++ block = APR_BLOCK_READ; + } + +- if (header_brigade) { +- /* we never sent the header brigade because there was no request body; +- * send it now +- */ +- bb = header_brigade; +- } +- else { +- if (!APR_BRIGADE_EMPTY(input_brigade)) { +- /* input brigade still has an EOS which we can't pass to the output_filters. */ +- e = APR_BRIGADE_LAST(input_brigade); +- AP_DEBUG_ASSERT(APR_BUCKET_IS_EOS(e)); +- apr_bucket_delete(e); +- } +- bb = input_brigade; +- } +- +- e = apr_bucket_immortal_create(ASCII_ZERO ASCII_CRLF +- /* */ +- ASCII_CRLF, +- 5, bucket_alloc); +- APR_BRIGADE_INSERT_TAIL(bb, e); +- +- if (apr_table_get(r->subprocess_env, "proxy-sendextracrlf")) { +- e = apr_bucket_immortal_create(ASCII_CRLF, 2, bucket_alloc); +- APR_BRIGADE_INSERT_TAIL(bb, e); ++ if (status != APR_SUCCESS) { ++ conn_rec *c = r->connection; ++ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(02608) ++ "read request body failed to %pI (%s)" ++ " from %s (%s)", p_conn->addr, ++ p_conn->hostname ? p_conn->hostname: "", ++ c->client_ip, c->remote_host ? c->remote_host: ""); ++ return ap_map_http_request_error(status, HTTP_BAD_REQUEST); + } + +- /* Now we have headers-only, or the chunk EOS mark; flush it */ +- rv = ap_proxy_pass_brigade(bucket_alloc, r, p_conn, origin, bb, 1); +- return rv; ++ return OK; + } + +-static int stream_reqbody_cl(apr_pool_t *p, +- request_rec *r, +- proxy_conn_rec *p_conn, +- conn_rec *origin, +- apr_bucket_brigade *header_brigade, +- apr_bucket_brigade *input_brigade, +- char *old_cl_val) ++static int stream_reqbody(proxy_http_req_t *req, rb_methods rb_method) + { +- int seen_eos = 0, rv = 0; +- apr_status_t status = APR_SUCCESS; +- apr_bucket_alloc_t *bucket_alloc = r->connection->bucket_alloc; +- apr_bucket_brigade *bb; ++ request_rec *r = req->r; ++ int seen_eos = 0, rv = OK; ++ apr_size_t hdr_len; ++ char chunk_hdr[20]; /* must be here due to transient bucket. 
*/ ++ proxy_conn_rec *p_conn = req->backend; ++ apr_bucket_alloc_t *bucket_alloc = req->bucket_alloc; ++ apr_bucket_brigade *header_brigade = req->header_brigade; ++ apr_bucket_brigade *input_brigade = req->input_brigade; ++ apr_off_t bytes, bytes_streamed = 0; + apr_bucket *e; +- apr_off_t cl_val = 0; +- apr_off_t bytes; +- apr_off_t bytes_streamed = 0; +- +- if (old_cl_val) { +- char *endstr; + +- add_cl(p, bucket_alloc, header_brigade, old_cl_val); +- status = apr_strtoff(&cl_val, old_cl_val, &endstr, 10); +- +- if (status || *endstr || endstr == old_cl_val || cl_val < 0) { +- ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01085) +- "could not parse request Content-Length (%s)", +- old_cl_val); +- return HTTP_BAD_REQUEST; ++ do { ++ if (APR_BRIGADE_EMPTY(input_brigade) ++ && APR_BRIGADE_EMPTY(header_brigade)) { ++ rv = stream_reqbody_read(req, input_brigade, 1); ++ if (rv != OK) { ++ return rv; ++ } + } +- } +- terminate_headers(bucket_alloc, header_brigade); +- +- while (!APR_BUCKET_IS_EOS(APR_BRIGADE_FIRST(input_brigade))) +- { +- apr_brigade_length(input_brigade, 1, &bytes); +- bytes_streamed += bytes; +- +- /* If this brigade contains EOS, either stop or remove it. */ +- if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) { +- seen_eos = 1; + +- /* We can't pass this EOS to the output_filters. */ +- e = APR_BRIGADE_LAST(input_brigade); +- apr_bucket_delete(e); ++ if (!APR_BRIGADE_EMPTY(input_brigade)) { ++ /* If this brigade contains EOS, either stop or remove it. */ ++ if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) { ++ seen_eos = 1; + +- if (apr_table_get(r->subprocess_env, "proxy-sendextracrlf")) { +- e = apr_bucket_immortal_create(ASCII_CRLF, 2, bucket_alloc); +- APR_BRIGADE_INSERT_TAIL(input_brigade, e); ++ /* We can't pass this EOS to the output_filters. */ ++ e = APR_BRIGADE_LAST(input_brigade); ++ apr_bucket_delete(e); + } +- } + +- /* C-L < bytes streamed?!? +- * We will error out after the body is completely +- * consumed, but we can't stream more bytes at the +- * back end since they would in part be interpreted +- * as another request! If nothing is sent, then +- * just send nothing. +- * +- * Prevents HTTP Response Splitting. +- */ +- if (bytes_streamed > cl_val) { +- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01086) +- "read more bytes of request body than expected " +- "(got %" APR_OFF_T_FMT ", expected %" APR_OFF_T_FMT ")", +- bytes_streamed, cl_val); +- return HTTP_INTERNAL_SERVER_ERROR; +- } +- +- if (header_brigade) { +- /* we never sent the header brigade, so go ahead and +- * take care of that now +- */ +- bb = header_brigade; ++ apr_brigade_length(input_brigade, 1, &bytes); ++ bytes_streamed += bytes; + +- /* +- * Save input_brigade in bb brigade. (At least) in the SSL case +- * input_brigade contains transient buckets whose data would get +- * overwritten during the next call of ap_get_brigade in the loop. +- * ap_save_brigade ensures these buckets to be set aside. +- * Calling ap_save_brigade with NULL as filter is OK, because +- * bb brigade already has been created and does not need to get +- * created by ap_save_brigade. 
+- */ +- status = ap_save_brigade(NULL, &bb, &input_brigade, p); +- if (status != APR_SUCCESS) { ++ if (rb_method == RB_STREAM_CHUNKED) { ++ if (bytes) { ++ /* ++ * Prepend the size of the chunk ++ */ ++ hdr_len = apr_snprintf(chunk_hdr, sizeof(chunk_hdr), ++ "%" APR_UINT64_T_HEX_FMT CRLF, ++ (apr_uint64_t)bytes); ++ ap_xlate_proto_to_ascii(chunk_hdr, hdr_len); ++ e = apr_bucket_transient_create(chunk_hdr, hdr_len, ++ bucket_alloc); ++ APR_BRIGADE_INSERT_HEAD(input_brigade, e); ++ ++ /* ++ * Append the end-of-chunk CRLF ++ */ ++ e = apr_bucket_immortal_create(CRLF_ASCII, 2, bucket_alloc); ++ APR_BRIGADE_INSERT_TAIL(input_brigade, e); ++ } ++ if (seen_eos) { ++ /* ++ * Append the tailing 0-size chunk ++ */ ++ e = apr_bucket_immortal_create(ZERO_ASCII CRLF_ASCII ++ /* */ ++ CRLF_ASCII, ++ 5, bucket_alloc); ++ APR_BRIGADE_INSERT_TAIL(input_brigade, e); ++ } ++ } ++ else if (bytes_streamed > req->cl_val) { ++ /* C-L < bytes streamed?!? ++ * We will error out after the body is completely ++ * consumed, but we can't stream more bytes at the ++ * back end since they would in part be interpreted ++ * as another request! If nothing is sent, then ++ * just send nothing. ++ * ++ * Prevents HTTP Response Splitting. ++ */ ++ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01086) ++ "read more bytes of request body than expected " ++ "(got %" APR_OFF_T_FMT ", expected " ++ "%" APR_OFF_T_FMT ")", ++ bytes_streamed, req->cl_val); + return HTTP_INTERNAL_SERVER_ERROR; + } + +- header_brigade = NULL; +- } +- else { +- bb = input_brigade; +- } +- +- /* Once we hit EOS, we are ready to flush. */ +- rv = ap_proxy_pass_brigade(bucket_alloc, r, p_conn, origin, bb, seen_eos); +- if (rv != OK) { +- return rv ; +- } +- +- if (seen_eos) { +- break; ++ if (seen_eos && apr_table_get(r->subprocess_env, ++ "proxy-sendextracrlf")) { ++ e = apr_bucket_immortal_create(CRLF_ASCII, 2, bucket_alloc); ++ APR_BRIGADE_INSERT_TAIL(input_brigade, e); ++ } + } + +- status = ap_get_brigade(r->input_filters, input_brigade, +- AP_MODE_READBYTES, APR_BLOCK_READ, +- HUGE_STRING_LEN); ++ /* If we never sent the header brigade, go ahead and take care of ++ * that now by prepending it (once only since header_brigade will be ++ * empty afterward). ++ */ ++ APR_BRIGADE_PREPEND(input_brigade, header_brigade); + +- if (status != APR_SUCCESS) { +- conn_rec *c = r->connection; +- ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(02609) +- "read request body failed to %pI (%s)" +- " from %s (%s)", p_conn->addr, +- p_conn->hostname ? p_conn->hostname: "", +- c->client_ip, c->remote_host ? 
c->remote_host: ""); +- return ap_map_http_request_error(status, HTTP_BAD_REQUEST); ++ /* Flush here on EOS because we won't stream_reqbody_read() again */ ++ rv = ap_proxy_pass_brigade(bucket_alloc, r, p_conn, req->origin, ++ input_brigade, seen_eos); ++ if (rv != OK) { ++ return rv; + } +- } ++ } while (!seen_eos); + +- if (bytes_streamed != cl_val) { ++ if (rb_method == RB_STREAM_CL && bytes_streamed != req->cl_val) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01087) + "client %s given Content-Length did not match" + " number of body bytes read", r->connection->client_ip); + return HTTP_BAD_REQUEST; + } + +- if (header_brigade) { +- /* we never sent the header brigade since there was no request +- * body; send it now with the flush flag +- */ +- bb = header_brigade; +- return(ap_proxy_pass_brigade(bucket_alloc, r, p_conn, origin, bb, 1)); +- } +- + return OK; + } + +-static int spool_reqbody_cl(apr_pool_t *p, +- request_rec *r, +- proxy_conn_rec *p_conn, +- conn_rec *origin, +- apr_bucket_brigade *header_brigade, +- apr_bucket_brigade *input_brigade, +- int force_cl) ++static int spool_reqbody_cl(proxy_http_req_t *req, apr_off_t *bytes_spooled) + { +- int seen_eos = 0; +- apr_status_t status; +- apr_bucket_alloc_t *bucket_alloc = r->connection->bucket_alloc; ++ apr_pool_t *p = req->p; ++ request_rec *r = req->r; ++ int seen_eos = 0, rv = OK; ++ apr_status_t status = APR_SUCCESS; ++ apr_bucket_alloc_t *bucket_alloc = req->bucket_alloc; ++ apr_bucket_brigade *input_brigade = req->input_brigade; + apr_bucket_brigade *body_brigade; + apr_bucket *e; +- apr_off_t bytes, bytes_spooled = 0, fsize = 0; ++ apr_off_t bytes, fsize = 0; + apr_file_t *tmpfile = NULL; + apr_off_t limit; + + body_brigade = apr_brigade_create(p, bucket_alloc); ++ *bytes_spooled = 0; + + limit = ap_get_limit_req_body(r); + +- while (!APR_BUCKET_IS_EOS(APR_BRIGADE_FIRST(input_brigade))) +- { ++ do { ++ if (APR_BRIGADE_EMPTY(input_brigade)) { ++ rv = stream_reqbody_read(req, input_brigade, 0); ++ if (rv != OK) { ++ return rv; ++ } ++ } ++ + /* If this brigade contains EOS, either stop or remove it. */ + if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) { + seen_eos = 1; +@@ -534,13 +462,13 @@ static int spool_reqbody_cl(apr_pool_t *p, + + apr_brigade_length(input_brigade, 1, &bytes); + +- if (bytes_spooled + bytes > MAX_MEM_SPOOL) { ++ if (*bytes_spooled + bytes > MAX_MEM_SPOOL) { + /* + * LimitRequestBody does not affect Proxy requests (Should it?). + * Let it take effect if we decide to store the body in a + * temporary file on disk. + */ +- if (limit && (bytes_spooled + bytes > limit)) { ++ if (limit && (*bytes_spooled + bytes > limit)) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01088) + "Request body is larger than the configured " + "limit of %" APR_OFF_T_FMT, limit); +@@ -610,69 +538,42 @@ static int spool_reqbody_cl(apr_pool_t *p, + + } + +- bytes_spooled += bytes; +- +- if (seen_eos) { +- break; +- } +- +- status = ap_get_brigade(r->input_filters, input_brigade, +- AP_MODE_READBYTES, APR_BLOCK_READ, +- HUGE_STRING_LEN); +- +- if (status != APR_SUCCESS) { +- conn_rec *c = r->connection; +- ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(02610) +- "read request body failed to %pI (%s)" +- " from %s (%s)", p_conn->addr, +- p_conn->hostname ? p_conn->hostname: "", +- c->client_ip, c->remote_host ? 
c->remote_host: ""); +- return ap_map_http_request_error(status, HTTP_BAD_REQUEST); +- } +- } ++ *bytes_spooled += bytes; ++ } while (!seen_eos); + +- if (bytes_spooled || force_cl) { +- add_cl(p, bucket_alloc, header_brigade, apr_off_t_toa(p, bytes_spooled)); +- } +- terminate_headers(bucket_alloc, header_brigade); +- APR_BRIGADE_CONCAT(header_brigade, body_brigade); ++ APR_BRIGADE_CONCAT(input_brigade, body_brigade); + if (tmpfile) { +- apr_brigade_insert_file(header_brigade, tmpfile, 0, fsize, p); ++ apr_brigade_insert_file(input_brigade, tmpfile, 0, fsize, p); + } + if (apr_table_get(r->subprocess_env, "proxy-sendextracrlf")) { +- e = apr_bucket_immortal_create(ASCII_CRLF, 2, bucket_alloc); +- APR_BRIGADE_INSERT_TAIL(header_brigade, e); ++ e = apr_bucket_immortal_create(CRLF_ASCII, 2, bucket_alloc); ++ APR_BRIGADE_INSERT_TAIL(input_brigade, e); + } +- /* This is all a single brigade, pass with flush flagged */ +- return(ap_proxy_pass_brigade(bucket_alloc, r, p_conn, origin, header_brigade, 1)); ++ return OK; + } + +-static +-int ap_proxy_http_request(apr_pool_t *p, request_rec *r, +- proxy_conn_rec *p_conn, proxy_worker *worker, +- proxy_server_conf *conf, +- apr_uri_t *uri, +- char *url, char *server_portstr) ++static int ap_proxy_http_prefetch(proxy_http_req_t *req, ++ apr_uri_t *uri, char *url) + { ++ apr_pool_t *p = req->p; ++ request_rec *r = req->r; + conn_rec *c = r->connection; +- apr_bucket_alloc_t *bucket_alloc = c->bucket_alloc; +- apr_bucket_brigade *header_brigade; +- apr_bucket_brigade *input_brigade; ++ proxy_conn_rec *p_conn = req->backend; ++ apr_bucket_alloc_t *bucket_alloc = req->bucket_alloc; ++ apr_bucket_brigade *header_brigade = req->header_brigade; ++ apr_bucket_brigade *input_brigade = req->input_brigade; + apr_bucket_brigade *temp_brigade; + apr_bucket *e; + char *buf; + apr_status_t status; +- enum rb_methods {RB_INIT, RB_STREAM_CL, RB_STREAM_CHUNKED, RB_SPOOL_CL}; +- enum rb_methods rb_method = RB_INIT; +- char *old_cl_val = NULL; +- char *old_te_val = NULL; + apr_off_t bytes_read = 0; + apr_off_t bytes; + int force10, rv; ++ apr_read_type_e block; + conn_rec *origin = p_conn->connection; + + if (apr_table_get(r->subprocess_env, "force-proxy-request-1.0")) { +- if (r->expecting_100) { ++ if (req->expecting_100) { + return HTTP_EXPECTATION_FAILED; + } + force10 = 1; +@@ -680,17 +581,14 @@ int ap_proxy_http_request(apr_pool_t *p, request_rec *r, + force10 = 0; + } + +- header_brigade = apr_brigade_create(p, bucket_alloc); + rv = ap_proxy_create_hdrbrgd(p, header_brigade, r, p_conn, +- worker, conf, uri, url, server_portstr, +- &old_cl_val, &old_te_val); ++ req->worker, req->sconf, ++ uri, url, req->server_portstr, ++ &req->old_cl_val, &req->old_te_val); + if (rv != OK) { + return rv; + } + +- /* We have headers, let's figure out our request body... */ +- input_brigade = apr_brigade_create(p, bucket_alloc); +- + /* sub-requests never use keepalives, and mustn't pass request bodies. + * Because the new logic looks at input_brigade, we will self-terminate + * input_brigade and jump past all of the request body logic... +@@ -703,9 +601,9 @@ int ap_proxy_http_request(apr_pool_t *p, request_rec *r, + if (!r->kept_body && r->main) { + /* XXX: Why DON'T sub-requests use keepalives? 
*/ + p_conn->close = 1; +- old_cl_val = NULL; +- old_te_val = NULL; +- rb_method = RB_STREAM_CL; ++ req->old_te_val = NULL; ++ req->old_cl_val = NULL; ++ req->rb_method = RB_STREAM_CL; + e = apr_bucket_eos_create(input_brigade->bucket_alloc); + APR_BRIGADE_INSERT_TAIL(input_brigade, e); + goto skip_body; +@@ -719,18 +617,19 @@ int ap_proxy_http_request(apr_pool_t *p, request_rec *r, + * encoding has been done by the extensions' handler, and + * do not modify add_te_chunked's logic + */ +- if (old_te_val && strcasecmp(old_te_val, "chunked") != 0) { ++ if (req->old_te_val && ap_cstr_casecmp(req->old_te_val, "chunked") != 0) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01093) +- "%s Transfer-Encoding is not supported", old_te_val); ++ "%s Transfer-Encoding is not supported", ++ req->old_te_val); + return HTTP_INTERNAL_SERVER_ERROR; + } + +- if (old_cl_val && old_te_val) { ++ if (req->old_cl_val && req->old_te_val) { + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01094) + "client %s (%s) requested Transfer-Encoding " + "chunked body with Content-Length (C-L ignored)", + c->client_ip, c->remote_host ? c->remote_host: ""); +- old_cl_val = NULL; ++ req->old_cl_val = NULL; + origin->keepalive = AP_CONN_CLOSE; + p_conn->close = 1; + } +@@ -744,10 +643,19 @@ int ap_proxy_http_request(apr_pool_t *p, request_rec *r, + * reasonable size. + */ + temp_brigade = apr_brigade_create(p, bucket_alloc); ++ block = req->prefetch_nonblocking ? APR_NONBLOCK_READ : APR_BLOCK_READ; + do { + status = ap_get_brigade(r->input_filters, temp_brigade, +- AP_MODE_READBYTES, APR_BLOCK_READ, ++ AP_MODE_READBYTES, block, + MAX_MEM_SPOOL - bytes_read); ++ /* ap_get_brigade may return success with an empty brigade ++ * for a non-blocking read which would block ++ */ ++ if (block == APR_NONBLOCK_READ ++ && ((status == APR_SUCCESS && APR_BRIGADE_EMPTY(temp_brigade)) ++ || APR_STATUS_IS_EAGAIN(status))) { ++ break; ++ } + if (status != APR_SUCCESS) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01095) + "prefetch request body failed to %pI (%s)" +@@ -785,7 +693,8 @@ int ap_proxy_http_request(apr_pool_t *p, request_rec *r, + * (an arbitrary value.) + */ + } while ((bytes_read < MAX_MEM_SPOOL - 80) +- && !APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))); ++ && !APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade)) ++ && !req->prefetch_nonblocking); + + /* Use chunked request body encoding or send a content-length body? + * +@@ -822,7 +731,8 @@ int ap_proxy_http_request(apr_pool_t *p, request_rec *r, + * is absent, and the filters are unchanged (the body won't + * be resized by another content filter). + */ +- if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) { ++ if (!APR_BRIGADE_EMPTY(input_brigade) ++ && APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) { + /* The whole thing fit, so our decision is trivial, use + * the filtered bytes read from the client for the request + * body Content-Length. +@@ -830,34 +740,43 @@ int ap_proxy_http_request(apr_pool_t *p, request_rec *r, + * If we expected no body, and read no body, do not set + * the Content-Length. 
+ */ +- if (old_cl_val || old_te_val || bytes_read) { +- old_cl_val = apr_off_t_toa(r->pool, bytes_read); ++ if (req->old_cl_val || req->old_te_val || bytes_read) { ++ req->old_cl_val = apr_off_t_toa(r->pool, bytes_read); ++ req->cl_val = bytes_read; + } +- rb_method = RB_STREAM_CL; ++ req->rb_method = RB_STREAM_CL; + } +- else if (old_te_val) { ++ else if (req->old_te_val) { + if (force10 + || (apr_table_get(r->subprocess_env, "proxy-sendcl") + && !apr_table_get(r->subprocess_env, "proxy-sendchunks") + && !apr_table_get(r->subprocess_env, "proxy-sendchunked"))) { +- rb_method = RB_SPOOL_CL; ++ req->rb_method = RB_SPOOL_CL; + } + else { +- rb_method = RB_STREAM_CHUNKED; ++ req->rb_method = RB_STREAM_CHUNKED; + } + } +- else if (old_cl_val) { ++ else if (req->old_cl_val) { + if (r->input_filters == r->proto_input_filters) { +- rb_method = RB_STREAM_CL; ++ char *endstr; ++ status = apr_strtoff(&req->cl_val, req->old_cl_val, &endstr, 10); ++ if (status != APR_SUCCESS || *endstr || req->cl_val < 0) { ++ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01085) ++ "could not parse request Content-Length (%s)", ++ req->old_cl_val); ++ return HTTP_BAD_REQUEST; ++ } ++ req->rb_method = RB_STREAM_CL; + } + else if (!force10 + && (apr_table_get(r->subprocess_env, "proxy-sendchunks") + || apr_table_get(r->subprocess_env, "proxy-sendchunked")) + && !apr_table_get(r->subprocess_env, "proxy-sendcl")) { +- rb_method = RB_STREAM_CHUNKED; ++ req->rb_method = RB_STREAM_CHUNKED; + } + else { +- rb_method = RB_SPOOL_CL; ++ req->rb_method = RB_SPOOL_CL; + } + } + else { +@@ -865,7 +784,31 @@ int ap_proxy_http_request(apr_pool_t *p, request_rec *r, + * requests, and has the behavior that it will not add any C-L + * when the old_cl_val is NULL. + */ +- rb_method = RB_SPOOL_CL; ++ req->rb_method = RB_SPOOL_CL; ++ } ++ ++ switch (req->rb_method) { ++ case RB_STREAM_CHUNKED: ++ add_te_chunked(req->p, bucket_alloc, header_brigade); ++ break; ++ ++ case RB_STREAM_CL: ++ if (req->old_cl_val) { ++ add_cl(req->p, bucket_alloc, header_brigade, req->old_cl_val); ++ } ++ break; ++ ++ default: /* => RB_SPOOL_CL */ ++ /* If we have to spool the body, do it now, before connecting or ++ * reusing the backend connection. ++ */ ++ rv = spool_reqbody_cl(req, &bytes); ++ if (rv != OK) { ++ return rv; ++ } ++ if (bytes || req->old_te_val || req->old_cl_val) { ++ add_cl(p, bucket_alloc, header_brigade, apr_off_t_toa(p, bytes)); ++ } + } + + /* Yes I hate gotos. This is the subrequest shortcut */ +@@ -886,23 +829,44 @@ skip_body: + e = apr_bucket_pool_create(buf, strlen(buf), p, c->bucket_alloc); + APR_BRIGADE_INSERT_TAIL(header_brigade, e); + } ++ terminate_headers(bucket_alloc, header_brigade); + +- /* send the request body, if any. */ +- switch(rb_method) { +- case RB_STREAM_CHUNKED: +- rv = stream_reqbody_chunked(p, r, p_conn, origin, header_brigade, +- input_brigade); +- break; ++ return OK; ++} ++ ++static int ap_proxy_http_request(proxy_http_req_t *req) ++{ ++ int rv; ++ request_rec *r = req->r; ++ apr_bucket_alloc_t *bucket_alloc = req->bucket_alloc; ++ apr_bucket_brigade *header_brigade = req->header_brigade; ++ apr_bucket_brigade *input_brigade = req->input_brigade; ++ ++ /* send the request header/body, if any. 
*/ ++ switch (req->rb_method) { + case RB_STREAM_CL: +- rv = stream_reqbody_cl(p, r, p_conn, origin, header_brigade, +- input_brigade, old_cl_val); ++ case RB_STREAM_CHUNKED: ++ if (req->do_100_continue) { ++ rv = ap_proxy_pass_brigade(bucket_alloc, r, req->backend, ++ req->origin, header_brigade, 1); ++ } ++ else { ++ rv = stream_reqbody(req, req->rb_method); ++ } + break; ++ + case RB_SPOOL_CL: +- rv = spool_reqbody_cl(p, r, p_conn, origin, header_brigade, +- input_brigade, (old_cl_val != NULL) +- || (old_te_val != NULL) +- || (bytes_read > 0)); ++ /* Prefetch has built the header and spooled the whole body; ++ * if we don't expect 100-continue we can flush both all at once, ++ * otherwise flush the header only. ++ */ ++ if (!req->do_100_continue) { ++ APR_BRIGADE_CONCAT(header_brigade, input_brigade); ++ } ++ rv = ap_proxy_pass_brigade(bucket_alloc, r, req->backend, ++ req->origin, header_brigade, 1); + break; ++ + default: + /* shouldn't be possible */ + rv = HTTP_INTERNAL_SERVER_ERROR; +@@ -910,10 +874,12 @@ skip_body: + } + + if (rv != OK) { ++ conn_rec *c = r->connection; + /* apr_status_t value has been logged in lower level method */ + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01097) + "pass request body failed to %pI (%s) from %s (%s)", +- p_conn->addr, p_conn->hostname ? p_conn->hostname: "", ++ req->backend->addr, ++ req->backend->hostname ? req->backend->hostname: "", + c->client_ip, c->remote_host ? c->remote_host: ""); + return rv; + } +@@ -1189,12 +1155,16 @@ static int add_trailers(void *data, const char *key, const char *val) + } + + static +-apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, +- proxy_conn_rec **backend_ptr, +- proxy_worker *worker, +- proxy_server_conf *conf, +- char *server_portstr) { ++int ap_proxy_http_process_response(proxy_http_req_t *req) ++{ ++ apr_pool_t *p = req->p; ++ request_rec *r = req->r; + conn_rec *c = r->connection; ++ proxy_worker *worker = req->worker; ++ proxy_conn_rec *backend = req->backend; ++ conn_rec *origin = req->origin; ++ int do_100_continue = req->do_100_continue; ++ + char *buffer; + char fixed_buffer[HUGE_STRING_LEN]; + const char *buf; +@@ -1217,19 +1187,11 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, + int proxy_status = OK; + const char *original_status_line = r->status_line; + const char *proxy_status_line = NULL; +- proxy_conn_rec *backend = *backend_ptr; +- conn_rec *origin = backend->connection; + apr_interval_time_t old_timeout = 0; + proxy_dir_conf *dconf; +- int do_100_continue; + + dconf = ap_get_module_config(r->per_dir_config, &proxy_module); + +- do_100_continue = (worker->s->ping_timeout_set +- && ap_request_has_body(r) +- && (PROXYREQ_REVERSE == r->proxyreq) +- && !(apr_table_get(r->subprocess_env, "force-proxy-request-1.0"))); +- + bb = apr_brigade_create(p, c->bucket_alloc); + pass_bb = apr_brigade_create(p, c->bucket_alloc); + +@@ -1248,7 +1210,7 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, + } + + /* Setup for 100-Continue timeout if appropriate */ +- if (do_100_continue) { ++ if (do_100_continue && worker->s->ping_timeout_set) { + apr_socket_timeout_get(backend->sock, &old_timeout); + if (worker->s->ping_timeout != old_timeout) { + apr_status_t rc; +@@ -1273,6 +1235,8 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, + origin->local_addr->port)); + do { + apr_status_t rc; ++ int major = 0, minor = 0; ++ int toclose = 0; + + apr_brigade_cleanup(bb); + +@@ -1360,9 +1324,6 @@ 
apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, + * This is buggy if we ever see an HTTP/1.10 + */ + if (apr_date_checkmask(buffer, "HTTP/#.# ###*")) { +- int major, minor; +- int toclose; +- + major = buffer[5] - '0'; + minor = buffer[7] - '0'; + +@@ -1412,8 +1373,8 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, + "Set-Cookie", NULL); + + /* shove the headers direct into r->headers_out */ +- ap_proxy_read_headers(r, backend->r, buffer, response_field_size, origin, +- &pread_len); ++ ap_proxy_read_headers(r, backend->r, buffer, response_field_size, ++ origin, &pread_len); + + if (r->headers_out == NULL) { + ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(01106) +@@ -1491,7 +1452,8 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, + r->headers_out = ap_proxy_clean_warnings(p, r->headers_out); + + /* handle Via header in response */ +- if (conf->viaopt != via_off && conf->viaopt != via_block) { ++ if (req->sconf->viaopt != via_off ++ && req->sconf->viaopt != via_block) { + const char *server_name = ap_get_server_name(r); + /* If USE_CANONICAL_NAME_OFF was configured for the proxy virtual host, + * then the server name returned by ap_get_server_name() is the +@@ -1502,18 +1464,18 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, + server_name = r->server->server_hostname; + /* create a "Via:" response header entry and merge it */ + apr_table_addn(r->headers_out, "Via", +- (conf->viaopt == via_full) ++ (req->sconf->viaopt == via_full) + ? apr_psprintf(p, "%d.%d %s%s (%s)", + HTTP_VERSION_MAJOR(r->proto_num), + HTTP_VERSION_MINOR(r->proto_num), + server_name, +- server_portstr, ++ req->server_portstr, + AP_SERVER_BASEVERSION) + : apr_psprintf(p, "%d.%d %s%s", + HTTP_VERSION_MAJOR(r->proto_num), + HTTP_VERSION_MINOR(r->proto_num), + server_name, +- server_portstr) ++ req->server_portstr) + ); + } + +@@ -1531,18 +1493,6 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, + } + + if (ap_is_HTTP_INFO(proxy_status)) { +- interim_response++; +- /* Reset to old timeout iff we've adjusted it */ +- if (do_100_continue +- && (r->status == HTTP_CONTINUE) +- && (worker->s->ping_timeout != old_timeout)) { +- apr_socket_timeout_set(backend->sock, old_timeout); +- } +- } +- else { +- interim_response = 0; +- } +- if (interim_response) { + /* RFC2616 tells us to forward this. 
+ * + * OTOH, an interim response here may mean the backend +@@ -1563,7 +1513,13 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, + ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, + "HTTP: received interim %d response", r->status); + if (!policy +- || (!strcasecmp(policy, "RFC") && ((r->expecting_100 = 1)))) { ++ || (!strcasecmp(policy, "RFC") ++ && (proxy_status != HTTP_CONTINUE ++ || (req->expecting_100 = 1)))) { ++ if (proxy_status == HTTP_CONTINUE) { ++ r->expecting_100 = req->expecting_100; ++ req->expecting_100 = 0; ++ } + ap_send_interim_response(r, 1); + } + /* FIXME: refine this to be able to specify per-response-status +@@ -1573,7 +1529,106 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, + ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(01108) + "undefined proxy interim response policy"); + } ++ interim_response++; + } ++ else { ++ interim_response = 0; ++ } ++ ++ /* If we still do 100-continue (end-to-end or ping), either the ++ * current response is the expected "100 Continue" and we are done ++ * with this mode, or this is another interim response and we'll wait ++ * for the next one, or this is a final response and hence the backend ++ * did not honor our expectation. ++ */ ++ if (do_100_continue && (!interim_response ++ || proxy_status == HTTP_CONTINUE)) { ++ /* RFC 7231 - Section 5.1.1 - Expect - Requirement for servers ++ * A server that responds with a final status code before ++ * reading the entire message body SHOULD indicate in that ++ * response whether it intends to close the connection or ++ * continue reading and discarding the request message. ++ * ++ * So, if this response is not an interim 100 Continue, we can ++ * avoid sending the request body if the backend responded with ++ * "Connection: close" or HTTP < 1.1, and either let the core ++ * discard it or the caller try another balancer member with the ++ * same body (given status 503, though not implemented yet). ++ */ ++ int do_send_body = (proxy_status == HTTP_CONTINUE ++ || (!toclose && major > 0 && minor > 0)); ++ ++ /* Reset to old timeout iff we've adjusted it. */ ++ if (worker->s->ping_timeout_set) { ++ apr_socket_timeout_set(backend->sock, old_timeout); ++ } ++ ++ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(10153) ++ "HTTP: %s100 continue sent by %pI (%s): " ++ "%ssending body (response: HTTP/%i.%i %s)", ++ proxy_status != HTTP_CONTINUE ? "no " : "", ++ backend->addr, ++ backend->hostname ? backend->hostname : "", ++ do_send_body ? "" : "not ", ++ major, minor, proxy_status_line); ++ ++ if (do_send_body) { ++ int status; ++ ++ /* Send the request body (fully). */ ++ switch(req->rb_method) { ++ case RB_STREAM_CL: ++ case RB_STREAM_CHUNKED: ++ status = stream_reqbody(req, req->rb_method); ++ break; ++ case RB_SPOOL_CL: ++ /* Prefetch has spooled the whole body, flush it. */ ++ status = ap_proxy_pass_brigade(req->bucket_alloc, r, ++ backend, origin, ++ req->input_brigade, 1); ++ break; ++ default: ++ /* Shouldn't happen */ ++ status = HTTP_INTERNAL_SERVER_ERROR; ++ break; ++ } ++ if (status != OK) { ++ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, ++ APLOGNO(10154) "pass request body failed " ++ "to %pI (%s) from %s (%s) with status %i", ++ backend->addr, ++ backend->hostname ? backend->hostname : "", ++ c->client_ip, ++ c->remote_host ? 
c->remote_host : "", ++ status); ++ backend->close = 1; ++ return status; ++ } ++ } ++ else { ++ /* If we don't read the client connection any further, since ++ * there are pending data it should be "Connection: close"d to ++ * prevent reuse. We don't exactly c->keepalive = AP_CONN_CLOSE ++ * here though, because error_override or a potential retry on ++ * another backend could finally read that data and finalize ++ * the request processing, making keep-alive possible. So what ++ * we do is restoring r->expecting_100 for ap_set_keepalive() ++ * to do the right thing according to the final response and ++ * any later update of r->expecting_100. ++ */ ++ r->expecting_100 = req->expecting_100; ++ req->expecting_100 = 0; ++ } ++ ++ /* Once only! */ ++ do_100_continue = 0; ++ } ++ ++ if (interim_response) { ++ /* Already forwarded above, read next response */ ++ continue; ++ } ++ + /* Moved the fixups of Date headers and those affected by + * ProxyPassReverse/etc from here to ap_proxy_read_headers + */ +@@ -1648,7 +1703,6 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, + + /* send body - but only if a body is expected */ + if ((!r->header_only) && /* not HEAD request */ +- !interim_response && /* not any 1xx response */ + (proxy_status != HTTP_NO_CONTENT) && /* not 204 */ + (proxy_status != HTTP_NOT_MODIFIED)) { /* not 304 */ + +@@ -1697,7 +1751,7 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, + + rv = ap_get_brigade(backend->r->input_filters, bb, + AP_MODE_READBYTES, mode, +- conf->io_buffer_size); ++ req->sconf->io_buffer_size); + + /* ap_get_brigade will return success with an empty brigade + * for a non-blocking read which would block: */ +@@ -1789,7 +1843,7 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, + ap_proxy_release_connection(backend->worker->s->scheme, + backend, r->server); + /* Ensure that the backend is not reused */ +- *backend_ptr = NULL; ++ req->backend = NULL; + + } + +@@ -1798,12 +1852,13 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, + || c->aborted) { + /* Ack! Phbtt! Die! User aborted! */ + /* Only close backend if we haven't got all from the +- * backend. Furthermore if *backend_ptr is NULL it is no ++ * backend. Furthermore if req->backend is NULL it is no + * longer safe to fiddle around with backend as it might + * be already in use by another thread. + */ +- if (*backend_ptr) { +- backend->close = 1; /* this causes socket close below */ ++ if (req->backend) { ++ /* this causes socket close below */ ++ req->backend->close = 1; + } + finish = TRUE; + } +@@ -1816,7 +1871,7 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, + } + ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, "end body send"); + } +- else if (!interim_response) { ++ else { + ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, "header only"); + + /* make sure we release the backend connection as soon +@@ -1826,7 +1881,7 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, + */ + ap_proxy_release_connection(backend->worker->s->scheme, + backend, r->server); +- *backend_ptr = NULL; ++ req->backend = NULL; + + /* Pass EOS bucket down the filter chain. 
*/ + e = apr_bucket_eos_create(c->bucket_alloc); +@@ -1880,14 +1935,17 @@ static int proxy_http_handler(request_rec *r, proxy_worker *worker, + apr_port_t proxyport) + { + int status; +- char server_portstr[32]; + char *scheme; + const char *proxy_function; + const char *u; ++ proxy_http_req_t *req = NULL; + proxy_conn_rec *backend = NULL; + int is_ssl = 0; + conn_rec *c = r->connection; ++ proxy_dir_conf *dconf; + int retry = 0; ++ char *locurl = url; ++ int toclose = 0; + /* + * Use a shorter-lived pool to reduce memory usage + * and avoid a memory leak +@@ -1928,14 +1986,47 @@ static int proxy_http_handler(request_rec *r, proxy_worker *worker, + } + ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, "HTTP: serving URL %s", url); + +- + /* create space for state information */ + if ((status = ap_proxy_acquire_connection(proxy_function, &backend, +- worker, r->server)) != OK) +- goto cleanup; ++ worker, r->server)) != OK) { ++ return status; ++ } + + backend->is_ssl = is_ssl; + ++ req = apr_pcalloc(p, sizeof(*req)); ++ req->p = p; ++ req->r = r; ++ req->sconf = conf; ++ req->worker = worker; ++ req->backend = backend; ++ req->bucket_alloc = c->bucket_alloc; ++ req->rb_method = RB_INIT; ++ ++ dconf = ap_get_module_config(r->per_dir_config, &proxy_module); ++ ++ /* Should we handle end-to-end or ping 100-continue? */ ++ if ((r->expecting_100 && dconf->forward_100_continue) ++ || PROXY_DO_100_CONTINUE(worker, r)) { ++ /* We need to reset r->expecting_100 or prefetching will cause ++ * ap_http_filter() to send "100 Continue" response by itself. So ++ * we'll use req->expecting_100 in mod_proxy_http to determine whether ++ * the client should be forwarded "100 continue", and r->expecting_100 ++ * will be restored at the end of the function with the actual value of ++ * req->expecting_100 (i.e. cleared only if mod_proxy_http sent the ++ * "100 Continue" according to its policy). ++ */ ++ req->do_100_continue = req->prefetch_nonblocking = 1; ++ req->expecting_100 = r->expecting_100; ++ r->expecting_100 = 0; ++ } ++ /* Should we block while prefetching the body or try nonblocking and flush ++ * data to the backend ASAP? ++ */ ++ else if (apr_table_get(r->subprocess_env, "proxy-prefetch-nonblocking")) { ++ req->prefetch_nonblocking = 1; ++ } ++ + /* + * In the case that we are handling a reverse proxy connection and this + * is not a request that is coming over an already kept alive connection +@@ -1949,15 +2040,53 @@ static int proxy_http_handler(request_rec *r, proxy_worker *worker, + backend->close = 1; + } + ++ /* Step One: Determine Who To Connect To */ ++ if ((status = ap_proxy_determine_connection(p, r, conf, worker, backend, ++ uri, &locurl, proxyname, ++ proxyport, req->server_portstr, ++ sizeof(req->server_portstr)))) ++ goto cleanup; ++ ++ /* Prefetch (nonlocking) the request body so to increase the chance to get ++ * the whole (or enough) body and determine Content-Length vs chunked or ++ * spooled. By doing this before connecting or reusing the backend, we want ++ * to minimize the delay between this connection is considered alive and ++ * the first bytes sent (should the client's link be slow or some input ++ * filter retain the data). This is a best effort to prevent the backend ++ * from closing (from under us) what it thinks is an idle connection, hence ++ * to reduce to the minimum the unavoidable local is_socket_connected() vs ++ * remote keepalive race condition. 
++ */ ++ req->input_brigade = apr_brigade_create(p, req->bucket_alloc); ++ req->header_brigade = apr_brigade_create(p, req->bucket_alloc); ++ if ((status = ap_proxy_http_prefetch(req, uri, locurl)) != OK) ++ goto cleanup; ++ ++ /* We need to reset backend->close now, since ap_proxy_http_prefetch() set ++ * it to disable the reuse of the connection *after* this request (no keep- ++ * alive), not to close any reusable connection before this request. However ++ * assure what is expected later by using a local flag and do the right thing ++ * when ap_proxy_connect_backend() below provides the connection to close. ++ */ ++ toclose = backend->close; ++ backend->close = 0; ++ + while (retry < 2) { +- char *locurl = url; ++ if (retry) { ++ char *newurl = url; + +- /* Step One: Determine Who To Connect To */ +- if ((status = ap_proxy_determine_connection(p, r, conf, worker, backend, +- uri, &locurl, proxyname, +- proxyport, server_portstr, +- sizeof(server_portstr))) != OK) +- break; ++ /* Step One (again): (Re)Determine Who To Connect To */ ++ if ((status = ap_proxy_determine_connection(p, r, conf, worker, ++ backend, uri, &newurl, proxyname, proxyport, ++ req->server_portstr, sizeof(req->server_portstr)))) ++ break; ++ ++ /* The code assumes locurl is not changed during the loop, or ++ * ap_proxy_http_prefetch() would have to be called every time, ++ * and header_brigade be changed accordingly... ++ */ ++ AP_DEBUG_ASSERT(strcmp(newurl, locurl) == 0); ++ } + + /* Step Two: Make the Connection */ + if (ap_proxy_check_connection(proxy_function, backend, r->server, 1, +@@ -1972,54 +2101,64 @@ static int proxy_http_handler(request_rec *r, proxy_worker *worker, + } + + /* Step Three: Create conn_rec */ +- if (!backend->connection) { +- if ((status = ap_proxy_connection_create_ex(proxy_function, +- backend, r)) != OK) +- break; +- /* +- * On SSL connections set a note on the connection what CN is +- * requested, such that mod_ssl can check if it is requested to do +- * so. +- */ +- if (backend->ssl_hostname) { +- apr_table_setn(backend->connection->notes, +- "proxy-request-hostname", +- backend->ssl_hostname); +- } ++ if ((status = ap_proxy_connection_create_ex(proxy_function, ++ backend, r)) != OK) ++ break; ++ req->origin = backend->connection; ++ ++ /* Don't recycle the connection if prefetch (above) told not to do so */ ++ if (toclose) { ++ backend->close = 1; ++ req->origin->keepalive = AP_CONN_CLOSE; ++ } ++ ++ /* ++ * On SSL connections set a note on the connection what CN is ++ * requested, such that mod_ssl can check if it is requested to do ++ * so. 
++ * ++ * https://github.com/apache/httpd/commit/7d272e2628b4ae05f68cdc74b070707250896a34 ++ */ ++ if (backend->ssl_hostname) { ++ apr_table_setn(backend->connection->notes, ++ "proxy-request-hostname", ++ backend->ssl_hostname); + } + + /* Step Four: Send the Request + * On the off-chance that we forced a 100-Continue as a + * kinda HTTP ping test, allow for retries + */ +- if ((status = ap_proxy_http_request(p, r, backend, worker, +- conf, uri, locurl, server_portstr)) != OK) { +- if ((status == HTTP_SERVICE_UNAVAILABLE) && worker->s->ping_timeout_set) { +- backend->close = 1; ++ status = ap_proxy_http_request(req); ++ if (status != OK) { ++ if (req->do_100_continue && status == HTTP_SERVICE_UNAVAILABLE) { + ap_log_rerror(APLOG_MARK, APLOG_INFO, status, r, APLOGNO(01115) + "HTTP: 100-Continue failed to %pI (%s)", + worker->cp->addr, worker->s->hostname_ex); ++ backend->close = 1; + retry++; + continue; +- } else { +- break; +- } ++ } + ++ break; + } + + /* Step Five: Receive the Response... Fall thru to cleanup */ +- status = ap_proxy_http_process_response(p, r, &backend, worker, +- conf, server_portstr); ++ status = ap_proxy_http_process_response(req); + + break; + } + + /* Step Six: Clean Up */ + cleanup: +- if (backend) { ++ if (req->backend) { + if (status != OK) +- backend->close = 1; +- ap_proxy_http_cleanup(proxy_function, r, backend); ++ req->backend->close = 1; ++ ap_proxy_http_cleanup(proxy_function, r, req->backend); ++ } ++ if (req->expecting_100) { ++ /* Restore r->expecting_100 if we didn't touch it */ ++ r->expecting_100 = req->expecting_100; + } + return status; + } +diff --git a/modules/proxy/mod_proxy_uwsgi.c b/modules/proxy/mod_proxy_uwsgi.c +index c5d4f8e..e1253e4 100644 +--- a/modules/proxy/mod_proxy_uwsgi.c ++++ b/modules/proxy/mod_proxy_uwsgi.c +@@ -509,12 +509,11 @@ static int uwsgi_handler(request_rec *r, proxy_worker * worker, + } + + /* Step Three: Create conn_rec */ +- if (!backend->connection) { +- if ((status = ap_proxy_connection_create(UWSGI_SCHEME, backend, +- r->connection, +- r->server)) != OK) +- goto cleanup; +- } ++ if ((status = ap_proxy_connection_create(UWSGI_SCHEME, backend, ++ r->connection, ++ r->server)) != OK) ++ goto cleanup; ++ + + /* Step Four: Process the Request */ + if (((status = ap_setup_client_block(r, REQUEST_CHUNKED_ERROR)) != OK) +diff --git a/modules/proxy/mod_proxy_wstunnel.c b/modules/proxy/mod_proxy_wstunnel.c +index 9dda010..4aadbab 100644 +--- a/modules/proxy/mod_proxy_wstunnel.c ++++ b/modules/proxy/mod_proxy_wstunnel.c +@@ -284,8 +284,8 @@ static int proxy_wstunnel_handler(request_rec *r, proxy_worker *worker, + char server_portstr[32]; + proxy_conn_rec *backend = NULL; + char *scheme; +- int retry; + apr_pool_t *p = r->pool; ++ char *locurl = url; + apr_uri_t *uri; + int is_ssl = 0; + const char *upgrade_method = *worker->s->upgrade ? 
worker->s->upgrade : "WebSocket"; +@@ -318,59 +318,51 @@ static int proxy_wstunnel_handler(request_rec *r, proxy_worker *worker, + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02451) "serving URL %s", url); + + /* create space for state information */ +- status = ap_proxy_acquire_connection(scheme, &backend, worker, +- r->server); ++ status = ap_proxy_acquire_connection(scheme, &backend, worker, r->server); + if (status != OK) { +- if (backend) { +- backend->close = 1; +- ap_proxy_release_connection(scheme, backend, r->server); +- } +- return status; ++ goto cleanup; + } + + backend->is_ssl = is_ssl; + backend->close = 0; + +- retry = 0; +- while (retry < 2) { +- char *locurl = url; +- /* Step One: Determine Who To Connect To */ +- status = ap_proxy_determine_connection(p, r, conf, worker, backend, +- uri, &locurl, proxyname, proxyport, +- server_portstr, +- sizeof(server_portstr)); +- +- if (status != OK) +- break; +- +- /* Step Two: Make the Connection */ +- if (ap_proxy_connect_backend(scheme, backend, worker, r->server)) { +- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02452) +- "failed to make connection to backend: %s", +- backend->hostname); +- status = HTTP_SERVICE_UNAVAILABLE; +- break; +- } +- +- /* Step Three: Create conn_rec */ +- if (!backend->connection) { +- status = ap_proxy_connection_create_ex(scheme, backend, r); +- if (status != OK) { +- break; +- } +- } +- +- backend->close = 1; /* must be after ap_proxy_determine_connection */ +- ++ /* Step One: Determine Who To Connect To */ ++ status = ap_proxy_determine_connection(p, r, conf, worker, backend, ++ uri, &locurl, proxyname, proxyport, ++ server_portstr, ++ sizeof(server_portstr)); ++ ++ if (status != OK) { ++ goto cleanup; ++ } ++ ++ /* Step Two: Make the Connection */ ++ if (ap_proxy_connect_backend(scheme, backend, worker, r->server)) { ++ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02452) ++ "failed to make connection to backend: %s", ++ backend->hostname); ++ status = HTTP_SERVICE_UNAVAILABLE; ++ goto cleanup; ++ } + +- /* Step Three: Process the Request */ +- status = proxy_wstunnel_request(p, r, backend, worker, conf, uri, locurl, +- server_portstr); +- break; ++ /* Step Three: Create conn_rec */ ++ /* keep it because of */ ++ /* https://github.com/apache/httpd/commit/313d5ee40f390da1a6ee2c2752864ad3aad0a1c3 */ ++ status = ap_proxy_connection_create_ex(scheme, backend, r); ++ if (status != OK) { ++ goto cleanup; + } ++ ++ /* Step Four: Process the Request */ ++ status = proxy_wstunnel_request(p, r, backend, worker, conf, uri, locurl, ++ server_portstr); + ++cleanup: + /* Do not close the socket */ +- ap_proxy_release_connection(scheme, backend, r->server); ++ if (backend) { ++ backend->close = 1; ++ ap_proxy_release_connection(scheme, backend, r->server); ++ } + return status; + } + +diff --git a/modules/proxy/proxy_util.c b/modules/proxy/proxy_util.c +index 0bbfa59..0759dac 100644 +--- a/modules/proxy/proxy_util.c ++++ b/modules/proxy/proxy_util.c +@@ -3573,10 +3573,7 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p, + * To be compliant, we only use 100-Continue for requests with bodies. + * We also make sure we won't be talking HTTP/1.0 as well. 
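 * (A hedged aside, not part of the upstream hunk: judging from the inline
 * test removed just below, the PROXY_DO_100_CONTINUE(worker, r) macro that
 * replaces it is assumed to factor the same four conditions into one
 * shared definition, roughly:
 *
 *   (worker->s->ping_timeout_set
 *    && ap_request_has_body(r)
 *    && (PROXYREQ_REVERSE == r->proxyreq)
 *    && !apr_table_get(r->subprocess_env, "force-proxy-request-1.0"))
 *
 * so that mod_proxy_http and proxy_util agree on when a 100-continue
 * "ping" of the backend applies.)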
+ */ +- do_100_continue = (worker->s->ping_timeout_set +- && ap_request_has_body(r) +- && (PROXYREQ_REVERSE == r->proxyreq) +- && !(apr_table_get(r->subprocess_env, "force-proxy-request-1.0"))); ++ do_100_continue = PROXY_DO_100_CONTINUE(worker, r); + + if (apr_table_get(r->subprocess_env, "force-proxy-request-1.0")) { + /* +@@ -3593,7 +3590,9 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p, + buf = apr_pstrcat(p, r->method, " ", url, " HTTP/1.1" CRLF, NULL); + } + if (apr_table_get(r->subprocess_env, "proxy-nokeepalive")) { +- origin->keepalive = AP_CONN_CLOSE; ++ if (origin) { ++ origin->keepalive = AP_CONN_CLOSE; ++ } + p_conn->close = 1; + } + ap_xlate_proto_to_ascii(buf, strlen(buf)); +@@ -3685,14 +3684,6 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p, + if (do_100_continue) { + const char *val; + +- if (!r->expecting_100) { +- /* Don't forward any "100 Continue" response if the client is +- * not expecting it. +- */ +- apr_table_setn(r->subprocess_env, "proxy-interim-response", +- "Suppress"); +- } +- + /* Add the Expect header if not already there. */ + if (((val = apr_table_get(r->headers_in, "Expect")) == NULL) + || (strcasecmp(val, "100-Continue") != 0 /* fast path */ +diff --git a/server/protocol.c b/server/protocol.c +index 8d90055..8d1fdd2 100644 +--- a/server/protocol.c ++++ b/server/protocol.c +@@ -2188,21 +2188,23 @@ AP_DECLARE(void) ap_send_interim_response(request_rec *r, int send_headers) + "Status is %d - not sending interim response", r->status); + return; + } +- if ((r->status == HTTP_CONTINUE) && !r->expecting_100) { +- /* +- * Don't send 100-Continue when there was no Expect: 100-continue +- * in the request headers. For origin servers this is a SHOULD NOT +- * for proxies it is a MUST NOT according to RFC 2616 8.2.3 +- */ +- return; +- } ++ if (r->status == HTTP_CONTINUE) { ++ if (!r->expecting_100) { ++ /* ++ * Don't send 100-Continue when there was no Expect: 100-continue ++ * in the request headers. For origin servers this is a SHOULD NOT ++ * for proxies it is a MUST NOT according to RFC 2616 8.2.3 ++ */ ++ return; ++ } + +- /* if we send an interim response, we're no longer in a state of +- * expecting one. Also, this could feasibly be in a subrequest, +- * so we need to propagate the fact that we responded. +- */ +- for (rr = r; rr != NULL; rr = rr->main) { +- rr->expecting_100 = 0; ++ /* if we send an interim response, we're no longer in a state of ++ * expecting one. Also, this could feasibly be in a subrequest, ++ * so we need to propagate the fact that we responded. ++ */ ++ for (rr = r; rr != NULL; rr = rr->main) { ++ rr->expecting_100 = 0; ++ } + } + + status_line = apr_pstrcat(r->pool, AP_SERVER_PROTOCOL, " ", r->status_line, CRLF, NULL); diff --git a/SOURCES/httpd-2.4.37-session-expiry-updt-int.patch b/SOURCES/httpd-2.4.37-session-expiry-updt-int.patch new file mode 100644 index 0000000..8c5b852 --- /dev/null +++ b/SOURCES/httpd-2.4.37-session-expiry-updt-int.patch @@ -0,0 +1,194 @@ +diff --git a/docs/manual/mod/mod_session.html.en b/docs/manual/mod/mod_session.html.en +index 6834f8e..9f8301f 100644 +--- a/docs/manual/mod/mod_session.html.en ++++ b/docs/manual/mod/mod_session.html.en +@@ -82,6 +82,7 @@ +
+   SessionHeader
+   SessionInclude
+   SessionMaxAge
++  SessionExpiryUpdateInterval
+@@ -482,6 +483,37 @@ AuthName realm
+
+   Setting the maxage to zero disables session expiry.
+
++  SessionExpiryUpdateInterval Directive
++
++  Description: Define the number of seconds a session's expiry may change
++               without the session being updated
++  Syntax:      SessionExpiryUpdateInterval interval
++  Default:     SessionExpiryUpdateInterval 0 (always update)
++  Context:     server config, virtual host, directory, .htaccess
++  Module:      mod_session
++
++  The SessionExpiryUpdateInterval directive allows sessions to avoid the
++  cost associated with writing the session each request when only the
++  expiry time has changed. This can be used to make a website more
++  efficient or reduce load on a database when using mod_session_dbd. The
++  session is always written if the data stored in the session has changed
++  or the expiry has changed by more than the configured interval.
++
++  Setting the interval to zero disables this directive, and the session
++  expiry is refreshed for each request.
++
++  This directive only has an effect when combined with SessionMaxAge to
++  enable session expiry. Sessions without an expiry are only written when
++  the data stored in the session has changed.
++
++  Warning: Because the session expiry may not be refreshed with each
++  request, it's possible for sessions to expire up to interval seconds
++  early. Using a small interval usually provides sufficient savings while
++  having a minimal effect on expiry resolution.
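To make the documented behaviour concrete, here is a minimal sketch of the new directive used alongside mod_session_dbd; only the directive names come from the documentation above, while the values and cookie name are illustrative assumptions, and a working setup would additionally need mod_dbd configured (DBDriver, DBDParams):

    # Sketch only: keep sessions for 30 minutes, but rewrite a session
    # whose only change is its expiry time at most once every 5 minutes.
    Session On
    SessionDBDCookieName session path=/
    SessionMaxAge 1800
    SessionExpiryUpdateInterval 300

With these (assumed) values, a busy client causes at most one database write per five minutes for expiry refreshes, instead of one per request.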
      +diff --git a/modules/session/mod_session.c b/modules/session/mod_session.c +index d517020..10e6396 100644 +--- a/modules/session/mod_session.c ++++ b/modules/session/mod_session.c +@@ -177,6 +177,7 @@ static apr_status_t ap_session_save(request_rec * r, session_rec * z) + { + if (z) { + apr_time_t now = apr_time_now(); ++ apr_time_t initialExpiry = z->expiry; + int rv = 0; + + session_dir_conf *dconf = ap_get_module_config(r->per_dir_config, +@@ -207,6 +208,17 @@ static apr_status_t ap_session_save(request_rec * r, session_rec * z) + z->expiry = now + z->maxage * APR_USEC_PER_SEC; + } + ++ /* don't save if the only change is the expiry by a small amount */ ++ if (!z->dirty && dconf->expiry_update_time ++ && (z->expiry - initialExpiry < dconf->expiry_update_time)) { ++ return APR_SUCCESS; ++ } ++ ++ /* also don't save sessions that didn't change at all */ ++ if (!z->dirty && !z->maxage) { ++ return APR_SUCCESS; ++ } ++ + /* encode the session */ + rv = ap_run_session_encode(r, z); + if (OK != rv) { +@@ -553,6 +565,10 @@ static void *merge_session_dir_config(apr_pool_t * p, void *basev, void *addv) + new->env_set = add->env_set || base->env_set; + new->includes = apr_array_append(p, base->includes, add->includes); + new->excludes = apr_array_append(p, base->excludes, add->excludes); ++ new->expiry_update_time = (add->expiry_update_set == 0) ++ ? base->expiry_update_time ++ : add->expiry_update_time; ++ new->expiry_update_set = add->expiry_update_set || base->expiry_update_set; + + return new; + } +@@ -622,6 +638,21 @@ static const char *add_session_exclude(cmd_parms * cmd, void *dconf, const char + return NULL; + } + ++static const char * ++ set_session_expiry_update(cmd_parms * parms, void *dconf, const char *arg) ++{ ++ session_dir_conf *conf = dconf; ++ ++ conf->expiry_update_time = atoi(arg); ++ if (conf->expiry_update_time < 0) { ++ return "SessionExpiryUpdateInterval must be positive or nul"; ++ } ++ conf->expiry_update_time = apr_time_from_sec(conf->expiry_update_time); ++ conf->expiry_update_set = 1; ++ ++ return NULL; ++} ++ + + static const command_rec session_cmds[] = + { +@@ -637,6 +668,9 @@ static const command_rec session_cmds[] = + "URL prefixes to include in the session. Defaults to all URLs"), + AP_INIT_TAKE1("SessionExclude", add_session_exclude, NULL, RSRC_CONF|OR_AUTHCFG, + "URL prefixes to exclude from the session. Defaults to no URLs"), ++ AP_INIT_TAKE1("SessionExpiryUpdateInterval", set_session_expiry_update, NULL, RSRC_CONF|OR_AUTHCFG, ++ "time interval for which a session's expiry time may change " ++ "without having to be rewritten. Zero to disable"), + {NULL} + }; + +diff --git a/modules/session/mod_session.h b/modules/session/mod_session.h +index a6dd5e9..bdeb532 100644 +--- a/modules/session/mod_session.h ++++ b/modules/session/mod_session.h +@@ -115,6 +115,9 @@ typedef struct { + * URLs included if empty */ + apr_array_header_t *excludes; /* URL prefixes to be excluded. 
No + * URLs excluded if empty */ ++ apr_time_t expiry_update_time; /* seconds the session expiry may change and ++ * not have to be rewritten */ ++ int expiry_update_set; + } session_dir_conf; + + /** +diff --git a/modules/session/mod_session_cookie.c b/modules/session/mod_session_cookie.c +index 6a02322..4aa75e4 100644 +--- a/modules/session/mod_session_cookie.c ++++ b/modules/session/mod_session_cookie.c +@@ -60,9 +60,6 @@ static apr_status_t session_cookie_save(request_rec * r, session_rec * z) + session_cookie_dir_conf *conf = ap_get_module_config(r->per_dir_config, + &session_cookie_module); + +- /* don't cache auth protected pages */ +- apr_table_addn(r->headers_out, "Cache-Control", "no-cache"); +- + /* create RFC2109 compliant cookie */ + if (conf->name_set) { + if (z->encoded && z->encoded[0]) { +@@ -162,6 +159,9 @@ static apr_status_t session_cookie_load(request_rec * r, session_rec ** z) + /* put the session in the notes so we don't have to parse it again */ + apr_table_setn(m->notes, note, (char *)zz); + ++ /* don't cache auth protected pages */ ++ apr_table_addn(r->headers_out, "Cache-Control", "no-cache, private"); ++ + return OK; + + } +diff --git a/modules/session/mod_session_dbd.c b/modules/session/mod_session_dbd.c +index 0be7306..f683da2 100644 +--- a/modules/session/mod_session_dbd.c ++++ b/modules/session/mod_session_dbd.c +@@ -245,6 +245,9 @@ static apr_status_t session_dbd_load(request_rec * r, session_rec ** z) + /* put the session in the notes so we don't have to parse it again */ + apr_table_setn(m->notes, note, (char *)zz); + ++ /* don't cache pages with a session */ ++ apr_table_addn(r->headers_out, "Cache-Control", "no-cache, private"); ++ + return OK; + + } +@@ -409,9 +412,6 @@ static apr_status_t session_dbd_save(request_rec * r, session_rec * z) + if (conf->name_set || conf->name2_set) { + char *oldkey = NULL, *newkey = NULL; + +- /* don't cache pages with a session */ +- apr_table_addn(r->headers_out, "Cache-Control", "no-cache"); +- + /* if the session is new or changed, make a new session ID */ + if (z->uuid) { + oldkey = apr_pcalloc(r->pool, APR_UUID_FORMATTED_LENGTH + 1); +@@ -458,7 +458,7 @@ static apr_status_t session_dbd_save(request_rec * r, session_rec * z) + else if (conf->peruser) { + + /* don't cache pages with a session */ +- apr_table_addn(r->headers_out, "Cache-Control", "no-cache"); ++ apr_table_addn(r->headers_out, "Cache-Control", "no-cache, private"); + + if (r->user) { + ret = dbd_save(r, r->user, r->user, z->encoded, z->expiry); diff --git a/SOURCES/httpd.conf.xml b/SOURCES/httpd.conf.xml new file mode 100644 index 0000000..705e527 --- /dev/null +++ b/SOURCES/httpd.conf.xml @@ -0,0 +1,259 @@ + + + + + + + httpd.conf + httpd + AuthorOrtonJoejorton@redhat.com + + + + httpd.conf + 5 + + + + httpd.conf + Configuration files for httpd + + + + + /etc/httpd/conf/httpd.conf, + /etc/httpd/conf.modules.d, + /etc/httpd/conf.d + + + + + Description + + The main configuration file for the httpd daemon is + /etc/httpd/conf/httpd.conf. The syntax of + this file is described at , and + the full set of available directives is listed at . + + + + Configuration structure + + The main configuration file + (httpd.conf) sets up various defaults and + includes configuration files from two directories - + /etc/httpd/conf.modules.d and + /etc/httpd/conf.d. Packages containing + loadable modules (like ) place files + in the conf.modules.d directory with the + appropriate directive so that module + is loaded by default. + + Some notable configured defaults are:. 
The default document root from which content is served.

The daemon listens on TCP port 80.

Error messages are logged to @LOGDIR@/error_log.

CGI scripts are served via the /cgi-bin/ URL-path.

To remove any of the default configuration provided in separate files covered below, replace that file with an empty file rather than removing it from the filesystem, otherwise it may be restored to the original when the package which provides it is upgraded.

MPM configuration

The configuration file at /etc/httpd/conf.modules.d/00-mpm.conf is used to select the multi-processing module (MPM), which governs how httpd divides work between processes and/or threads at run-time. Exactly one LoadModule directive must be uncommented in this file; by default the event MPM is enabled. For more information on MPMs, see the httpd documentation.

If using the prefork MPM, the "httpd_graceful_shutdown" SELinux boolean should also be enabled, since with this MPM, httpd needs to establish TCP connections to local ports to successfully complete a graceful restart or shutdown. This boolean can be enabled by running the command: semanage boolean -m --on httpd_graceful_shutdown

Module configuration files

Module configuration files are provided in the /etc/httpd/conf.modules.d/ directory. Filenames in this directory are by convention prefixed with a two-digit numeric prefix to ensure they are processed in the desired order. Core modules provided with the httpd package are loaded by files with a 00- prefix to ensure these are loaded first. Only filenames with a .conf suffix in this directory will be processed.

Other provided configuration files are listed below.

/etc/httpd/conf.modules.d/00-base.conf: The set of core modules included with httpd which are all loaded by default.

/etc/httpd/conf.modules.d/00-optional.conf: The set of non-core modules included with httpd which are not loaded by default.

/etc/httpd/conf.modules.d/00-systemd.conf: This file loads mod_systemd, which is necessary for the correct operation of the httpd.service service, and should not be removed or disabled.

Other configuration files

Default module configuration files and site-specific configuration files are loaded from the /etc/httpd/conf.d/ directory. Only files with a .conf suffix will be loaded. The following files are provided:

/etc/httpd/conf.d/userdir.conf: This file gives an example configuration for mod_userdir to map URLs such as http://example.com/~jim/ to /home/jim/public_html/. Userdir mapping is disabled by default.

/etc/httpd/conf.d/autoindex.conf: This file provides the default configuration for mod_autoindex, which generates HTML directory listings when enabled. It also makes file icon image files available at the /icons/ URL-path.

/etc/httpd/conf.d/welcome.conf: This file enables a "welcome page" at http://localhost/ if no content is present in the default documentation root /var/www/html.

/etc/httpd/conf.d/ssl.conf (present only if mod_ssl is installed): This file configures a TLS virtual host listening on port 443. If the default configuration is used, the referenced test certificate and private key are generated the first time httpd.service is started; see httpd-init.service(8) for more information. 
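As an illustration of this mechanism, a hypothetical site-specific file (the name and content below are assumptions, not part of the package) is picked up automatically simply because it lives in /etc/httpd/conf.d/ and ends in .conf:

    # Hypothetical /etc/httpd/conf.d/example.conf
    Alias /docs /srv/example/docs
    <Directory "/srv/example/docs">
        Require all granted
    </Directory>

Files in this directory are included in alphabetical order, so a numeric prefix can be used if one drop-in must be processed before another.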
+ + + + + + + Instantiated services + + As an alternative to (or in addition to) the + httpd.service unit, the instantiated template + service httpd@.service unit file can be used, + which starts httpd using a different + configuration file to the default. For example, + systemctl start httpd@foobar.service will + start httpd using the configuration file + /etc/httpd/conf/foobar.conf. See httpd@.service8 for more information. + + + + + Files + + + /etc/httpd/conf/httpd.conf, + /etc/httpd/conf.d, + /etc/httpd/conf.modules.d + + + + + See also + + + httpd8, + httpd.service8, + , + + + + + + + diff --git a/SOURCES/httpd.service.xml b/SOURCES/httpd.service.xml new file mode 100644 index 0000000..b2c72dd --- /dev/null +++ b/SOURCES/httpd.service.xml @@ -0,0 +1,332 @@ + + + + + + + httpd systemd units + httpd + AuthorOrtonJoejorton@redhat.com + + + + httpd.service + 8 + + + + httpd.service + httpd@.service + httpd.socket + httpd-init.service + httpd unit files for systemd + + + + + /usr/lib/systemd/system/httpd.service, + /usr/lib/systemd/system/httpd@.service, + /usr/lib/systemd/system/httpd-init.service, + /usr/lib/systemd/system/httpd.socket + + + + + Description + + This manual page describes the systemd + unit files used to integrate the httpd daemon + with systemd. Two main unit files are + available: httpd.service allows the + httpd daemon to be run as a system service, and + httpd.socket allows httpd to be started via + socket-based activation. Most systems will use + httpd.service. + + The apachectl command has been modified + to invoke systemctl for most uses, so for + example, running apachectl start is equivalent + to running systemctl start httpd.service. This + ensures that the running httpd daemon is tracked and managed by + systemd. In contrast, running + httpd directly from a root shell will start the + service outside of systemd; in this case, + default security restrictions described below (including, but not + limited to, SELinux) will not be enforced. + + + Changing default behaviour + + To change the default behaviour of the httpd service, an + over-ride file should be created, rather + than changing + /usr/lib/systemd/system/httpd.service + directly, since such changes would be lost over package + upgrades. Running systemctl edit + httpd.service or systemctl edit + httpd.socket as root will create a drop-in file (in + the former case, in + /etc/systemd/system/httpd.service.d) which + over-rides the system defaults. + + For example, to set the + environment variable for the daemon, run systemctl edit + httpd.service and enter: + + [Service] +Environment=LD_LIBRARY_PATH=/opt/vendor/lib + + + + Starting the service at boot time + + The httpd.service and httpd.socket units are + disabled by default. To start the httpd + service at boot time, run: systemctl enable + httpd.service. In the default configuration, the + httpd daemon will accept connections on port 80 (and, if mod_ssl + is installed, TLS connections on port 443) for any configured + IPv4 or IPv6 address. + + If httpd is configured to depend on any specific IP + address (for example, with a "Listen" directive) which may only + become available during start-up, or if httpd depends on other + services (such as a database daemon), the service + must be configured to ensure correct + start-up ordering. 
For example, to ensure httpd is only running after all configured network interfaces are configured, create a drop-in file (as described above) with the following section:

[Unit]
After=network-online.target
Wants=network-online.target

See systemd.unit(5) for more information on start-up ordering with systemd.

SSL/TLS certificate generation

The httpd-init.service unit is provided with the mod_ssl package. This oneshot unit automatically creates a TLS server certificate and key (using a generated self-signed CA certificate and key) for testing purposes before httpd is started. To inhibit certificate generation, use systemctl mask httpd-init.service after installing mod_ssl, and adjust the mod_ssl configuration to use an appropriate certificate and key.

Reloading and stopping the service

When running systemctl reload httpd.service, a graceful restart is used, which sends a signal to the httpd parent process to reload the configuration and re-open log files. Any children with open connections at the time of reload will terminate only once they have completed serving requests. This prevents users of the server from seeing errors (or potentially losing data) due to the reload, but means there is some delay before any configuration changes take effect for all users.

Similarly, a graceful stop is used when systemctl stop httpd.service is run, which terminates the server only once active connections have been processed.

To "ungracefully" stop the server without waiting for requests to complete, use systemctl kill --kill-who=main httpd; similarly, to "ungracefully" reload the configuration, use systemctl kill --kill-who=main --signal=HUP httpd.

Automated service restarts

System packages (including the httpd package itself) may restart the httpd service automatically after packages are upgraded, installed, or removed. This is done using systemctl reload httpd.service, which produces a graceful restart by default as described above.

To suppress automatic reloads entirely, create the file /etc/sysconfig/httpd-disable-posttrans.

Changing the default MPM (Multi-Processing Module)

httpd offers a choice of multi-processing modules (MPMs), which can be configured in /etc/httpd/conf.modules.d/00-mpm.conf. See httpd.conf(5) for more information on changing the MPM.

systemd integration and mod_systemd

The httpd service uses the notify systemd service type. The mod_systemd module must be loaded (as in the default configuration) for this to work correctly; the service will fail if this module is not loaded. mod_systemd also makes worker and request statistics available when running systemctl status httpd. See systemd.exec(5) for more information on systemd service types.

Security and SELinux

The default SELinux policy restricts the httpd service in various ways. For example, the default policy limits the ports to which httpd can bind (using the Listen directive), which parts of the filesystem can be accessed, and whether outgoing TCP connections are possible. Many of these restrictions can be relaxed or adjusted by using semanage to change booleans or other types. See httpd_selinux(8) for more information.

The httpd service enables PrivateTmp by default. The /tmp and /var/tmp directories available within the httpd process (and CGI scripts, etc.) are not shared by other processes. 
See + systemd.exec5 + for more information. + + + + + Socket activation + + Socket activation (see + systemd.socket5 + for more information) can be used with httpd + by enabling the httpd.socket unit. The + httpd listener configuration must exactly + match the ListenStream options configured for + the httpd.socket unit. The default + httpd.socket has a + ListenStream=80 and, if mod_ssl is installed, + ListenStream=443 by a drop-in file. If + additional Listen directives are added to the + httpd configuration, corresponding + ListenStream options should be added via + drop-in files, for example via systemctl edit + httpd.socket. + + If using socket activation with httpd, only one listener + on any given TCP port is supported; a configuration with both + "Listen 127.0.0.1:80" and "Listen + 192.168.1.2:80" will not work. + + + + Instantiated services + + The httpd@.service unit is an + instantiated template service. An instance of this unit will be + started using the configuration file + /etc/httpd/conf/INSTANCE.conf, where + INSTANCE is replaced with the instance + name. For example, systemctl start + httpd@foobar.service will start httpd using the + configuration file + /etc/httpd/conf/foobar.conf. The + environment variable is set to + the instance name by the unit and is available for use within + the configuration file. + + To allow multiple instances of httpd to run + simultaneously, a number of configuration directives must be + changed, such as PidFile and + DefaultRuntimeDir to pick non-conflicting + paths, and Listen to choose different ports. + The example configuration file + /usr/share/doc/httpd/instance.conf + demonstrates how to make such changes using + variable. + + It can be useful to configure instances of + httpd@.service to reload when + httpd.service is reloaded; for example, + logrotate will reload only + httpd.service when logs are rotated. If this + behaviour is required, create a drop-in file for the instance as + follows: + + [Unit] +ReloadPropagatedFrom=httpd.service + + As with normal units, drop-in files for instances can be created + using systemctl edit, e.g. systemctl edit + httpd@foobar.service. + + + + + + Files + + /usr/lib/systemd/system/httpd.service, + /usr/lib/systemd/system/httpd.socket, + /usr/lib/systemd/system/httpd@.service, + /etc/systemd/systemd/httpd.service.d + + + + See also + + + httpd8, + httpd.conf5, + systemd1, + systemctl1, + systemd.service5, + systemd.exec5, + systemd.socket5, + httpd_selinux8, + semanage8 + + + + + + diff --git a/SOURCES/welcome.conf b/SOURCES/welcome.conf index 7fdc0d5..5d1e452 100644 --- a/SOURCES/welcome.conf +++ b/SOURCES/welcome.conf @@ -6,25 +6,13 @@ # NOTE: if this file is removed, it will be restored on upgrades. 
# - Options -Indexes - ErrorDocument 403 /noindex/index.html + Options -Indexes + ErrorDocument 403 /.noindex.html -Alias /noindex /usr/share/httpd/noindex - - Options MultiViews - DirectoryIndex index.html - - AddLanguage en-US .en-US - AddLanguage es-ES .es-ES - AddLanguage zh-CN .zh-CN - AddLanguage zh-HK .zh-HK - AddLanguage zh-TW .zh-TW - - LanguagePriority en - ForceLanguagePriority Fallback - - AllowOverride None - Require all granted + AllowOverride None + Require all granted + +Alias /.noindex.html /usr/share/httpd/noindex/index.html diff --git a/SPECS/httpd.spec b/SPECS/httpd.spec index 10eb53f..d07a4b1 100644 --- a/SPECS/httpd.spec +++ b/SPECS/httpd.spec @@ -13,10 +13,10 @@ Summary: Apache HTTP Server Name: httpd Version: 2.4.37 -Release: 21%{?dist} +Release: 30%{?dist} URL: https://httpd.apache.org/ Source0: https://www.apache.org/dist/httpd/httpd-%{version}.tar.bz2 -Source1: centos-noindex-8.0.tar.gz +Source1: index.html Source2: httpd.logrotate Source3: instance.conf Source4: httpd-ssl-pass-dialog @@ -90,9 +90,12 @@ Patch32: httpd-2.4.37-sslprotdefault.patch Patch33: httpd-2.4.37-mod-md-mod-ssl-hooks.patch # https://bugzilla.redhat.com/show_bug.cgi?id=1725031 Patch34: httpd-2.4.37-r1861793+.patch -# https://bugzilla.redhat.com/show_bug.cgi?id=1704317ě +# https://bugzilla.redhat.com/show_bug.cgi?id=1704317 Patch35: httpd-2.4.37-sslkeylogfile-support.patch - +# https://bugzilla.redhat.com/show_bug.cgi?id=1794728 +Patch36: httpd-2.4.37-session-expiry-updt-int.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1209162 +Patch37: httpd-2.4.37-logjournal.patch # Bug fixes # https://bugzilla.redhat.com/show_bug.cgi?id=1397243 Patch61: httpd-2.4.35-r1738878.patch @@ -122,6 +125,10 @@ Patch73: httpd-2.4.35-ocsp-wrong-ctx.patch Patch74: httpd-2.4.37-r1828172+.patch # https://bugzilla.redhat.com/show_bug.cgi?id=1775158 Patch75: httpd-2.4.37-r1870095+.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1771847 +Patch76: httpd-2.4.37-proxy-continue.patch +Patch77: httpd-2.4.37-balancer-failover.patch + # Security fixes Patch200: httpd-2.4.37-r1851471.patch @@ -137,6 +144,16 @@ Patch204: httpd-2.4.37-CVE-2019-0220.patch # https://bugzilla.redhat.com/show_bug.cgi?id=1741864 # https://bugzilla.redhat.com/show_bug.cgi?id=1741868 Patch205: httpd-2.4.34-CVE-2019-9511-and-9516-and-9517.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1823259 +# https://bugzilla.redhat.com/show_bug.cgi?id=1747284 +# fixes both CVE-2020-1927 and CVE-2019-10098 +Patch206: httpd-2.4.37-CVE-2019-10098.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1747281 +Patch207: httpd-2.4.37-CVE-2019-10092.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1747291 +Patch208: httpd-2.4.37-CVE-2019-10097.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1820772 +Patch209: httpd-2.4.37-CVE-2020-1934.patch License: ASL 2.0 Group: System Environment/Daemons @@ -280,6 +297,8 @@ interface for storing and accessing per-user session data. %patch33 -p1 -b .mod-md-mod-ssl-hooks %patch34 -p1 -b .r1861793+ %patch35 -p1 -b .sslkeylogfile-support +%patch36 -p1 -b .session-expiry +%patch37 -p1 -b .logjournal %patch61 -p1 -b .r1738878 %patch62 -p1 -b .r1633085 @@ -294,6 +313,9 @@ interface for storing and accessing per-user session data. %patch73 -p1 -b .ocspwrongctx %patch74 -p1 -b .r1828172+ %patch75 -p1 -b .r1870095+ +%patch76 -p1 -b .proxy-continue +%patch77 -p1 -b .balancer-failover + %patch200 -p1 -b .r1851471 %patch201 -p1 -b .CVE-2019-0211 @@ -301,6 +323,10 @@ interface for storing and accessing per-user session data. 
%patch203 -p1 -b .CVE-2019-0217 %patch204 -p1 -b .CVE-2019-0220 %patch205 -p1 -b .CVE-2019-9511-and-9516-and-9517 +%patch206 -p1 -b .CVE-2019-10098 +%patch207 -p1 -b .CVE-2019-10092 +%patch208 -p1 -b .CVE-2019-10097 +%patch209 -p1 -b .CVE-2020-1934 # Patch in the vendor string sed -i '/^#define PLATFORM/s/Unix/%{vstring}/' os/unix/os.h @@ -504,7 +530,8 @@ EOF # Handle contentdir mkdir $RPM_BUILD_ROOT%{contentdir}/noindex -tar xzf %{SOURCE1} -C $RPM_BUILD_ROOT%{contentdir}/noindex/ --strip-components=1 +install -m 644 -p $RPM_SOURCE_DIR/index.html \ + $RPM_BUILD_ROOT%{contentdir}/noindex/index.html rm -rf %{contentdir}/htdocs # remove manual sources @@ -612,7 +639,7 @@ exit 0 %systemd_preun httpd.service htcacheclean.service httpd.socket %postun -%systemd_postun +%systemd_postun httpd.service htcacheclean.service httpd.socket # Trigger for conversion from SysV, per guidelines at: # https://fedoraproject.org/wiki/Packaging:ScriptletSnippets#Systemd @@ -718,7 +745,7 @@ rm -rf $RPM_BUILD_ROOT %{contentdir}/error/README %{contentdir}/error/*.var %{contentdir}/error/include/*.html -%{contentdir}/noindex/* +%{contentdir}/noindex/index.html %attr(0710,root,apache) %dir /run/httpd %attr(0700,apache,apache) %dir /run/httpd/htcacheclean @@ -802,8 +829,29 @@ rm -rf $RPM_BUILD_ROOT %{_rpmconfigdir}/macros.d/macros.httpd %changelog -* Tue Apr 28 2020 CentOS Sources - 2.4.37-21.el8.centos -- Apply debranding changes +* Mon Jun 15 2020 Joe Orton - 2.4.37-30 +- Resolves: #1209162 - support logging to journald from CustomLog + +* Mon Jun 08 2020 Lubos Uhliarik - 2.4.37-29 +- Resolves: #1823263 (CVE-2020-1934) - CVE-2020-1934 httpd: mod_proxy_ftp use of + uninitialized value + +* Fri May 29 2020 Lubos Uhliarik - 2.4.37-28 +- Related: #1771847 - BalancerMember ping parameter for mod_proxy_http + doesn't work + +* Tue Apr 14 2020 Lubos Uhliarik - 2.4.37-27 +- Resolves: #1823259 - CVE-2020-1927 httpd:2.4/httpd: mod_rewrite configurations + vulnerable to open redirect +- Resolves: #1747284 - CVE-2019-10098 httpd:2.4/httpd: mod_rewrite potential + open redirect +- Resolves: #1747281 - CVE-2019-10092 httpd:2.4/httpd: limited cross-site + scripting in mod_proxy error page +- Resolves: #1747291 - CVE-2019-10097 httpd:2.4/httpd: null-pointer dereference + in mod_remoteip +- Resolves: #1771847 - BalancerMember ping parameter for mod_proxy_http + doesn't work +- Resolves: #1794728 - Backport of SessionExpiryUpdateInterval directive * Mon Dec 02 2019 Lubos Uhliarik - 2.4.37-21 - Resolves: #1775158 - POST request with TLS 1.3 PHA client auth fails: