diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..6e3f15e --- /dev/null +++ b/.gitignore @@ -0,0 +1 @@ +SOURCES/httpd-2.4.37.tar.bz2 diff --git a/.httpd.metadata b/.httpd.metadata new file mode 100644 index 0000000..e540f7e --- /dev/null +++ b/.httpd.metadata @@ -0,0 +1 @@ +4a38471de821288b0300148016f2b03dfee8adf2 SOURCES/httpd-2.4.37.tar.bz2 diff --git a/SOURCES/00-base.conf b/SOURCES/00-base.conf new file mode 100644 index 0000000..28dacb3 --- /dev/null +++ b/SOURCES/00-base.conf @@ -0,0 +1,68 @@ +# +# This file loads most of the modules included with the Apache HTTP +# Server itself. +# + +LoadModule access_compat_module modules/mod_access_compat.so +LoadModule actions_module modules/mod_actions.so +LoadModule alias_module modules/mod_alias.so +LoadModule allowmethods_module modules/mod_allowmethods.so +LoadModule auth_basic_module modules/mod_auth_basic.so +LoadModule auth_digest_module modules/mod_auth_digest.so +LoadModule authn_anon_module modules/mod_authn_anon.so +LoadModule authn_core_module modules/mod_authn_core.so +LoadModule authn_dbd_module modules/mod_authn_dbd.so +LoadModule authn_dbm_module modules/mod_authn_dbm.so +LoadModule authn_file_module modules/mod_authn_file.so +LoadModule authn_socache_module modules/mod_authn_socache.so +LoadModule authz_core_module modules/mod_authz_core.so +LoadModule authz_dbd_module modules/mod_authz_dbd.so +LoadModule authz_dbm_module modules/mod_authz_dbm.so +LoadModule authz_groupfile_module modules/mod_authz_groupfile.so +LoadModule authz_host_module modules/mod_authz_host.so +LoadModule authz_owner_module modules/mod_authz_owner.so +LoadModule authz_user_module modules/mod_authz_user.so +LoadModule autoindex_module modules/mod_autoindex.so +LoadModule brotli_module modules/mod_brotli.so +LoadModule cache_module modules/mod_cache.so +LoadModule cache_disk_module modules/mod_cache_disk.so +LoadModule cache_socache_module modules/mod_cache_socache.so +LoadModule data_module 
modules/mod_data.so +LoadModule dbd_module modules/mod_dbd.so +LoadModule deflate_module modules/mod_deflate.so +LoadModule dir_module modules/mod_dir.so +LoadModule dumpio_module modules/mod_dumpio.so +LoadModule echo_module modules/mod_echo.so +LoadModule env_module modules/mod_env.so +LoadModule expires_module modules/mod_expires.so +LoadModule ext_filter_module modules/mod_ext_filter.so +LoadModule filter_module modules/mod_filter.so +LoadModule headers_module modules/mod_headers.so +LoadModule include_module modules/mod_include.so +LoadModule info_module modules/mod_info.so +LoadModule log_config_module modules/mod_log_config.so +LoadModule logio_module modules/mod_logio.so +LoadModule macro_module modules/mod_macro.so +LoadModule mime_magic_module modules/mod_mime_magic.so +LoadModule mime_module modules/mod_mime.so +LoadModule negotiation_module modules/mod_negotiation.so +LoadModule remoteip_module modules/mod_remoteip.so +LoadModule reqtimeout_module modules/mod_reqtimeout.so +LoadModule request_module modules/mod_request.so +LoadModule rewrite_module modules/mod_rewrite.so +LoadModule setenvif_module modules/mod_setenvif.so +LoadModule slotmem_plain_module modules/mod_slotmem_plain.so +LoadModule slotmem_shm_module modules/mod_slotmem_shm.so +LoadModule socache_dbm_module modules/mod_socache_dbm.so +LoadModule socache_memcache_module modules/mod_socache_memcache.so +LoadModule socache_shmcb_module modules/mod_socache_shmcb.so +LoadModule status_module modules/mod_status.so +LoadModule substitute_module modules/mod_substitute.so +LoadModule suexec_module modules/mod_suexec.so +LoadModule unique_id_module modules/mod_unique_id.so +LoadModule unixd_module modules/mod_unixd.so +LoadModule userdir_module modules/mod_userdir.so +LoadModule version_module modules/mod_version.so +LoadModule vhost_alias_module modules/mod_vhost_alias.so +LoadModule watchdog_module modules/mod_watchdog.so + diff --git a/SOURCES/00-dav.conf b/SOURCES/00-dav.conf new file mode 100644 
index 0000000..e6af8de --- /dev/null +++ b/SOURCES/00-dav.conf @@ -0,0 +1,3 @@ +LoadModule dav_module modules/mod_dav.so +LoadModule dav_fs_module modules/mod_dav_fs.so +LoadModule dav_lock_module modules/mod_dav_lock.so diff --git a/SOURCES/00-lua.conf b/SOURCES/00-lua.conf new file mode 100644 index 0000000..9e0d0db --- /dev/null +++ b/SOURCES/00-lua.conf @@ -0,0 +1 @@ +LoadModule lua_module modules/mod_lua.so diff --git a/SOURCES/00-mpm.conf b/SOURCES/00-mpm.conf new file mode 100644 index 0000000..a4a70b8 --- /dev/null +++ b/SOURCES/00-mpm.conf @@ -0,0 +1,23 @@ +# Select the MPM module which should be used by uncommenting exactly +# one of the following LoadModule lines. See the httpd.conf(5) man +# page for more information on changing the MPM. + +# prefork MPM: Implements a non-threaded, pre-forking web server +# See: http://httpd.apache.org/docs/2.4/mod/prefork.html +# +# NOTE: If enabling prefork, the httpd_graceful_shutdown SELinux +# boolean should be enabled, to allow graceful stop/shutdown. +# +#LoadModule mpm_prefork_module modules/mod_mpm_prefork.so + +# worker MPM: Multi-Processing Module implementing a hybrid +# multi-threaded multi-process web server +# See: http://httpd.apache.org/docs/2.4/mod/worker.html +# +#LoadModule mpm_worker_module modules/mod_mpm_worker.so + +# event MPM: A variant of the worker MPM with the goal of consuming +# threads only for connections with active processing +# See: http://httpd.apache.org/docs/2.4/mod/event.html +# +#LoadModule mpm_event_module modules/mod_mpm_event.so diff --git a/SOURCES/00-optional.conf b/SOURCES/00-optional.conf new file mode 100644 index 0000000..ef584ec --- /dev/null +++ b/SOURCES/00-optional.conf @@ -0,0 +1,18 @@ +# +# This file lists modules included with the Apache HTTP Server +# which are not enabled by default. 
+# + +#LoadModule asis_module modules/mod_asis.so +#LoadModule buffer_module modules/mod_buffer.so +#LoadModule heartbeat_module modules/mod_heartbeat.so +#LoadModule heartmonitor_module modules/mod_heartmonitor.so +#LoadModule usertrack_module modules/mod_usertrack.so +#LoadModule dialup_module modules/mod_dialup.so +#LoadModule charset_lite_module modules/mod_charset_lite.so +#LoadModule log_debug_module modules/mod_log_debug.so +#LoadModule log_forensic_module modules/mod_log_forensic.so +#LoadModule ratelimit_module modules/mod_ratelimit.so +#LoadModule reflector_module modules/mod_reflector.so +#LoadModule sed_module modules/mod_sed.so +#LoadModule speling_module modules/mod_speling.so diff --git a/SOURCES/00-proxy.conf b/SOURCES/00-proxy.conf new file mode 100644 index 0000000..f0f84c2 --- /dev/null +++ b/SOURCES/00-proxy.conf @@ -0,0 +1,18 @@ +# This file configures all the proxy modules: +LoadModule proxy_module modules/mod_proxy.so +LoadModule lbmethod_bybusyness_module modules/mod_lbmethod_bybusyness.so +LoadModule lbmethod_byrequests_module modules/mod_lbmethod_byrequests.so +LoadModule lbmethod_bytraffic_module modules/mod_lbmethod_bytraffic.so +LoadModule lbmethod_heartbeat_module modules/mod_lbmethod_heartbeat.so +LoadModule proxy_ajp_module modules/mod_proxy_ajp.so +LoadModule proxy_balancer_module modules/mod_proxy_balancer.so +LoadModule proxy_connect_module modules/mod_proxy_connect.so +LoadModule proxy_express_module modules/mod_proxy_express.so +LoadModule proxy_fcgi_module modules/mod_proxy_fcgi.so +LoadModule proxy_fdpass_module modules/mod_proxy_fdpass.so +LoadModule proxy_ftp_module modules/mod_proxy_ftp.so +LoadModule proxy_http_module modules/mod_proxy_http.so +LoadModule proxy_hcheck_module modules/mod_proxy_hcheck.so +LoadModule proxy_scgi_module modules/mod_proxy_scgi.so +LoadModule proxy_uwsgi_module modules/mod_proxy_uwsgi.so +LoadModule proxy_wstunnel_module modules/mod_proxy_wstunnel.so diff --git a/SOURCES/00-proxyhtml.conf 
b/SOURCES/00-proxyhtml.conf new file mode 100644 index 0000000..9a9b107 --- /dev/null +++ b/SOURCES/00-proxyhtml.conf @@ -0,0 +1,3 @@ +# This file configures mod_proxy_html and mod_xml2enc: +LoadModule xml2enc_module modules/mod_xml2enc.so +LoadModule proxy_html_module modules/mod_proxy_html.so diff --git a/SOURCES/00-ssl.conf b/SOURCES/00-ssl.conf new file mode 100644 index 0000000..53235cd --- /dev/null +++ b/SOURCES/00-ssl.conf @@ -0,0 +1 @@ +LoadModule ssl_module modules/mod_ssl.so diff --git a/SOURCES/00-systemd.conf b/SOURCES/00-systemd.conf new file mode 100644 index 0000000..b208c97 --- /dev/null +++ b/SOURCES/00-systemd.conf @@ -0,0 +1,2 @@ +# This file configures systemd module: +LoadModule systemd_module modules/mod_systemd.so diff --git a/SOURCES/01-cgi.conf b/SOURCES/01-cgi.conf new file mode 100644 index 0000000..5b8b936 --- /dev/null +++ b/SOURCES/01-cgi.conf @@ -0,0 +1,14 @@ +# This configuration file loads a CGI module appropriate to the MPM +# which has been configured in 00-mpm.conf. mod_cgid should be used +# with a threaded MPM; mod_cgi with the prefork MPM. 
+ + + LoadModule cgid_module modules/mod_cgid.so + + + LoadModule cgid_module modules/mod_cgid.so + + + LoadModule cgi_module modules/mod_cgi.so + + diff --git a/SOURCES/01-ldap.conf b/SOURCES/01-ldap.conf new file mode 100644 index 0000000..f2ac2a2 --- /dev/null +++ b/SOURCES/01-ldap.conf @@ -0,0 +1,3 @@ +# This file configures the LDAP modules: +LoadModule ldap_module modules/mod_ldap.so +LoadModule authnz_ldap_module modules/mod_authnz_ldap.so diff --git a/SOURCES/01-session.conf b/SOURCES/01-session.conf new file mode 100644 index 0000000..f8d4d92 --- /dev/null +++ b/SOURCES/01-session.conf @@ -0,0 +1,6 @@ +LoadModule session_module modules/mod_session.so +LoadModule session_cookie_module modules/mod_session_cookie.so +LoadModule session_dbd_module modules/mod_session_dbd.so +LoadModule auth_form_module modules/mod_auth_form.so + +#LoadModule session_crypto_module modules/mod_session_crypto.so diff --git a/SOURCES/10-listen443.conf b/SOURCES/10-listen443.conf new file mode 100644 index 0000000..7e2df97 --- /dev/null +++ b/SOURCES/10-listen443.conf @@ -0,0 +1,5 @@ +# This file is part of mod_ssl. It enables listening on port 443 when +# socket activation is used. + +[Socket] +ListenStream=443 diff --git a/SOURCES/README.confd b/SOURCES/README.confd new file mode 100644 index 0000000..6071deb --- /dev/null +++ b/SOURCES/README.confd @@ -0,0 +1,9 @@ + +This directory holds configuration files for the Apache HTTP Server; +any files in this directory which have the ".conf" extension will be +processed as httpd configuration files. The directory is used in +addition to the directory /etc/httpd/conf.modules.d/, which contains +configuration files necessary to load modules. + +Files are processed in sorted order. See httpd.conf(5) for more +information. 
diff --git a/SOURCES/README.confmod b/SOURCES/README.confmod new file mode 100644 index 0000000..f4b055d --- /dev/null +++ b/SOURCES/README.confmod @@ -0,0 +1,10 @@ + +This directory holds configuration files for the Apache HTTP Server; +any files in this directory which have the ".conf" extension will be +processed as httpd configuration files. This directory contains +configuration fragments necessary only to load modules. +Administrators should use the directory "/etc/httpd/conf.d" to modify +the configuration of httpd, or any modules. + +Files are processed in sorted order and should have a two digit +numeric prefix. See httpd.conf(5) for more information. diff --git a/SOURCES/action-configtest.sh b/SOURCES/action-configtest.sh new file mode 100644 index 0000000..6685b0a --- /dev/null +++ b/SOURCES/action-configtest.sh @@ -0,0 +1,2 @@ +#!/bin/sh +exec /sbin/apachectl configtest "$@" diff --git a/SOURCES/action-graceful.sh b/SOURCES/action-graceful.sh new file mode 100644 index 0000000..dc68b2e --- /dev/null +++ b/SOURCES/action-graceful.sh @@ -0,0 +1,2 @@ +#!/bin/sh +exec /sbin/apachectl graceful "$@" diff --git a/SOURCES/config.layout b/SOURCES/config.layout new file mode 100644 index 0000000..3a9f6c8 --- /dev/null +++ b/SOURCES/config.layout @@ -0,0 +1,24 @@ +# Layout used in Fedora httpd packaging. 
+ + prefix: /etc/httpd + localstatedir: /var + exec_prefix: /usr + bindir: ${exec_prefix}/bin + sbindir: ${exec_prefix}/sbin + libdir: ${exec_prefix}/lib + libexecdir: ${exec_prefix}/libexec + mandir: ${exec_prefix}/man + sysconfdir: /etc/httpd/conf + datadir: ${exec_prefix}/share/httpd + installbuilddir: ${libdir}/httpd/build + errordir: ${datadir}/error + iconsdir: ${datadir}/icons + htdocsdir: ${localstatedir}/www/html + manualdir: ${datadir}/manual + cgidir: ${localstatedir}/www/cgi-bin + includedir: ${exec_prefix}/include/httpd + runtimedir: ${prefix}/run + logfiledir: ${localstatedir}/log/httpd + statedir: ${prefix}/state + proxycachedir: ${localstatedir}/cache/httpd/proxy + diff --git a/SOURCES/htcacheclean.service b/SOURCES/htcacheclean.service new file mode 100644 index 0000000..d1e9d60 --- /dev/null +++ b/SOURCES/htcacheclean.service @@ -0,0 +1,11 @@ +[Unit] +Description=Disk Cache Cleaning Daemon for the Apache HTTP Server +After=httpd.service +Documentation=man:htcacheclean.service(8) + +[Service] +Type=forking +User=apache +PIDFile=/run/httpd/htcacheclean/pid +EnvironmentFile=/etc/sysconfig/htcacheclean +ExecStart=/usr/sbin/htcacheclean -P /run/httpd/htcacheclean/pid -d $INTERVAL -p $CACHE_ROOT -l $LIMIT $OPTIONS diff --git a/SOURCES/htcacheclean.service.xml b/SOURCES/htcacheclean.service.xml new file mode 100644 index 0000000..01b68e4 --- /dev/null +++ b/SOURCES/htcacheclean.service.xml @@ -0,0 +1,123 @@ + + + + + + htcacheclean systemd unit + httpd + AuthorOrtonJoejorton@redhat.com + + + + htcacheclean.service + 8 + + + + htcacheclean.service + htcacheclean unit file for systemd + + + + + /usr/lib/systemd/system/htcacheclean.service + + + + + Description + + This manual page describes the systemd + unit file for the htcacheclean daemon. This + unit file provides a service which runs + htcacheclean in daemon mode, + periodically cleaning the disk cache root to ensure disk space + usage is within configured limits. 
+ + + + + Options + + The service is configured by configuration file + /etc/sysconfig/htcacheclean. The following + variables are used, following standard systemd + EnvironmentFile= syntax: + + + + INTERVAL= + + Sets the interval between cache clean runs, in + minutes. By default this is configured as + 15. + + + + CACHE_ROOT= + + Sets the directory name used for the cache + root. By default this is configured as + /var/cache/httpd/proxy. + + + + LIMIT= + + Sets the total disk cache space limit, in + bytes. Use a K or M + suffix to signify kilobytes or megabytes. By default this is + set to 100M. + + + + OPTIONS= + + Any other options to pass to + htcacheclean. + + + + + + Files + + /usr/lib/systemd/system/htcacheclean.service, + /etc/sysconfig/htcacheclean + + + + See also + + + htcacheclean8, + httpd8, + httpd.service8, + systemd.exec8 + + + + + + diff --git a/SOURCES/htcacheclean.sysconf b/SOURCES/htcacheclean.sysconf new file mode 100644 index 0000000..fffa17b --- /dev/null +++ b/SOURCES/htcacheclean.sysconf @@ -0,0 +1,16 @@ +# +# Configuration options for systemd service, htcacheclean.service. +# See htcacheclean(8) for more information on available options. +# + +# Interval between cache clean runs, in minutes +INTERVAL=15 + +# Default cache root. +CACHE_ROOT=/var/cache/httpd/proxy + +# Cache size limit in bytes (K=Kbytes, M=Mbytes) +LIMIT=100M + +# Any other options... +OPTIONS= diff --git a/SOURCES/httpd-2.4.28-apxs.patch b/SOURCES/httpd-2.4.28-apxs.patch new file mode 100644 index 0000000..7016dec --- /dev/null +++ b/SOURCES/httpd-2.4.28-apxs.patch @@ -0,0 +1,58 @@ +diff --git a/support/apxs.in b/support/apxs.in +index ad1287f..efcfcf6 100644 +--- a/support/apxs.in ++++ b/support/apxs.in +@@ -25,7 +25,18 @@ package apxs; + + my %config_vars = (); + +-my $installbuilddir = "@exp_installbuilddir@"; ++# Awful hack to make apxs libdir-agnostic: ++my $pkg_config = "/usr/bin/pkg-config"; ++if (! 
-x "$pkg_config") { ++ error("$pkg_config not found!"); ++ exit(1); ++} ++ ++my $libdir = `pkg-config --variable=libdir apr-1`; ++chomp $libdir; ++ ++my $installbuilddir = $libdir . "/httpd/build"; ++ + get_config_vars("$installbuilddir/config_vars.mk",\%config_vars); + + # read the configuration variables once +@@ -275,7 +286,7 @@ if ($opt_g) { + $data =~ s|%NAME%|$name|sg; + $data =~ s|%TARGET%|$CFG_TARGET|sg; + $data =~ s|%PREFIX%|$prefix|sg; +- $data =~ s|%INSTALLBUILDDIR%|$installbuilddir|sg; ++ $data =~ s|%LIBDIR%|$libdir|sg; + + my ($mkf, $mods, $src) = ($data =~ m|^(.+)-=#=-\n(.+)-=#=-\n(.+)|s); + +@@ -453,11 +464,11 @@ if ($opt_c) { + my $ldflags = "$CFG_LDFLAGS"; + if ($opt_p == 1) { + +- my $apr_libs=`$apr_config --cflags --ldflags --link-libtool --libs`; ++ my $apr_libs=`$apr_config --cflags --ldflags --link-libtool`; + chomp($apr_libs); + my $apu_libs=""; + if ($apr_major_version < 2) { +- $apu_libs=`$apu_config --ldflags --link-libtool --libs`; ++ $apu_libs=`$apu_config --ldflags --link-libtool`; + chomp($apu_libs); + } + +@@ -672,8 +683,8 @@ __DATA__ + + builddir=. 
+ top_srcdir=%PREFIX% +-top_builddir=%PREFIX% +-include %INSTALLBUILDDIR%/special.mk ++top_builddir=%LIBDIR%/httpd ++include %LIBDIR%/httpd/build/special.mk + + # the used tools + APACHECTL=apachectl diff --git a/SOURCES/httpd-2.4.28-icons.patch b/SOURCES/httpd-2.4.28-icons.patch new file mode 100644 index 0000000..904d6a4 --- /dev/null +++ b/SOURCES/httpd-2.4.28-icons.patch @@ -0,0 +1,29 @@ + +- Fix config for /icons/ dir to allow symlink to poweredby.png +- Avoid using coredump GIF for a directory called "core" + +Upstream-Status: vendor specific patch + +diff --git a/docs/conf/extra/httpd-autoindex.conf.in b/docs/conf/extra/httpd-autoindex.conf.in +index 51b02ed..dd6f2c6 100644 +--- a/docs/conf/extra/httpd-autoindex.conf.in ++++ b/docs/conf/extra/httpd-autoindex.conf.in +@@ -21,7 +21,7 @@ IndexOptions FancyIndexing HTMLTable VersionSort + Alias /icons/ "@exp_iconsdir@/" + + +- Options Indexes MultiViews ++ Options Indexes MultiViews FollowSymlinks + AllowOverride None + Require all granted + +@@ -53,7 +53,8 @@ AddIcon /icons/dvi.gif .dvi + AddIcon /icons/uuencoded.gif .uu + AddIcon /icons/script.gif .conf .sh .shar .csh .ksh .tcl + AddIcon /icons/tex.gif .tex +-AddIcon /icons/bomb.gif core ++AddIcon /icons/bomb.gif /core ++AddIcon /icons/bomb.gif */core.* + + AddIcon /icons/back.gif .. 
+ AddIcon /icons/hand.right.gif README diff --git a/SOURCES/httpd-2.4.28-r1811831.patch b/SOURCES/httpd-2.4.28-r1811831.patch new file mode 100644 index 0000000..b8d8215 --- /dev/null +++ b/SOURCES/httpd-2.4.28-r1811831.patch @@ -0,0 +1,81 @@ +diff --git a/server/util_script.c b/server/util_script.c +index 4121ae0..b7f8674 100644 +--- a/server/util_script.c ++++ b/server/util_script.c +@@ -92,9 +92,21 @@ static void add_unless_null(apr_table_t *table, const char *name, const char *va + } + } + +-static void env2env(apr_table_t *table, const char *name) ++/* Sets variable @name in table @dest from r->subprocess_env if ++ * available, else from the environment, else from @fallback if ++ * non-NULL. */ ++static void env2env(apr_table_t *dest, request_rec *r, ++ const char *name, const char *fallback) + { +- add_unless_null(table, name, getenv(name)); ++ const char *val; ++ ++ val = apr_table_get(r->subprocess_env, name); ++ if (!val) ++ val = apr_pstrdup(r->pool, getenv(name)); ++ if (!val) ++ val = apr_pstrdup(r->pool, fallback); ++ if (val) ++ apr_table_addn(dest, name, val); + } + + AP_DECLARE(char **) ap_create_environment(apr_pool_t *p, apr_table_t *t) +@@ -211,37 +223,29 @@ AP_DECLARE(void) ap_add_common_vars(request_rec *r) + add_unless_null(e, http2env(r, hdrs[i].key), hdrs[i].val); + } + +- env_temp = apr_table_get(r->subprocess_env, "PATH"); +- if (env_temp == NULL) { +- env_temp = getenv("PATH"); +- } +- if (env_temp == NULL) { +- env_temp = DEFAULT_PATH; +- } +- apr_table_addn(e, "PATH", apr_pstrdup(r->pool, env_temp)); +- ++ env2env(e, r, "PATH", DEFAULT_PATH); + #if defined(WIN32) +- env2env(e, "SystemRoot"); +- env2env(e, "COMSPEC"); +- env2env(e, "PATHEXT"); +- env2env(e, "WINDIR"); ++ env2env(e, r, "SystemRoot", NULL); ++ env2env(e, r, "COMSPEC", NULL); ++ env2env(e, r, "PATHEXT", NULL); ++ env2env(e, r, "WINDIR", NULL); + #elif defined(OS2) +- env2env(e, "COMSPEC"); +- env2env(e, "ETC"); +- env2env(e, "DPATH"); +- env2env(e, "PERLLIB_PREFIX"); ++ 
env2env(e, r, "COMSPEC", NULL); ++ env2env(e, r, "ETC", NULL); ++ env2env(e, r, "DPATH", NULL); ++ env2env(e, r, "PERLLIB_PREFIX", NULL); + #elif defined(BEOS) +- env2env(e, "LIBRARY_PATH"); ++ env2env(e, r, "LIBRARY_PATH", NULL); + #elif defined(DARWIN) +- env2env(e, "DYLD_LIBRARY_PATH"); ++ env2env(e, r, "DYLD_LIBRARY_PATH", NULL); + #elif defined(_AIX) +- env2env(e, "LIBPATH"); ++ env2env(e, r, "LIBPATH", NULL); + #elif defined(__HPUX__) + /* HPUX PARISC 2.0W knows both, otherwise redundancy is harmless */ +- env2env(e, "SHLIB_PATH"); +- env2env(e, "LD_LIBRARY_PATH"); ++ env2env(e, r, "SHLIB_PATH", NULL); ++ env2env(e, r, "LD_LIBRARY_PATH", NULL); + #else /* Some Unix */ +- env2env(e, "LD_LIBRARY_PATH"); ++ env2env(e, r, "LD_LIBRARY_PATH", NULL); + #endif + + apr_table_addn(e, "SERVER_SIGNATURE", ap_psignature("", r)); diff --git a/SOURCES/httpd-2.4.28-socket-activation.patch b/SOURCES/httpd-2.4.28-socket-activation.patch new file mode 100644 index 0000000..dbdd80c --- /dev/null +++ b/SOURCES/httpd-2.4.28-socket-activation.patch @@ -0,0 +1,300 @@ +diff --git a/server/listen.c b/server/listen.c +index a8e9e6f..1a6c1d3 100644 +--- a/server/listen.c ++++ b/server/listen.c +@@ -34,6 +34,10 @@ + #include + #endif + ++#ifdef HAVE_SYSTEMD ++#include ++#endif ++ + /* we know core's module_index is 0 */ + #undef APLOG_MODULE_INDEX + #define APLOG_MODULE_INDEX AP_CORE_MODULE_INDEX +@@ -59,9 +63,12 @@ static int ap_listenbacklog; + static int ap_listencbratio; + static int send_buffer_size; + static int receive_buffer_size; ++#ifdef HAVE_SYSTEMD ++static int use_systemd = -1; ++#endif + + /* TODO: make_sock is just begging and screaming for APR abstraction */ +-static apr_status_t make_sock(apr_pool_t *p, ap_listen_rec *server) ++static apr_status_t make_sock(apr_pool_t *p, ap_listen_rec *server, int do_bind_listen) + { + apr_socket_t *s = server->sd; + int one = 1; +@@ -94,20 +101,6 @@ static apr_status_t make_sock(apr_pool_t *p, ap_listen_rec *server) + return stat; + } 
+ +-#if APR_HAVE_IPV6 +- if (server->bind_addr->family == APR_INET6) { +- stat = apr_socket_opt_set(s, APR_IPV6_V6ONLY, v6only_setting); +- if (stat != APR_SUCCESS && stat != APR_ENOTIMPL) { +- ap_log_perror(APLOG_MARK, APLOG_CRIT, stat, p, APLOGNO(00069) +- "make_sock: for address %pI, apr_socket_opt_set: " +- "(IPV6_V6ONLY)", +- server->bind_addr); +- apr_socket_close(s); +- return stat; +- } +- } +-#endif +- + /* + * To send data over high bandwidth-delay connections at full + * speed we must force the TCP window to open wide enough to keep the +@@ -169,21 +162,37 @@ static apr_status_t make_sock(apr_pool_t *p, ap_listen_rec *server) + } + #endif + +- if ((stat = apr_socket_bind(s, server->bind_addr)) != APR_SUCCESS) { +- ap_log_perror(APLOG_MARK, APLOG_STARTUP|APLOG_CRIT, stat, p, APLOGNO(00072) +- "make_sock: could not bind to address %pI", +- server->bind_addr); +- apr_socket_close(s); +- return stat; +- } ++ if (do_bind_listen) { ++#if APR_HAVE_IPV6 ++ if (server->bind_addr->family == APR_INET6) { ++ stat = apr_socket_opt_set(s, APR_IPV6_V6ONLY, v6only_setting); ++ if (stat != APR_SUCCESS && stat != APR_ENOTIMPL) { ++ ap_log_perror(APLOG_MARK, APLOG_CRIT, stat, p, APLOGNO(00069) ++ "make_sock: for address %pI, apr_socket_opt_set: " ++ "(IPV6_V6ONLY)", ++ server->bind_addr); ++ apr_socket_close(s); ++ return stat; ++ } ++ } ++#endif + +- if ((stat = apr_socket_listen(s, ap_listenbacklog)) != APR_SUCCESS) { +- ap_log_perror(APLOG_MARK, APLOG_STARTUP|APLOG_ERR, stat, p, APLOGNO(00073) +- "make_sock: unable to listen for connections " +- "on address %pI", +- server->bind_addr); +- apr_socket_close(s); +- return stat; ++ if ((stat = apr_socket_bind(s, server->bind_addr)) != APR_SUCCESS) { ++ ap_log_perror(APLOG_MARK, APLOG_STARTUP|APLOG_CRIT, stat, p, APLOGNO(00072) ++ "make_sock: could not bind to address %pI", ++ server->bind_addr); ++ apr_socket_close(s); ++ return stat; ++ } ++ ++ if ((stat = apr_socket_listen(s, ap_listenbacklog)) != APR_SUCCESS) { ++ 
ap_log_perror(APLOG_MARK, APLOG_STARTUP|APLOG_ERR, stat, p, APLOGNO(00073) ++ "make_sock: unable to listen for connections " ++ "on address %pI", ++ server->bind_addr); ++ apr_socket_close(s); ++ return stat; ++ } + } + + #ifdef WIN32 +@@ -315,6 +324,123 @@ static int find_listeners(ap_listen_rec **from, ap_listen_rec **to, + return found; + } + ++#ifdef HAVE_SYSTEMD ++ ++static int find_systemd_socket(process_rec * process, apr_port_t port) { ++ int fdcount, fd; ++ int sdc = sd_listen_fds(0); ++ ++ if (sdc < 0) { ++ ap_log_perror(APLOG_MARK, APLOG_CRIT, sdc, process->pool, APLOGNO(02486) ++ "find_systemd_socket: Error parsing enviroment, sd_listen_fds returned %d", ++ sdc); ++ return -1; ++ } ++ ++ if (sdc == 0) { ++ ap_log_perror(APLOG_MARK, APLOG_CRIT, sdc, process->pool, APLOGNO(02487) ++ "find_systemd_socket: At least one socket must be set."); ++ return -1; ++ } ++ ++ fdcount = atoi(getenv("LISTEN_FDS")); ++ for (fd = SD_LISTEN_FDS_START; fd < SD_LISTEN_FDS_START + fdcount; fd++) { ++ if (sd_is_socket_inet(fd, 0, 0, -1, port) > 0) { ++ return fd; ++ } ++ } ++ ++ return -1; ++} ++ ++static apr_status_t alloc_systemd_listener(process_rec * process, ++ int fd, const char *proto, ++ ap_listen_rec **out_rec) ++{ ++ apr_status_t rv; ++ struct sockaddr sa; ++ socklen_t len = sizeof(struct sockaddr); ++ apr_os_sock_info_t si; ++ ap_listen_rec *rec; ++ *out_rec = NULL; ++ ++ memset(&si, 0, sizeof(si)); ++ ++ rv = getsockname(fd, &sa, &len); ++ ++ if (rv != 0) { ++ rv = apr_get_netos_error(); ++ ap_log_perror(APLOG_MARK, APLOG_CRIT, rv, process->pool, APLOGNO(02489) ++ "getsockname on %d failed.", fd); ++ return rv; ++ } ++ ++ si.os_sock = &fd; ++ si.family = sa.sa_family; ++ si.local = &sa; ++ si.type = SOCK_STREAM; ++ si.protocol = APR_PROTO_TCP; ++ ++ rec = apr_palloc(process->pool, sizeof(ap_listen_rec)); ++ rec->active = 0; ++ rec->next = 0; ++ ++ ++ rv = apr_os_sock_make(&rec->sd, &si, process->pool); ++ if (rv != APR_SUCCESS) { ++ ap_log_perror(APLOG_MARK, 
APLOG_CRIT, rv, process->pool, APLOGNO(02490) ++ "apr_os_sock_make on %d failed.", fd); ++ return rv; ++ } ++ ++ rv = apr_socket_addr_get(&rec->bind_addr, APR_LOCAL, rec->sd); ++ if (rv != APR_SUCCESS) { ++ ap_log_perror(APLOG_MARK, APLOG_CRIT, rv, process->pool, APLOGNO(02491) ++ "apr_socket_addr_get on %d failed.", fd); ++ return rv; ++ } ++ ++ rec->protocol = apr_pstrdup(process->pool, proto); ++ ++ *out_rec = rec; ++ ++ return make_sock(process->pool, rec, 0); ++} ++ ++static const char *set_systemd_listener(process_rec *process, apr_port_t port, ++ const char *proto) ++{ ++ ap_listen_rec *last, *new; ++ apr_status_t rv; ++ int fd = find_systemd_socket(process, port); ++ if (fd < 0) { ++ return "Systemd socket activation is used, but this port is not " ++ "configured in systemd"; ++ } ++ ++ last = ap_listeners; ++ while (last && last->next) { ++ last = last->next; ++ } ++ ++ rv = alloc_systemd_listener(process, fd, proto, &new); ++ if (rv != APR_SUCCESS) { ++ return "Failed to setup socket passed by systemd using socket activation"; ++ } ++ ++ if (last == NULL) { ++ ap_listeners = last = new; ++ } ++ else { ++ last->next = new; ++ last = new; ++ } ++ ++ return NULL; ++} ++ ++#endif /* HAVE_SYSTEMD */ ++ + static const char *alloc_listener(process_rec *process, const char *addr, + apr_port_t port, const char* proto, + void *slave) +@@ -495,7 +621,7 @@ static int open_listeners(apr_pool_t *pool) + } + } + #endif +- if (make_sock(pool, lr) == APR_SUCCESS) { ++ if (make_sock(pool, lr, 1) == APR_SUCCESS) { + ++num_open; + } + else { +@@ -607,8 +733,28 @@ AP_DECLARE(int) ap_setup_listeners(server_rec *s) + } + } + +- if (open_listeners(s->process->pool)) { +- return 0; ++#ifdef HAVE_SYSTEMD ++ if (use_systemd) { ++ const char *userdata_key = "ap_open_systemd_listeners"; ++ void *data; ++ /* clear the enviroment on our second run ++ * so that none of our future children get confused. 
++ */ ++ apr_pool_userdata_get(&data, userdata_key, s->process->pool); ++ if (!data) { ++ apr_pool_userdata_set((const void *)1, userdata_key, ++ apr_pool_cleanup_null, s->process->pool); ++ } ++ else { ++ sd_listen_fds(1); ++ } ++ } ++ else ++#endif ++ { ++ if (open_listeners(s->process->pool)) { ++ return 0; ++ } + } + + for (lr = ap_listeners; lr; lr = lr->next) { +@@ -698,7 +844,7 @@ AP_DECLARE(apr_status_t) ap_duplicate_listeners(apr_pool_t *p, server_rec *s, + duplr->bind_addr); + return stat; + } +- make_sock(p, duplr); ++ make_sock(p, duplr, 1); + #if AP_NONBLOCK_WHEN_MULTI_LISTEN + use_nonblock = (ap_listeners && ap_listeners->next); + stat = apr_socket_opt_set(duplr->sd, APR_SO_NONBLOCK, use_nonblock); +@@ -825,6 +971,11 @@ AP_DECLARE_NONSTD(const char *) ap_set_listener(cmd_parms *cmd, void *dummy, + if (argc < 1 || argc > 2) { + return "Listen requires 1 or 2 arguments."; + } ++#ifdef HAVE_SYSTEMD ++ if (use_systemd == -1) { ++ use_systemd = sd_listen_fds(0) > 0; ++ } ++#endif + + rv = apr_parse_addr_port(&host, &scope_id, &port, argv[0], cmd->pool); + if (rv != APR_SUCCESS) { +@@ -856,6 +1007,12 @@ AP_DECLARE_NONSTD(const char *) ap_set_listener(cmd_parms *cmd, void *dummy, + ap_str_tolower(proto); + } + ++#ifdef HAVE_SYSTEMD ++ if (use_systemd) { ++ return set_systemd_listener(cmd->server->process, port, proto); ++ } ++#endif ++ + return alloc_listener(cmd->server->process, host, port, proto, NULL); + } + diff --git a/SOURCES/httpd-2.4.28-statements-comment.patch b/SOURCES/httpd-2.4.28-statements-comment.patch new file mode 100644 index 0000000..65f1bfb --- /dev/null +++ b/SOURCES/httpd-2.4.28-statements-comment.patch @@ -0,0 +1,16 @@ +diff --git a/modules/aaa/mod_access_compat.c b/modules/aaa/mod_access_compat.c +index 3023803..2edf440 100644 +--- a/modules/aaa/mod_access_compat.c ++++ b/modules/aaa/mod_access_compat.c +@@ -152,6 +152,11 @@ static const char *allow_cmd(cmd_parms *cmd, void *dv, const char *from, + if (strcasecmp(from, "from")) + 
return "allow and deny must be followed by 'from'"; + ++ s = ap_strchr(where, '#'); ++ if (s) { ++ *s = '\0'; ++ } ++ + a = (allowdeny *) apr_array_push(cmd->info ? d->allows : d->denys); + a->x.from = where; + a->limited = cmd->limited; diff --git a/SOURCES/httpd-2.4.32-export.patch b/SOURCES/httpd-2.4.32-export.patch new file mode 100644 index 0000000..18cdafa --- /dev/null +++ b/SOURCES/httpd-2.4.32-export.patch @@ -0,0 +1,22 @@ + +There is no need to "suck in" the apr/apr-util symbols when using +a shared libapr{,util}, it just bloats the symbol table; so don't. + +Upstream-HEAD: needed +Upstream-2.0: omit +Upstream-Status: EXPORT_DIRS change is conditional on using shared apr + +diff --git a/server/Makefile.in b/server/Makefile.in +index 1fa3344..f635d76 100644 +--- a/server/Makefile.in ++++ b/server/Makefile.in +@@ -60,9 +60,6 @@ export_files: + ls $$dir/*.h ; \ + done; \ + echo "$(top_srcdir)/server/mpm_fdqueue.h"; \ +- for dir in $(EXPORT_DIRS_APR); do \ +- ls $$dir/ap[ru].h $$dir/ap[ru]_*.h 2>/dev/null; \ +- done; \ + ) | sed -e s,//,/,g | sort -u > $@ + + exports.c: export_files diff --git a/SOURCES/httpd-2.4.34-CVE-2019-9511-and-9516-and-9517.patch b/SOURCES/httpd-2.4.34-CVE-2019-9511-and-9516-and-9517.patch new file mode 100644 index 0000000..7cee845 --- /dev/null +++ b/SOURCES/httpd-2.4.34-CVE-2019-9511-and-9516-and-9517.patch @@ -0,0 +1,19 @@ +diff --git a/server/mpm/event/event.c b/server/mpm/event/event.c +index 16e39be..2543693 100644 +--- a/server/mpm/event/event.c ++++ b/server/mpm/event/event.c +@@ -1111,10 +1111,11 @@ read_request: + "network write failure in core output filter"); + cs->pub.state = CONN_STATE_LINGER; + } +- else if (c->data_in_output_filters) { ++ else if (c->data_in_output_filters || ++ cs->pub.sense == CONN_SENSE_WANT_READ) { + /* Still in WRITE_COMPLETION_STATE: +- * Set a write timeout for this connection, and let the +- * event thread poll for writeability. 
++ * Set a read/write timeout for this connection, and let the ++ * event thread poll for read/writeability. + */ + cs->queue_timestamp = apr_time_now(); + notify_suspend(cs); diff --git a/SOURCES/httpd-2.4.35-apachectl.patch b/SOURCES/httpd-2.4.35-apachectl.patch new file mode 100644 index 0000000..f24b834 --- /dev/null +++ b/SOURCES/httpd-2.4.35-apachectl.patch @@ -0,0 +1,113 @@ +diff --git a/docs/man/apachectl.8 b/docs/man/apachectl.8 +index 870a048..32d3ee5 100644 +--- a/docs/man/apachectl.8 ++++ b/docs/man/apachectl.8 +@@ -74,7 +74,7 @@ Restarts the Apache httpd daemon\&. If the daemon is not running, it is started\ + Displays a full status report from mod_status\&. For this to work, you need to have mod_status enabled on your server and a text-based browser such as \fBlynx\fR available on your system\&. The URL used to access the status report can be set by editing the \fBSTATUSURL\fR variable in the script\&. + .TP + \fBstatus\fR +-Displays a brief status report\&. Similar to the \fBfullstatus\fR option, except that the list of requests currently being served is omitted\&. ++Displays a brief status report using systemd\&. + .TP + \fBgraceful\fR + Gracefully restarts the Apache httpd daemon\&. If the daemon is not running, it is started\&. This differs from a normal restart in that currently open connections are not aborted\&. A side effect is that old log files will not be closed immediately\&. This means that if used in a log rotation script, a substantial delay may be necessary to ensure that the old log files are closed before processing them\&. This command automatically checks the configuration files as in \fBconfigtest\fR before initiating the restart to make sure Apache doesn't die\&. This is equivalent to \fBapachectl -k graceful\fR\&. 
+diff --git a/support/apachectl.in b/support/apachectl.in +index 3281c2e..8ce6f2b 100644 +--- a/support/apachectl.in ++++ b/support/apachectl.in +@@ -44,19 +44,20 @@ ARGV="$@" + # the path to your httpd binary, including options if necessary + HTTPD='@exp_sbindir@/@progname@' + # +-# pick up any necessary environment variables +-if test -f @exp_sbindir@/envvars; then +- . @exp_sbindir@/envvars +-fi + # + # a command that outputs a formatted text version of the HTML at the + # url given on the command line. Designed for lynx, however other + # programs may work. +-LYNX="@LYNX_PATH@ -dump" ++if [ -x "@LYNX_PATH@" ]; then ++ LYNX="@LYNX_PATH@ -dump" ++else ++ LYNX=none ++fi + # + # the URL to your server's mod_status status page. If you do not + # have one, then status and fullstatus will not work. + STATUSURL="http://localhost:@PORT@/server-status" ++ + # + # Set this variable to a command that increases the maximum + # number of file descriptors allowed per child process. This is +@@ -76,9 +77,46 @@ if [ "x$ARGV" = "x" ] ; then + ARGV="-h" + fi + ++function checklynx() { ++if [ "$LYNX" = "none" ]; then ++ echo "The 'links' package is required for this functionality." ++ exit 8 ++fi ++} ++ ++function testconfig() { ++# httpd is denied terminal access in SELinux, so run in the ++# current context to get stdout from $HTTPD -t. ++if test -x /usr/sbin/selinuxenabled && /usr/sbin/selinuxenabled; then ++ runcon -- `id -Z` /usr/sbin/httpd $OPTIONS -t ++else ++ /usr/sbin/httpd $OPTIONS -t ++fi ++ERROR=$? ++} ++ ++if [ "x$2" != "x" ] ; then ++ echo Passing arguments to httpd using apachectl is no longer supported. ++ echo You can only start/stop/restart httpd using this script. ++ echo If you want to pass extra arguments to httpd, edit the ++ echo /etc/sysconfig/httpd config file. ++fi ++ + case $ACMD in +-start|stop|restart|graceful|graceful-stop) +- $HTTPD -k $ARGV ++start|stop|restart|status) ++ /usr/bin/systemctl $ACMD httpd.service ++ ERROR=$? 
++ ;; ++graceful) ++ if /usr/bin/systemctl -q is-active httpd.service; then ++ /usr/bin/systemctl reload httpd.service ++ else ++ /usr/bin/systemctl start httpd.service ++ fi ++ ERROR=$? ++ ;; ++graceful-stop) ++ /usr/bin/systemctl stop httpd.service + ERROR=$? + ;; + startssl|sslstart|start-SSL) +@@ -88,17 +126,14 @@ startssl|sslstart|start-SSL) + ERROR=2 + ;; + configtest) +- $HTTPD -t +- ERROR=$? +- ;; +-status) +- $LYNX $STATUSURL | awk ' /process$/ { print; exit } { print } ' ++ testconfig + ;; + fullstatus) ++ checklynx + $LYNX $STATUSURL + ;; + *) +- $HTTPD "$@" ++ /usr/sbin/httpd $OPTIONS "$@" + ERROR=$? + esac + diff --git a/SOURCES/httpd-2.4.35-cachehardmax.patch b/SOURCES/httpd-2.4.35-cachehardmax.patch new file mode 100644 index 0000000..5051099 --- /dev/null +++ b/SOURCES/httpd-2.4.35-cachehardmax.patch @@ -0,0 +1,82 @@ +diff --git a/modules/cache/cache_util.h b/modules/cache/cache_util.h +index 6b92151..4c42a8e 100644 +--- a/modules/cache/cache_util.h ++++ b/modules/cache/cache_util.h +@@ -195,6 +195,9 @@ typedef struct { + unsigned int store_nostore_set:1; + unsigned int enable_set:1; + unsigned int disable_set:1; ++ /* treat maxex as hard limit */ ++ unsigned int hardmaxex:1; ++ unsigned int hardmaxex_set:1; + } cache_dir_conf; + + /* A linked-list of authn providers. */ +diff --git a/modules/cache/mod_cache.c b/modules/cache/mod_cache.c +index 56a09f5..41015b5 100644 +--- a/modules/cache/mod_cache.c ++++ b/modules/cache/mod_cache.c +@@ -1455,6 +1455,11 @@ static apr_status_t cache_save_filter(ap_filter_t *f, apr_bucket_brigade *in) + exp = date + dconf->defex; + } + } ++ /* else, forcibly cap the expiry date if required */ ++ else if (dconf->hardmaxex && (date + dconf->maxex) < exp) { ++ exp = date + dconf->maxex; ++ } ++ + info->expire = exp; + + /* We found a stale entry which wasn't really stale. 
*/ +@@ -1954,7 +1959,9 @@ static void *create_dir_config(apr_pool_t *p, char *dummy) + + /* array of providers for this URL space */ + dconf->cacheenable = apr_array_make(p, 10, sizeof(struct cache_enable)); +- ++ /* flag; treat maxex as hard limit */ ++ dconf->hardmaxex = 0; ++ dconf->hardmaxex_set = 0; + return dconf; + } + +@@ -2004,7 +2011,10 @@ static void *merge_dir_config(apr_pool_t *p, void *basev, void *addv) { + new->enable_set = add->enable_set || base->enable_set; + new->disable = (add->disable_set == 0) ? base->disable : add->disable; + new->disable_set = add->disable_set || base->disable_set; +- ++ new->hardmaxex = ++ (add->hardmaxex_set == 0) ++ ? base->hardmaxex ++ : add->hardmaxex; + return new; + } + +@@ -2332,12 +2342,18 @@ static const char *add_cache_disable(cmd_parms *parms, void *dummy, + } + + static const char *set_cache_maxex(cmd_parms *parms, void *dummy, +- const char *arg) ++ const char *arg, const char *hard) + { + cache_dir_conf *dconf = (cache_dir_conf *)dummy; + + dconf->maxex = (apr_time_t) (atol(arg) * MSEC_ONE_SEC); + dconf->maxex_set = 1; ++ ++ if (hard && strcasecmp(hard, "hard") == 0) { ++ dconf->hardmaxex = 1; ++ dconf->hardmaxex_set = 1; ++ } ++ + return NULL; + } + +@@ -2545,7 +2561,7 @@ static const command_rec cache_cmds[] = + "caching is enabled"), + AP_INIT_TAKE1("CacheDisable", add_cache_disable, NULL, RSRC_CONF|ACCESS_CONF, + "A partial URL prefix below which caching is disabled"), +- AP_INIT_TAKE1("CacheMaxExpire", set_cache_maxex, NULL, RSRC_CONF|ACCESS_CONF, ++ AP_INIT_TAKE12("CacheMaxExpire", set_cache_maxex, NULL, RSRC_CONF|ACCESS_CONF, + "The maximum time in seconds to cache a document"), + AP_INIT_TAKE1("CacheMinExpire", set_cache_minex, NULL, RSRC_CONF|ACCESS_CONF, + "The minimum time in seconds to cache a document"), diff --git a/SOURCES/httpd-2.4.35-corelimit.patch b/SOURCES/httpd-2.4.35-corelimit.patch new file mode 100644 index 0000000..22768d4 --- /dev/null +++ b/SOURCES/httpd-2.4.35-corelimit.patch @@ 
-0,0 +1,37 @@ + +Bump up the core size limit if CoreDumpDirectory is +configured. + +Upstream-Status: Was discussed but there are competing desires; + there are portability oddities here too. + +diff --git a/server/core.c b/server/core.c +index aa62e15..ec74029 100644 +--- a/server/core.c ++++ b/server/core.c +@@ -4952,6 +4952,25 @@ static int core_post_config(apr_pool_t *pconf, apr_pool_t *plog, apr_pool_t *pte + } + apr_pool_cleanup_register(pconf, NULL, ap_mpm_end_gen_helper, + apr_pool_cleanup_null); ++ ++#ifdef RLIMIT_CORE ++ if (ap_coredumpdir_configured) { ++ struct rlimit lim; ++ ++ if (getrlimit(RLIMIT_CORE, &lim) == 0 && lim.rlim_cur == 0) { ++ lim.rlim_cur = lim.rlim_max; ++ if (setrlimit(RLIMIT_CORE, &lim) == 0) { ++ ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, NULL, ++ "core dump file size limit raised to %lu bytes", ++ lim.rlim_cur); ++ } else { ++ ap_log_error(APLOG_MARK, APLOG_NOTICE, errno, NULL, ++ "core dump file size is zero, setrlimit failed"); ++ } ++ } ++ } ++#endif ++ + return OK; + } + diff --git a/SOURCES/httpd-2.4.35-deplibs.patch b/SOURCES/httpd-2.4.35-deplibs.patch new file mode 100644 index 0000000..f7bc129 --- /dev/null +++ b/SOURCES/httpd-2.4.35-deplibs.patch @@ -0,0 +1,21 @@ + +Link straight against .la files. 
+ +Upstream-Status: vendor specific + +diff --git a/configure.in b/configure.in +index 9feaceb..82bfeef 100644 +--- a/configure.in ++++ b/configure.in +@@ -784,9 +784,9 @@ APACHE_SUBST(INSTALL_SUEXEC) + + dnl APR should go after the other libs, so the right symbols can be picked up + if test x${apu_found} != xobsolete; then +- AP_LIBS="$AP_LIBS `$apu_config --avoid-ldap --link-libtool --libs`" ++ AP_LIBS="$AP_LIBS `$apu_config --avoid-ldap --link-libtool`" + fi +-AP_LIBS="$AP_LIBS `$apr_config --link-libtool --libs`" ++AP_LIBS="$AP_LIBS `$apr_config --link-libtool`" + APACHE_SUBST(AP_LIBS) + APACHE_SUBST(AP_BUILD_SRCLIB_DIRS) + APACHE_SUBST(AP_CLEAN_SRCLIB_DIRS) diff --git a/SOURCES/httpd-2.4.35-detect-systemd.patch b/SOURCES/httpd-2.4.35-detect-systemd.patch new file mode 100644 index 0000000..60dffb6 --- /dev/null +++ b/SOURCES/httpd-2.4.35-detect-systemd.patch @@ -0,0 +1,77 @@ +diff --git a/Makefile.in b/Makefile.in +index ea8366e..06b8c5a 100644 +--- a/Makefile.in ++++ b/Makefile.in +@@ -4,7 +4,7 @@ CLEAN_SUBDIRS = test + + PROGRAM_NAME = $(progname) + PROGRAM_SOURCES = modules.c +-PROGRAM_LDADD = buildmark.o $(HTTPD_LDFLAGS) $(PROGRAM_DEPENDENCIES) $(PCRE_LIBS) $(EXTRA_LIBS) $(AP_LIBS) $(LIBS) ++PROGRAM_LDADD = buildmark.o $(HTTPD_LDFLAGS) $(PROGRAM_DEPENDENCIES) $(HTTPD_LIBS) $(EXTRA_LIBS) $(AP_LIBS) $(LIBS) + PROGRAM_PRELINK = $(COMPILE) -c $(top_srcdir)/server/buildmark.c + PROGRAM_DEPENDENCIES = \ + server/libmain.la \ +diff --git a/acinclude.m4 b/acinclude.m4 +index ce1d637..0ad0c13 100644 +--- a/acinclude.m4 ++++ b/acinclude.m4 +@@ -606,6 +606,30 @@ AC_DEFUN([APACHE_CHECK_OPENSSL],[ + fi + ]) + ++AC_DEFUN(APACHE_CHECK_SYSTEMD, [ ++dnl Check for systemd support for listen.c's socket activation. 
++case $host in ++*-linux-*) ++ if test -n "$PKGCONFIG" && $PKGCONFIG --exists libsystemd; then ++ SYSTEMD_LIBS=`$PKGCONFIG --libs libsystemd` ++ elif test -n "$PKGCONFIG" && $PKGCONFIG --exists libsystemd-daemon; then ++ SYSTEMD_LIBS=`$PKGCONFIG --libs libsystemd-daemon` ++ else ++ AC_CHECK_LIB(systemd-daemon, sd_notify, SYSTEMD_LIBS="-lsystemd-daemon") ++ fi ++ if test -n "$SYSTEMD_LIBS"; then ++ AC_CHECK_HEADERS(systemd/sd-daemon.h) ++ if test "${ac_cv_header_systemd_sd_daemon_h}" = "no" || test -z "${SYSTEMD_LIBS}"; then ++ AC_MSG_WARN([Your system does not support systemd.]) ++ else ++ APR_ADDTO(HTTPD_LIBS, [$SYSTEMD_LIBS]) ++ AC_DEFINE(HAVE_SYSTEMD, 1, [Define if systemd is supported]) ++ fi ++ fi ++ ;; ++esac ++]) ++ + dnl + dnl APACHE_EXPORT_ARGUMENTS + dnl Export (via APACHE_SUBST) the various path-related variables that +diff --git a/configure.in b/configure.in +index 82bfeef..eedba50 100644 +--- a/configure.in ++++ b/configure.in +@@ -234,6 +234,7 @@ if test "$PCRE_CONFIG" != "false"; then + AC_MSG_NOTICE([Using external PCRE library from $PCRE_CONFIG]) + APR_ADDTO(PCRE_INCLUDES, [`$PCRE_CONFIG --cflags`]) + APR_ADDTO(PCRE_LIBS, [`$PCRE_CONFIG --libs`]) ++ APR_ADDTO(HTTPD_LIBS, [\$(PCRE_LIBS)]) + else + AC_MSG_ERROR([pcre-config for libpcre not found. 
PCRE is required and available from http://pcre.org/]) + fi +@@ -504,6 +505,8 @@ if test "$ac_cv_struct_tm_gmtoff" = "yes"; then + AC_DEFINE(HAVE_GMTOFF, 1, [Define if struct tm has a tm_gmtoff field]) + fi + ++APACHE_CHECK_SYSTEMD ++ + dnl ## Set up any appropriate OS-specific environment variables for apachectl + + case $host in +@@ -677,6 +680,7 @@ APACHE_SUBST(OS_DIR) + APACHE_SUBST(BUILTIN_LIBS) + APACHE_SUBST(SHLIBPATH_VAR) + APACHE_SUBST(OS_SPECIFIC_VARS) ++APACHE_SUBST(HTTPD_LIBS) + + PRE_SHARED_CMDS='echo ""' + POST_SHARED_CMDS='echo ""' diff --git a/SOURCES/httpd-2.4.35-freebind.patch b/SOURCES/httpd-2.4.35-freebind.patch new file mode 100644 index 0000000..dc34c4e --- /dev/null +++ b/SOURCES/httpd-2.4.35-freebind.patch @@ -0,0 +1,125 @@ +diff --git a/include/ap_listen.h b/include/ap_listen.h +index 58c2574..1a53292 100644 +--- a/include/ap_listen.h ++++ b/include/ap_listen.h +@@ -137,6 +137,9 @@ AP_DECLARE_NONSTD(const char *) ap_set_listenbacklog(cmd_parms *cmd, void *dummy + AP_DECLARE_NONSTD(const char *) ap_set_listencbratio(cmd_parms *cmd, void *dummy, const char *arg); + AP_DECLARE_NONSTD(const char *) ap_set_listener(cmd_parms *cmd, void *dummy, + int argc, char *const argv[]); ++AP_DECLARE_NONSTD(const char *) ap_set_freelistener(cmd_parms *cmd, void *dummy, ++ int argc, char *const argv[]); ++ + AP_DECLARE_NONSTD(const char *) ap_set_send_buffer_size(cmd_parms *cmd, void *dummy, + const char *arg); + AP_DECLARE_NONSTD(const char *) ap_set_receive_buffer_size(cmd_parms *cmd, +@@ -150,6 +153,8 @@ AP_INIT_TAKE1("ListenCoresBucketsRatio", ap_set_listencbratio, NULL, RSRC_CONF, + "Ratio between the number of CPU cores (online) and the number of listeners buckets"), \ + AP_INIT_TAKE_ARGV("Listen", ap_set_listener, NULL, RSRC_CONF, \ + "A port number or a numeric IP address and a port number, and an optional protocol"), \ ++AP_INIT_TAKE_ARGV("ListenFree", ap_set_freelistener, NULL, RSRC_CONF, \ ++ "A port number or a numeric IP address and a port 
number, and an optional protocol"), \ + AP_INIT_TAKE1("SendBufferSize", ap_set_send_buffer_size, NULL, RSRC_CONF, \ + "Send buffer size in bytes"), \ + AP_INIT_TAKE1("ReceiveBufferSize", ap_set_receive_buffer_size, NULL, \ +diff --git a/server/listen.c b/server/listen.c +index 1a6c1d3..d375fee 100644 +--- a/server/listen.c ++++ b/server/listen.c +@@ -63,6 +63,7 @@ static int ap_listenbacklog; + static int ap_listencbratio; + static int send_buffer_size; + static int receive_buffer_size; ++static int ap_listenfreebind; + #ifdef HAVE_SYSTEMD + static int use_systemd = -1; + #endif +@@ -162,6 +163,21 @@ static apr_status_t make_sock(apr_pool_t *p, ap_listen_rec *server, int do_bind_ + } + #endif + ++ ++#if defined(APR_SO_FREEBIND) ++ if (ap_listenfreebind) { ++ if (apr_socket_opt_set(s, APR_SO_FREEBIND, one) < 0) { ++ stat = apr_get_netos_error(); ++ ap_log_perror(APLOG_MARK, APLOG_CRIT, stat, p, APLOGNO(02182) ++ "make_sock: apr_socket_opt_set: " ++ "error setting APR_SO_FREEBIND"); ++ apr_socket_close(s); ++ return stat; ++ } ++ } ++#endif ++ ++ + if (do_bind_listen) { + #if APR_HAVE_IPV6 + if (server->bind_addr->family == APR_INET6) { +@@ -956,6 +972,7 @@ AP_DECLARE(void) ap_listen_pre_config(void) + } + } + ++ + AP_DECLARE_NONSTD(const char *) ap_set_listener(cmd_parms *cmd, void *dummy, + int argc, char *const argv[]) + { +@@ -1016,6 +1033,14 @@ AP_DECLARE_NONSTD(const char *) ap_set_listener(cmd_parms *cmd, void *dummy, + return alloc_listener(cmd->server->process, host, port, proto, NULL); + } + ++AP_DECLARE_NONSTD(const char *) ap_set_freelistener(cmd_parms *cmd, void *dummy, ++ int argc, ++ char *const argv[]) ++{ ++ ap_listenfreebind = 1; ++ return ap_set_listener(cmd, dummy, argc, argv); ++} ++ + AP_DECLARE_NONSTD(const char *) ap_set_listenbacklog(cmd_parms *cmd, + void *dummy, + const char *arg) +diff --git a/docs/manual/mod/mpm_common.html.en b/docs/manual/mod/mpm_common.html.en +index 5d688e4..eb66c19 100644 +--- a/docs/manual/mod/mpm_common.html.en 
++++ b/docs/manual/mod/mpm_common.html.en +@@ -42,6 +42,7 @@ more than one multi-processing module (MPM) +
  • EnableExceptionHook
  • +
  • GracefulShutdownTimeout
  • +
  • Listen
  • ++
  • ListenFree
  • +
  • ListenBackLog
  • +
  • ListenCoresBucketsRatio
  • +
  • MaxConnectionsPerChild
  • +@@ -233,6 +234,31 @@ discussion of the Address already in use error message, + including other causes. + + ++ ++
    top
    ++

    ListenFree Directive

    ++ ++ ++ ++ ++ ++ ++ ++
    Description:IP addresses and ports that the server ++listens to. Doesn't require IP address to be up
    Syntax:ListenFree [IP-address:]portnumber [protocol]
    Context:server config
    Status:MPM
    Module:event, worker, prefork, mpm_winnt, mpm_netware, mpmt_os2
    Compatibility:This directive is currently available only in Red Hat Enterprise Linux
    ++

    The ListenFree directive is ++ identical to the Listen directive. ++ The only difference is in the usage of the IP_FREEBIND socket ++ option, which is enabled by default with ListenFree. ++ If IP_FREEBIND is enabled, it allows httpd to bind to an IP ++ address that is nonlocal or does not (yet) exist. This allows httpd to ++ listen on a socket without requiring the underlying network interface ++ or the specified dynamic IP address to be up at the time when httpd ++ is trying to bind to it. ++

    ++
    ++ ++ +
    top
    +

    ListenBackLog Directive

    + + diff --git a/SOURCES/httpd-2.4.35-full-release.patch b/SOURCES/httpd-2.4.35-full-release.patch new file mode 100644 index 0000000..ab8cc2a --- /dev/null +++ b/SOURCES/httpd-2.4.35-full-release.patch @@ -0,0 +1,46 @@ +diff --git a/server/core.c b/server/core.c +index cb8e463..daf76b3 100644 +--- a/server/core.c ++++ b/server/core.c +@@ -3430,6 +3430,7 @@ enum server_token_type { + SrvTk_MINIMAL, /* eg: Apache/2.0.41 */ + SrvTk_OS, /* eg: Apache/2.0.41 (UNIX) */ + SrvTk_FULL, /* eg: Apache/2.0.41 (UNIX) PHP/4.2.2 FooBar/1.2b */ ++ SrvTk_FULL_RELEASE, /* eg: Apache/2.0.41 (UNIX) (Release 32.el7) PHP/4.2.2 FooBar/1.2b */ + SrvTk_PRODUCT_ONLY /* eg: Apache */ + }; + static enum server_token_type ap_server_tokens = SrvTk_FULL; +@@ -3506,7 +3507,10 @@ static void set_banner(apr_pool_t *pconf) + else if (ap_server_tokens == SrvTk_MAJOR) { + ap_add_version_component(pconf, AP_SERVER_BASEPRODUCT "/" AP_SERVER_MAJORVERSION); + } +- else { ++ else if (ap_server_tokens == SrvTk_FULL_RELEASE) { ++ ap_add_version_component(pconf, AP_SERVER_BASEVERSION " (" PLATFORM ") (Release @RELEASE@)"); ++ } ++ else { + ap_add_version_component(pconf, AP_SERVER_BASEVERSION " (" PLATFORM ")"); + } + +@@ -3514,7 +3518,7 @@ static void set_banner(apr_pool_t *pconf) + * Lock the server_banner string if we're not displaying + * the full set of tokens + */ +- if (ap_server_tokens != SrvTk_FULL) { ++ if (ap_server_tokens != SrvTk_FULL && ap_server_tokens != SrvTk_FULL_RELEASE) { + banner_locked++; + } + server_description = AP_SERVER_BASEVERSION " (" PLATFORM ")"; +@@ -3547,8 +3551,11 @@ static const char *set_serv_tokens(cmd_parms *cmd, void *dummy, + else if (!strcasecmp(arg, "Full")) { + ap_server_tokens = SrvTk_FULL; + } ++ else if (!strcasecmp(arg, "Full-Release")) { ++ ap_server_tokens = SrvTk_FULL_RELEASE; ++ } + else { +- return "ServerTokens takes 1 argument: 'Prod(uctOnly)', 'Major', 'Minor', 'Min(imal)', 'OS', or 'Full'"; ++ return "ServerTokens takes 1 argument: 'Prod(uctOnly)', 
'Major', 'Minor', 'Min(imal)', 'OS', 'Full' or 'Full-Release'"; + } + + return NULL; diff --git a/SOURCES/httpd-2.4.35-ocsp-wrong-ctx.patch b/SOURCES/httpd-2.4.35-ocsp-wrong-ctx.patch new file mode 100644 index 0000000..5523ea5 --- /dev/null +++ b/SOURCES/httpd-2.4.35-ocsp-wrong-ctx.patch @@ -0,0 +1,15 @@ +diff --git a/modules/ssl/ssl_engine_kernel.c b/modules/ssl/ssl_engine_kernel.c +index a5e86e4..6611610 100644 +--- a/modules/ssl/ssl_engine_kernel.c ++++ b/modules/ssl/ssl_engine_kernel.c +@@ -1823,8 +1823,8 @@ int ssl_callback_SSLVerify(int ok, X509_STORE_CTX *ctx) + /* + * Perform OCSP-based revocation checks + */ +- if (ok && ((sc->server->ocsp_mask & SSL_OCSPCHECK_CHAIN) || +- (errdepth == 0 && (sc->server->ocsp_mask & SSL_OCSPCHECK_LEAF)))) { ++ if (ok && ((mctx->ocsp_mask & SSL_OCSPCHECK_CHAIN) || ++ (errdepth == 0 && (mctx->ocsp_mask & SSL_OCSPCHECK_LEAF)))) { + /* If there was an optional verification error, it's not + * possible to perform OCSP validation since the issuer may be + * missing/untrusted. Fail in that case. */ diff --git a/SOURCES/httpd-2.4.35-r1633085.patch b/SOURCES/httpd-2.4.35-r1633085.patch new file mode 100644 index 0000000..a14b626 --- /dev/null +++ b/SOURCES/httpd-2.4.35-r1633085.patch @@ -0,0 +1,16 @@ +diff --git a/modules/ssl/ssl_engine_io.c b/modules/ssl/ssl_engine_io.c +index d52d5e3..8a57659 100644 +--- a/modules/ssl/ssl_engine_io.c ++++ b/modules/ssl/ssl_engine_io.c +@@ -1415,6 +1415,11 @@ static apr_status_t ssl_io_filter_handshake(ssl_filter_ctx_t *filter_ctx) + "\"SSLVerifyClient optional_no_ca\" " + "configuration"); + ssl_log_ssl_error(SSLLOG_MARK, APLOG_INFO, server); ++ ++ /* on session resumption ssl_callback_SSLVerify() ++ * will not be called, therefore we have to set it here ++ */ ++ sslconn->verify_info = "GENEROUS"; + } + else { + const char *error = sslconn->verify_error ? 
diff --git a/SOURCES/httpd-2.4.35-r1738878.patch b/SOURCES/httpd-2.4.35-r1738878.patch new file mode 100644 index 0000000..700e80a --- /dev/null +++ b/SOURCES/httpd-2.4.35-r1738878.patch @@ -0,0 +1,140 @@ +diff --git a/modules/proxy/ajp.h b/modules/proxy/ajp.h +index c119a7e..267150a 100644 +--- a/modules/proxy/ajp.h ++++ b/modules/proxy/ajp.h +@@ -413,12 +413,14 @@ apr_status_t ajp_ilink_receive(apr_socket_t *sock, ajp_msg_t *msg); + * @param sock backend socket + * @param r current request + * @param buffsize max size of the AJP packet. ++ * @param secret authentication secret + * @param uri requested uri + * @return APR_SUCCESS or error + */ + apr_status_t ajp_send_header(apr_socket_t *sock, request_rec *r, + apr_size_t buffsize, +- apr_uri_t *uri); ++ apr_uri_t *uri, ++ const char *secret); + + /** + * Read the ajp message and return the type of the message. +diff --git a/modules/proxy/ajp_header.c b/modules/proxy/ajp_header.c +index 67353a7..680a8f3 100644 +--- a/modules/proxy/ajp_header.c ++++ b/modules/proxy/ajp_header.c +@@ -213,7 +213,8 @@ AJPV13_REQUEST/AJPV14_REQUEST= + + static apr_status_t ajp_marshal_into_msgb(ajp_msg_t *msg, + request_rec *r, +- apr_uri_t *uri) ++ apr_uri_t *uri, ++ const char *secret) + { + int method; + apr_uint32_t i, num_headers = 0; +@@ -293,17 +294,15 @@ static apr_status_t ajp_marshal_into_msgb(ajp_msg_t *msg, + i, elts[i].key, elts[i].val); + } + +-/* XXXX need to figure out how to do this +- if (s->secret) { ++ if (secret) { + if (ajp_msg_append_uint8(msg, SC_A_SECRET) || +- ajp_msg_append_string(msg, s->secret)) { ++ ajp_msg_append_string(msg, secret)) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(03228) +- "Error ajp_marshal_into_msgb - " ++ "ajp_marshal_into_msgb: " + "Error appending secret"); + return APR_EGENERAL; + } + } +- */ + + if (r->user) { + if (ajp_msg_append_uint8(msg, SC_A_REMOTE_USER) || +@@ -671,7 +670,8 @@ static apr_status_t ajp_unmarshal_response(ajp_msg_t *msg, + apr_status_t 
ajp_send_header(apr_socket_t *sock, + request_rec *r, + apr_size_t buffsize, +- apr_uri_t *uri) ++ apr_uri_t *uri, ++ const char *secret) + { + ajp_msg_t *msg; + apr_status_t rc; +@@ -683,7 +683,7 @@ apr_status_t ajp_send_header(apr_socket_t *sock, + return rc; + } + +- rc = ajp_marshal_into_msgb(msg, r, uri); ++ rc = ajp_marshal_into_msgb(msg, r, uri, secret); + if (rc != APR_SUCCESS) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(00988) + "ajp_send_header: ajp_marshal_into_msgb failed"); +diff --git a/modules/proxy/mod_proxy.c b/modules/proxy/mod_proxy.c +index 69a35ce..800ede1 100644 +--- a/modules/proxy/mod_proxy.c ++++ b/modules/proxy/mod_proxy.c +@@ -327,6 +327,12 @@ static const char *set_worker_param(apr_pool_t *p, + worker->s->response_field_size = (s ? s : HUGE_STRING_LEN); + worker->s->response_field_size_set = 1; + } ++ else if (!strcasecmp(key, "secret")) { ++ if (PROXY_STRNCPY(worker->s->secret, val) != APR_SUCCESS) { ++ return apr_psprintf(p, "Secret length must be < %d characters", ++ (int)sizeof(worker->s->secret)); ++ } ++ } + else { + if (set_worker_hc_param_f) { + return set_worker_hc_param_f(p, s, worker, key, val, NULL); +diff --git a/modules/proxy/mod_proxy.h b/modules/proxy/mod_proxy.h +index aabd09f..3419023 100644 +--- a/modules/proxy/mod_proxy.h ++++ b/modules/proxy/mod_proxy.h +@@ -357,6 +357,7 @@ PROXY_WORKER_HC_FAIL ) + #define PROXY_WORKER_MAX_HOSTNAME_SIZE 64 + #define PROXY_BALANCER_MAX_HOSTNAME_SIZE PROXY_WORKER_MAX_HOSTNAME_SIZE + #define PROXY_BALANCER_MAX_STICKY_SIZE 64 ++#define PROXY_WORKER_MAX_SECRET_SIZE 64 + + #define PROXY_RFC1035_HOSTNAME_SIZE 256 + +@@ -450,6 +451,7 @@ typedef struct { + hcmethod_t method; /* method to use for health check */ + apr_interval_time_t interval; + char upgrade[PROXY_WORKER_MAX_SCHEME_SIZE];/* upgrade protocol used by mod_proxy_wstunnel */ ++ char secret[PROXY_WORKER_MAX_SECRET_SIZE]; /* authentication secret (e.g. 
AJP13) */ + char hostname_ex[PROXY_RFC1035_HOSTNAME_SIZE]; /* RFC1035 compliant version of the remote backend address */ + apr_size_t response_field_size; /* Size of proxy response buffer in bytes. */ + unsigned int response_field_size_set:1; +diff --git a/modules/proxy/mod_proxy_ajp.c b/modules/proxy/mod_proxy_ajp.c +index 73716af..6faabea 100644 +--- a/modules/proxy/mod_proxy_ajp.c ++++ b/modules/proxy/mod_proxy_ajp.c +@@ -193,6 +193,7 @@ static int ap_proxy_ajp_request(apr_pool_t *p, request_rec *r, + apr_off_t content_length = 0; + int original_status = r->status; + const char *original_status_line = r->status_line; ++ const char *secret = NULL; + + if (psf->io_buffer_size_set) + maxsize = psf->io_buffer_size; +@@ -202,12 +203,15 @@ static int ap_proxy_ajp_request(apr_pool_t *p, request_rec *r, + maxsize = AJP_MSG_BUFFER_SZ; + maxsize = APR_ALIGN(maxsize, 1024); + ++ if (*conn->worker->s->secret) ++ secret = conn->worker->s->secret; ++ + /* + * Send the AJP request to the remote server + */ + + /* send request headers */ +- status = ajp_send_header(conn->sock, r, maxsize, uri); ++ status = ajp_send_header(conn->sock, r, maxsize, uri, secret); + if (status != APR_SUCCESS) { + conn->close = 1; + ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(00868) diff --git a/SOURCES/httpd-2.4.35-r1825120.patch b/SOURCES/httpd-2.4.35-r1825120.patch new file mode 100644 index 0000000..6611872 --- /dev/null +++ b/SOURCES/httpd-2.4.35-r1825120.patch @@ -0,0 +1,96 @@ +diff --git a/modules/ssl/ssl_engine_init.c b/modules/ssl/ssl_engine_init.c +index 19cb611..79d5219 100644 +--- a/modules/ssl/ssl_engine_init.c ++++ b/modules/ssl/ssl_engine_init.c +@@ -2070,70 +2070,18 @@ int ssl_proxy_section_post_config(apr_pool_t *p, apr_pool_t *plog, + return OK; + } + +-static int ssl_init_FindCAList_X509NameCmp(const X509_NAME * const *a, +- const X509_NAME * const *b) +-{ +- return(X509_NAME_cmp(*a, *b)); +-} +- +-static void ssl_init_PushCAList(STACK_OF(X509_NAME) *ca_list, +- 
server_rec *s, apr_pool_t *ptemp, +- const char *file) +-{ +- int n; +- STACK_OF(X509_NAME) *sk; +- +- sk = (STACK_OF(X509_NAME) *) +- SSL_load_client_CA_file(file); +- +- if (!sk) { +- return; +- } +- +- for (n = 0; n < sk_X509_NAME_num(sk); n++) { +- X509_NAME *name = sk_X509_NAME_value(sk, n); +- +- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(02209) +- "CA certificate: %s", +- modssl_X509_NAME_to_string(ptemp, name, 0)); +- +- /* +- * note that SSL_load_client_CA_file() checks for duplicates, +- * but since we call it multiple times when reading a directory +- * we must also check for duplicates ourselves. +- */ +- +- if (sk_X509_NAME_find(ca_list, name) < 0) { +- /* this will be freed when ca_list is */ +- sk_X509_NAME_push(ca_list, name); +- } +- else { +- /* need to free this ourselves, else it will leak */ +- X509_NAME_free(name); +- } +- } +- +- sk_X509_NAME_free(sk); +-} +- + STACK_OF(X509_NAME) *ssl_init_FindCAList(server_rec *s, + apr_pool_t *ptemp, + const char *ca_file, + const char *ca_path) + { +- STACK_OF(X509_NAME) *ca_list; +- +- /* +- * Start with a empty stack/list where new +- * entries get added in sorted order. +- */ +- ca_list = sk_X509_NAME_new(ssl_init_FindCAList_X509NameCmp); ++ STACK_OF(X509_NAME) *ca_list = sk_X509_NAME_new_null();; + + /* + * Process CA certificate bundle file + */ + if (ca_file) { +- ssl_init_PushCAList(ca_list, s, ptemp, ca_file); ++ SSL_add_file_cert_subjects_to_stack(ca_list, ca_file); + /* + * If ca_list is still empty after trying to load ca_file + * then the file failed to load, and users should hear about that. 
+@@ -2168,17 +2116,12 @@ STACK_OF(X509_NAME) *ssl_init_FindCAList(server_rec *s, + continue; /* don't try to load directories */ + } + file = apr_pstrcat(ptemp, ca_path, "/", direntry.name, NULL); +- ssl_init_PushCAList(ca_list, s, ptemp, file); ++ SSL_add_file_cert_subjects_to_stack(ca_list, file); + } + + apr_dir_close(dir); + } + +- /* +- * Cleanup +- */ +- (void) sk_X509_NAME_set_cmp_func(ca_list, NULL); +- + return ca_list; + } + diff --git a/SOURCES/httpd-2.4.35-r1830819+.patch b/SOURCES/httpd-2.4.35-r1830819+.patch new file mode 100644 index 0000000..18ae1d3 --- /dev/null +++ b/SOURCES/httpd-2.4.35-r1830819+.patch @@ -0,0 +1,708 @@ +# ./pullrev.sh 1830819 1830836 1830912 1830913 1830927 1831168 1831173 + +http://svn.apache.org/viewvc?view=revision&revision=1830819 +http://svn.apache.org/viewvc?view=revision&revision=1830912 +http://svn.apache.org/viewvc?view=revision&revision=1830913 +http://svn.apache.org/viewvc?view=revision&revision=1830927 +http://svn.apache.org/viewvc?view=revision&revision=1831168 +http://svn.apache.org/viewvc?view=revision&revision=1831173 +http://svn.apache.org/viewvc?view=revision&revision=1835240 +http://svn.apache.org/viewvc?view=revision&revision=1835242 +http://svn.apache.org/viewvc?view=revision&revision=1835615 + +diff --git a/modules/ssl/ssl_engine_config.c b/modules/ssl/ssl_engine_config.c +index 43397f9..ff8f429 100644 +--- httpd-2.4.35/modules/ssl/ssl_engine_config.c.r1830819+ ++++ httpd-2.4.35/modules/ssl/ssl_engine_config.c +@@ -899,7 +899,9 @@ + SSLSrvConfigRec *sc = mySrvConfig(cmd->server); + const char *err; + +- if ((err = ssl_cmd_check_file(cmd, &arg))) { ++ /* Only check for non-ENGINE based certs. */ ++ if (!modssl_is_engine_id(arg) ++ && (err = ssl_cmd_check_file(cmd, &arg))) { + return err; + } + +@@ -915,7 +917,9 @@ + SSLSrvConfigRec *sc = mySrvConfig(cmd->server); + const char *err; + +- if ((err = ssl_cmd_check_file(cmd, &arg))) { ++ /* Check keyfile exists for non-ENGINE keys. 
*/ ++ if (!modssl_is_engine_id(arg) ++ && (err = ssl_cmd_check_file(cmd, &arg))) { + return err; + } + +--- httpd-2.4.35/modules/ssl/ssl_engine_init.c.r1830819+ ++++ httpd-2.4.35/modules/ssl/ssl_engine_init.c +@@ -1186,12 +1186,18 @@ + (certfile = APR_ARRAY_IDX(mctx->pks->cert_files, i, + const char *)); + i++) { ++ EVP_PKEY *pkey; ++ const char *engine_certfile = NULL; ++ + key_id = apr_psprintf(ptemp, "%s:%d", vhost_id, i); + + ERR_clear_error(); + + /* first the certificate (public key) */ +- if (mctx->cert_chain) { ++ if (modssl_is_engine_id(certfile)) { ++ engine_certfile = certfile; ++ } ++ else if (mctx->cert_chain) { + if ((SSL_CTX_use_certificate_file(mctx->ssl_ctx, certfile, + SSL_FILETYPE_PEM) < 1)) { + ap_log_error(APLOG_MARK, APLOG_EMERG, 0, s, APLOGNO(02561) +@@ -1220,12 +1226,46 @@ + + ERR_clear_error(); + +- if ((SSL_CTX_use_PrivateKey_file(mctx->ssl_ctx, keyfile, +- SSL_FILETYPE_PEM) < 1) && +- (ERR_GET_FUNC(ERR_peek_last_error()) +- != X509_F_X509_CHECK_PRIVATE_KEY)) { ++ if (modssl_is_engine_id(keyfile)) { ++ apr_status_t rv; ++ ++ cert = NULL; ++ ++ if ((rv = modssl_load_engine_keypair(s, ptemp, vhost_id, ++ engine_certfile, keyfile, ++ &cert, &pkey))) { ++ return rv; ++ } ++ ++ if (cert) { ++ if (SSL_CTX_use_certificate(mctx->ssl_ctx, cert) < 1) { ++ ap_log_error(APLOG_MARK, APLOG_EMERG, 0, s, APLOGNO(10137) ++ "Failed to configure engine certificate %s, check %s", ++ key_id, certfile); ++ ssl_log_ssl_error(SSLLOG_MARK, APLOG_EMERG, s); ++ return APR_EGENERAL; ++ } ++ ++ /* SSL_CTX now owns the cert. 
*/ ++ X509_free(cert); ++ } ++ ++ if (SSL_CTX_use_PrivateKey(mctx->ssl_ctx, pkey) < 1) { ++ ap_log_error(APLOG_MARK, APLOG_EMERG, 0, s, APLOGNO(10130) ++ "Failed to configure private key %s from engine", ++ keyfile); ++ ssl_log_ssl_error(SSLLOG_MARK, APLOG_EMERG, s); ++ return APR_EGENERAL; ++ } ++ ++ /* SSL_CTX now owns the key */ ++ EVP_PKEY_free(pkey); ++ } ++ else if ((SSL_CTX_use_PrivateKey_file(mctx->ssl_ctx, keyfile, ++ SSL_FILETYPE_PEM) < 1) ++ && (ERR_GET_FUNC(ERR_peek_last_error()) ++ != X509_F_X509_CHECK_PRIVATE_KEY)) { + ssl_asn1_t *asn1; +- EVP_PKEY *pkey; + const unsigned char *ptr; + + ERR_clear_error(); +@@ -1312,8 +1352,9 @@ + /* + * Try to read DH parameters from the (first) SSLCertificateFile + */ +- if ((certfile = APR_ARRAY_IDX(mctx->pks->cert_files, 0, const char *)) && +- (dhparams = ssl_dh_GetParamFromFile(certfile))) { ++ certfile = APR_ARRAY_IDX(mctx->pks->cert_files, 0, const char *); ++ if (certfile && !modssl_is_engine_id(certfile) ++ && (dhparams = ssl_dh_GetParamFromFile(certfile))) { + SSL_CTX_set_tmp_dh(mctx->ssl_ctx, dhparams); + ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(02540) + "Custom DH parameters (%d bits) for %s loaded from %s", +@@ -1325,10 +1366,10 @@ + /* + * Similarly, try to read the ECDH curve name from SSLCertificateFile... 
+ */ +- if ((certfile != NULL) && +- (ecparams = ssl_ec_GetParamFromFile(certfile)) && +- (nid = EC_GROUP_get_curve_name(ecparams)) && +- (eckey = EC_KEY_new_by_curve_name(nid))) { ++ if (certfile && !modssl_is_engine_id(certfile) ++ && (ecparams = ssl_ec_GetParamFromFile(certfile)) ++ && (nid = EC_GROUP_get_curve_name(ecparams)) ++ && (eckey = EC_KEY_new_by_curve_name(nid))) { + SSL_CTX_set_tmp_ecdh(mctx->ssl_ctx, eckey); + ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(02541) + "ECDH curve %s for %s specified in %s", +--- httpd-2.4.35/modules/ssl/ssl_engine_pphrase.c.r1830819+ ++++ httpd-2.4.35/modules/ssl/ssl_engine_pphrase.c +@@ -143,9 +143,6 @@ + const char *key_id = asn1_table_vhost_key(mc, p, sc->vhost_id, idx); + EVP_PKEY *pPrivateKey = NULL; + ssl_asn1_t *asn1; +- unsigned char *ucp; +- long int length; +- BOOL bReadable; + int nPassPhrase = (*pphrases)->nelts; + int nPassPhraseRetry = 0; + apr_time_t pkey_mtime = 0; +@@ -222,16 +219,12 @@ + * is not empty. */ + ERR_clear_error(); + +- bReadable = ((pPrivateKey = modssl_read_privatekey(ppcb_arg.pkey_file, +- NULL, ssl_pphrase_Handle_CB, &ppcb_arg)) != NULL ? +- TRUE : FALSE); +- +- /* +- * when the private key file now was readable, +- * it's fine and we go out of the loop +- */ +- if (bReadable) +- break; ++ pPrivateKey = modssl_read_privatekey(ppcb_arg.pkey_file, ++ ssl_pphrase_Handle_CB, &ppcb_arg); ++ /* If the private key was successfully read, nothing more to ++ do here. */ ++ if (pPrivateKey != NULL) ++ break; + + /* + * when we have more remembered pass phrases +@@ -356,19 +349,12 @@ + nPassPhrase++; + } + +- /* +- * Insert private key into the global module configuration +- * (we convert it to a stand-alone DER byte sequence +- * because the SSL library uses static variables inside a +- * RSA structure which do not survive DSO reloads!) 
+- */ +- length = i2d_PrivateKey(pPrivateKey, NULL); +- ucp = ssl_asn1_table_set(mc->tPrivateKey, key_id, length); +- (void)i2d_PrivateKey(pPrivateKey, &ucp); /* 2nd arg increments */ ++ /* Cache the private key in the global module configuration so it ++ * can be used after subsequent reloads. */ ++ asn1 = ssl_asn1_table_set(mc->tPrivateKey, key_id, pPrivateKey); + + if (ppcb_arg.nPassPhraseDialogCur != 0) { + /* remember mtime of encrypted keys */ +- asn1 = ssl_asn1_table_get(mc->tPrivateKey, key_id); + asn1->source_mtime = pkey_mtime; + } + +@@ -619,3 +605,303 @@ + */ + return (len); + } ++ ++ ++#if defined(HAVE_OPENSSL_ENGINE_H) && defined(HAVE_ENGINE_INIT) ++ ++/* OpenSSL UI implementation for passphrase entry; largely duplicated ++ * from ssl_pphrase_Handle_CB but adjusted for UI API. TODO: Might be ++ * worth trying to shift pphrase handling over to the UI API ++ * completely. */ ++static int passphrase_ui_open(UI *ui) ++{ ++ pphrase_cb_arg_t *ppcb = UI_get0_user_data(ui); ++ SSLSrvConfigRec *sc = mySrvConfig(ppcb->s); ++ ++ ppcb->nPassPhraseDialog++; ++ ppcb->nPassPhraseDialogCur++; ++ ++ /* ++ * Builtin or Pipe dialog ++ */ ++ if (sc->server->pphrase_dialog_type == SSL_PPTYPE_BUILTIN ++ || sc->server->pphrase_dialog_type == SSL_PPTYPE_PIPE) { ++ if (sc->server->pphrase_dialog_type == SSL_PPTYPE_PIPE) { ++ if (!readtty) { ++ ap_log_error(APLOG_MARK, APLOG_INFO, 0, ppcb->s, ++ APLOGNO(10143) ++ "Init: Creating pass phrase dialog pipe child " ++ "'%s'", sc->server->pphrase_dialog_path); ++ if (ssl_pipe_child_create(ppcb->p, ++ sc->server->pphrase_dialog_path) ++ != APR_SUCCESS) { ++ ap_log_error(APLOG_MARK, APLOG_ERR, 0, ppcb->s, ++ APLOGNO(10144) ++ "Init: Failed to create pass phrase pipe '%s'", ++ sc->server->pphrase_dialog_path); ++ return 0; ++ } ++ } ++ ap_log_error(APLOG_MARK, APLOG_INFO, 0, ppcb->s, APLOGNO(10145) ++ "Init: Requesting pass phrase via piped dialog"); ++ } ++ else { /* sc->server->pphrase_dialog_type == SSL_PPTYPE_BUILTIN */ ++#ifdef 
WIN32 ++ ap_log_error(APLOG_MARK, APLOG_ERR, 0, ppcb->s, APLOGNO(10146) ++ "Init: Failed to create pass phrase pipe '%s'", ++ sc->server->pphrase_dialog_path); ++ return 0; ++#else ++ /* ++ * stderr has already been redirected to the error_log. ++ * rather than attempting to temporarily rehook it to the terminal, ++ * we print the prompt to stdout before EVP_read_pw_string turns ++ * off tty echo ++ */ ++ apr_file_open_stdout(&writetty, ppcb->p); ++ ++ ap_log_error(APLOG_MARK, APLOG_INFO, 0, ppcb->s, APLOGNO(10147) ++ "Init: Requesting pass phrase via builtin terminal " ++ "dialog"); ++#endif ++ } ++ ++ /* ++ * The first time display a header to inform the user about what ++ * program he actually speaks to, which module is responsible for ++ * this terminal dialog and why to the hell he has to enter ++ * something... ++ */ ++ if (ppcb->nPassPhraseDialog == 1) { ++ apr_file_printf(writetty, "%s mod_ssl (Pass Phrase Dialog)\n", ++ AP_SERVER_BASEVERSION); ++ apr_file_printf(writetty, ++ "A pass phrase is required to access the private key.\n"); ++ } ++ if (ppcb->bPassPhraseDialogOnce) { ++ ppcb->bPassPhraseDialogOnce = FALSE; ++ apr_file_printf(writetty, "\n"); ++ apr_file_printf(writetty, "Private key %s (%s)\n", ++ ppcb->key_id, ppcb->pkey_file); ++ } ++ } ++ ++ return 1; ++} ++ ++static int passphrase_ui_read(UI *ui, UI_STRING *uis) ++{ ++ pphrase_cb_arg_t *ppcb = UI_get0_user_data(ui); ++ SSLSrvConfigRec *sc = mySrvConfig(ppcb->s); ++ const char *prompt; ++ int i; ++ int bufsize; ++ int len; ++ char *buf; ++ ++ prompt = UI_get0_output_string(uis); ++ if (prompt == NULL) { ++ prompt = "Enter pass phrase:"; ++ } ++ ++ /* ++ * Get the maximum expected size and allocate the buffer ++ */ ++ bufsize = UI_get_result_maxsize(uis); ++ buf = apr_pcalloc(ppcb->p, bufsize); ++ ++ if (sc->server->pphrase_dialog_type == SSL_PPTYPE_BUILTIN ++ || sc->server->pphrase_dialog_type == SSL_PPTYPE_PIPE) { ++ /* ++ * Get the pass phrase through a callback. 
++ * Empty input is not accepted. ++ */ ++ for (;;) { ++ if (sc->server->pphrase_dialog_type == SSL_PPTYPE_PIPE) { ++ i = pipe_get_passwd_cb(buf, bufsize, "", FALSE); ++ } ++ else { /* sc->server->pphrase_dialog_type == SSL_PPTYPE_BUILTIN */ ++ i = EVP_read_pw_string(buf, bufsize, "", FALSE); ++ } ++ if (i != 0) { ++ OPENSSL_cleanse(buf, bufsize); ++ return 0; ++ } ++ len = strlen(buf); ++ if (len < 1){ ++ apr_file_printf(writetty, "Apache:mod_ssl:Error: Pass phrase" ++ "empty (needs to be at least 1 character).\n"); ++ apr_file_puts(prompt, writetty); ++ } ++ else { ++ break; ++ } ++ } ++ } ++ /* ++ * Filter program ++ */ ++ else if (sc->server->pphrase_dialog_type == SSL_PPTYPE_FILTER) { ++ const char *cmd = sc->server->pphrase_dialog_path; ++ const char **argv = apr_palloc(ppcb->p, sizeof(char *) * 3); ++ char *result; ++ ++ ap_log_error(APLOG_MARK, APLOG_INFO, 0, ppcb->s, APLOGNO(10148) ++ "Init: Requesting pass phrase from dialog filter " ++ "program (%s)", cmd); ++ ++ argv[0] = cmd; ++ argv[1] = ppcb->key_id; ++ argv[2] = NULL; ++ ++ result = ssl_util_readfilter(ppcb->s, ppcb->p, cmd, argv); ++ apr_cpystrn(buf, result, bufsize); ++ len = strlen(buf); ++ } ++ ++ /* ++ * Ok, we now have the pass phrase, so give it back ++ */ ++ ppcb->cpPassPhraseCur = apr_pstrdup(ppcb->p, buf); ++ UI_set_result(ui, uis, buf); ++ ++ /* Clear sensitive data. 
*/ ++ OPENSSL_cleanse(buf, bufsize); ++ return 1; ++} ++ ++static int passphrase_ui_write(UI *ui, UI_STRING *uis) ++{ ++ pphrase_cb_arg_t *ppcb = UI_get0_user_data(ui); ++ SSLSrvConfigRec *sc; ++ const char *prompt; ++ ++ sc = mySrvConfig(ppcb->s); ++ ++ if (sc->server->pphrase_dialog_type == SSL_PPTYPE_BUILTIN ++ || sc->server->pphrase_dialog_type == SSL_PPTYPE_PIPE) { ++ prompt = UI_get0_output_string(uis); ++ apr_file_puts(prompt, writetty); ++ } ++ ++ return 1; ++} ++ ++static int passphrase_ui_close(UI *ui) ++{ ++ /* ++ * Close the pipes if they were opened ++ */ ++ if (readtty) { ++ apr_file_close(readtty); ++ apr_file_close(writetty); ++ readtty = writetty = NULL; ++ } ++ return 1; ++} ++ ++static apr_status_t pp_ui_method_cleanup(void *uip) ++{ ++ UI_METHOD *uim = uip; ++ ++ UI_destroy_method(uim); ++ ++ return APR_SUCCESS; ++} ++ ++static UI_METHOD *get_passphrase_ui(apr_pool_t *p) ++{ ++ UI_METHOD *ui_method = UI_create_method("Passphrase UI"); ++ ++ UI_method_set_opener(ui_method, passphrase_ui_open); ++ UI_method_set_reader(ui_method, passphrase_ui_read); ++ UI_method_set_writer(ui_method, passphrase_ui_write); ++ UI_method_set_closer(ui_method, passphrase_ui_close); ++ ++ apr_pool_cleanup_register(p, ui_method, pp_ui_method_cleanup, ++ pp_ui_method_cleanup); ++ ++ return ui_method; ++} ++ ++ ++apr_status_t modssl_load_engine_keypair(server_rec *s, apr_pool_t *p, ++ const char *vhostid, ++ const char *certid, const char *keyid, ++ X509 **pubkey, EVP_PKEY **privkey) ++{ ++ const char *c, *scheme; ++ ENGINE *e; ++ UI_METHOD *ui_method = get_passphrase_ui(p); ++ pphrase_cb_arg_t ppcb; ++ ++ memset(&ppcb, 0, sizeof ppcb); ++ ppcb.s = s; ++ ppcb.p = p; ++ ppcb.bPassPhraseDialogOnce = TRUE; ++ ppcb.key_id = vhostid; ++ ppcb.pkey_file = keyid; ++ ++ c = ap_strchr_c(keyid, ':'); ++ if (!c || c == keyid) { ++ ap_log_error(APLOG_MARK, APLOG_EMERG, 0, s, APLOGNO(10131) ++ "Init: Unrecognized private key identifier `%s'", ++ keyid); ++ return ssl_die(s); ++ } ++ ++ 
scheme = apr_pstrmemdup(p, keyid, c - keyid); ++ if (!(e = ENGINE_by_id(scheme))) { ++ ap_log_error(APLOG_MARK, APLOG_EMERG, 0, s, APLOGNO(10132) ++ "Init: Failed to load engine for private key %s", ++ keyid); ++ ssl_log_ssl_error(SSLLOG_MARK, APLOG_EMERG, s); ++ return ssl_die(s); ++ } ++ ++ if (!ENGINE_init(e)) { ++ ap_log_error(APLOG_MARK, APLOG_EMERG, 0, s, APLOGNO(10149) ++ "Init: Failed to initialize engine %s for private key %s", ++ scheme, keyid); ++ ssl_log_ssl_error(SSLLOG_MARK, APLOG_EMERG, s); ++ return ssl_die(s); ++ } ++ ++ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, ++ "Init: Initialized engine %s for private key %s", ++ scheme, keyid); ++ ++ if (APLOGdebug(s)) { ++ ENGINE_ctrl_cmd_string(e, "VERBOSE", NULL, 0); ++ } ++ ++ if (certid) { ++ struct { ++ const char *cert_id; ++ X509 *cert; ++ } params = { certid, NULL }; ++ ++ if (!ENGINE_ctrl_cmd(e, "LOAD_CERT_CTRL", 0, ¶ms, NULL, 1)) { ++ ap_log_error(APLOG_MARK, APLOG_EMERG, 0, s, APLOGNO(10136) ++ "Init: Unable to get the certificate"); ++ ssl_log_ssl_error(SSLLOG_MARK, APLOG_EMERG, s); ++ return ssl_die(s); ++ } ++ ++ *pubkey = params.cert; ++ } ++ ++ *privkey = ENGINE_load_private_key(e, keyid, ui_method, &ppcb); ++ if (*privkey == NULL) { ++ ap_log_error(APLOG_MARK, APLOG_EMERG, 0, s, APLOGNO(10133) ++ "Init: Unable to get the private key"); ++ ssl_log_ssl_error(SSLLOG_MARK, APLOG_EMERG, s); ++ return ssl_die(s); ++ } ++ ++ ENGINE_finish(e); ++ ENGINE_free(e); ++ ++ return APR_SUCCESS; ++} ++#endif +--- httpd-2.4.35/modules/ssl/ssl_private.h.r1830819+ ++++ httpd-2.4.35/modules/ssl/ssl_private.h +@@ -986,21 +986,28 @@ + apr_status_t ssl_load_encrypted_pkey(server_rec *, apr_pool_t *, int, + const char *, apr_array_header_t **); + ++/* Load public and/or private key from the configured ENGINE. Private ++ * key returned as *pkey. certid can be NULL, in which case *pubkey ++ * is not altered. Errors logged on failure. 
*/ ++apr_status_t modssl_load_engine_keypair(server_rec *s, apr_pool_t *p, ++ const char *vhostid, ++ const char *certid, const char *keyid, ++ X509 **pubkey, EVP_PKEY **privkey); ++ + /** Diffie-Hellman Parameter Support */ + DH *ssl_dh_GetParamFromFile(const char *); + #ifdef HAVE_ECC + EC_GROUP *ssl_ec_GetParamFromFile(const char *); + #endif + +-unsigned char *ssl_asn1_table_set(apr_hash_t *table, +- const char *key, +- long int length); +- +-ssl_asn1_t *ssl_asn1_table_get(apr_hash_t *table, +- const char *key); +- +-void ssl_asn1_table_unset(apr_hash_t *table, +- const char *key); ++/* Store the EVP_PKEY key (serialized into DER) in the hash table with ++ * key, returning the ssl_asn1_t structure pointer. */ ++ssl_asn1_t *ssl_asn1_table_set(apr_hash_t *table, const char *key, ++ EVP_PKEY *pkey); ++/* Retrieve the ssl_asn1_t structure with given key from the hash. */ ++ssl_asn1_t *ssl_asn1_table_get(apr_hash_t *table, const char *key); ++/* Remove and free the ssl_asn1_t structure with given key. */ ++void ssl_asn1_table_unset(apr_hash_t *table, const char *key); + + /** Mutex Support */ + int ssl_mutex_init(server_rec *, apr_pool_t *); +@@ -1088,6 +1095,10 @@ + int ssl_is_challenge(conn_rec *c, const char *servername, + X509 **pcert, EVP_PKEY **pkey); + ++/* Returns non-zero if the cert/key filename should be handled through ++ * the configured ENGINE. */ ++int modssl_is_engine_id(const char *name); ++ + #endif /* SSL_PRIVATE_H */ + /** @} */ + +--- httpd-2.4.35/modules/ssl/ssl_util.c.r1830819+ ++++ httpd-2.4.35/modules/ssl/ssl_util.c +@@ -175,45 +175,37 @@ + return TRUE; + } + +-/* +- * certain key data needs to survive restarts, +- * which are stored in the user data table of s->process->pool. +- * to prevent "leaking" of this data, we use malloc/free +- * rather than apr_palloc and these wrappers to help make sure +- * we do not leak the malloc-ed data. 
+- */ +-unsigned char *ssl_asn1_table_set(apr_hash_t *table, +- const char *key, +- long int length) ++/* Decrypted private keys are cached to survive restarts. The cached ++ * data must have lifetime of the process (hence malloc/free rather ++ * than pools), and uses raw DER since the EVP_PKEY structure ++ * internals may not survive across a module reload. */ ++ssl_asn1_t *ssl_asn1_table_set(apr_hash_t *table, const char *key, ++ EVP_PKEY *pkey) + { + apr_ssize_t klen = strlen(key); + ssl_asn1_t *asn1 = apr_hash_get(table, key, klen); ++ apr_size_t length = i2d_PrivateKey(pkey, NULL); ++ unsigned char *p; + +- /* +- * if a value for this key already exists, +- * reuse as much of the already malloc-ed data +- * as possible. +- */ ++ /* Re-use structure if cached previously. */ + if (asn1) { + if (asn1->nData != length) { +- free(asn1->cpData); /* XXX: realloc? */ +- asn1->cpData = NULL; ++ asn1->cpData = ap_realloc(asn1->cpData, length); + } + } + else { + asn1 = ap_malloc(sizeof(*asn1)); + asn1->source_mtime = 0; /* used as a note for encrypted private keys */ +- asn1->cpData = NULL; +- } +- +- asn1->nData = length; +- if (!asn1->cpData) { + asn1->cpData = ap_malloc(length); ++ ++ apr_hash_set(table, key, klen, asn1); + } + +- apr_hash_set(table, key, klen, asn1); ++ asn1->nData = length; ++ p = asn1->cpData; ++ i2d_PrivateKey(pkey, &p); /* increases p by length */ + +- return asn1->cpData; /* caller will assign a value to this */ ++ return asn1; + } + + ssl_asn1_t *ssl_asn1_table_get(apr_hash_t *table, +@@ -463,3 +455,13 @@ + } + + #endif /* #if APR_HAS_THREADS && MODSSL_USE_OPENSSL_PRE_1_1_API */ ++ ++int modssl_is_engine_id(const char *name) ++{ ++#if defined(HAVE_OPENSSL_ENGINE_H) && defined(HAVE_ENGINE_INIT) ++ /* ### Can handle any other special ENGINE key names here? 
*/ ++ return strncmp(name, "pkcs11:", 7) == 0; ++#else ++ return 0; ++#endif ++} +--- httpd-2.4.35/modules/ssl/ssl_util_ssl.c.r1830819+ ++++ httpd-2.4.35/modules/ssl/ssl_util_ssl.c +@@ -74,7 +74,7 @@ + ** _________________________________________________________________ + */ + +-EVP_PKEY *modssl_read_privatekey(const char* filename, EVP_PKEY **key, pem_password_cb *cb, void *s) ++EVP_PKEY *modssl_read_privatekey(const char *filename, pem_password_cb *cb, void *s) + { + EVP_PKEY *rc; + BIO *bioS; +@@ -83,7 +83,7 @@ + /* 1. try PEM (= DER+Base64+headers) */ + if ((bioS=BIO_new_file(filename, "r")) == NULL) + return NULL; +- rc = PEM_read_bio_PrivateKey(bioS, key, cb, s); ++ rc = PEM_read_bio_PrivateKey(bioS, NULL, cb, s); + BIO_free(bioS); + + if (rc == NULL) { +@@ -107,41 +107,9 @@ + BIO_free(bioS); + } + } +- if (rc != NULL && key != NULL) { +- if (*key != NULL) +- EVP_PKEY_free(*key); +- *key = rc; +- } + return rc; + } + +-typedef struct { +- const char *pass; +- int pass_len; +-} pass_ctx; +- +-static int provide_pass(char *buf, int size, int rwflag, void *baton) +-{ +- pass_ctx *ctx = baton; +- if (ctx->pass_len > 0) { +- if (ctx->pass_len < size) { +- size = (int)ctx->pass_len; +- } +- memcpy(buf, ctx->pass, size); +- } +- return ctx->pass_len; +-} +- +-EVP_PKEY *modssl_read_encrypted_pkey(const char *filename, EVP_PKEY **key, +- const char *pass, apr_size_t pass_len) +-{ +- pass_ctx ctx; +- +- ctx.pass = pass; +- ctx.pass_len = pass_len; +- return modssl_read_privatekey(filename, key, provide_pass, &ctx); +-} +- + /* _________________________________________________________________ + ** + ** Smart shutdown +--- httpd-2.4.35/modules/ssl/ssl_util_ssl.h.r1830819+ ++++ httpd-2.4.35/modules/ssl/ssl_util_ssl.h +@@ -64,8 +64,11 @@ + void modssl_init_app_data2_idx(void); + void *modssl_get_app_data2(SSL *); + void modssl_set_app_data2(SSL *, void *); +-EVP_PKEY *modssl_read_privatekey(const char *, EVP_PKEY **, pem_password_cb *, void *); +-EVP_PKEY 
*modssl_read_encrypted_pkey(const char *, EVP_PKEY **, const char *, apr_size_t); ++ ++/* Read private key from filename in either PEM or raw base64(DER) ++ * format, using password entry callback cb and userdata. */ ++EVP_PKEY *modssl_read_privatekey(const char *filename, pem_password_cb *cb, void *ud); ++ + int modssl_smart_shutdown(SSL *ssl); + BOOL modssl_X509_getBC(X509 *, int *, int *); + char *modssl_X509_NAME_ENTRY_to_string(apr_pool_t *p, X509_NAME_ENTRY *xsne, diff --git a/SOURCES/httpd-2.4.35-r1842888.patch b/SOURCES/httpd-2.4.35-r1842888.patch new file mode 100644 index 0000000..02a25dc --- /dev/null +++ b/SOURCES/httpd-2.4.35-r1842888.patch @@ -0,0 +1,22 @@ +diff --git a/modules/filters/mod_deflate.c b/modules/filters/mod_deflate.c +index d218bab..9f86b09 100644 +--- a/modules/filters/mod_deflate.c ++++ b/modules/filters/mod_deflate.c +@@ -864,7 +864,7 @@ static apr_status_t deflate_out_filter(ap_filter_t *f, + + if (c->note_output_name) { + apr_table_setn(r->notes, c->note_output_name, +- (ctx->stream.total_in > 0) ++ (ctx->stream.total_out > 0) + ? apr_off_t_toa(r->pool, + ctx->stream.total_out) + : "-"); +@@ -1336,8 +1336,6 @@ static apr_status_t deflate_in_filter(ap_filter_t *f, + ctx->stream.next_in = (unsigned char *)data; + ctx->stream.avail_in = (int)len; + +- zRC = Z_OK; +- + if (!ctx->validation_buffer) { + while (ctx->stream.avail_in != 0) { + if (ctx->stream.avail_out == 0) { diff --git a/SOURCES/httpd-2.4.35-selinux.patch b/SOURCES/httpd-2.4.35-selinux.patch new file mode 100644 index 0000000..574259b --- /dev/null +++ b/SOURCES/httpd-2.4.35-selinux.patch @@ -0,0 +1,65 @@ + +Log the SELinux context at startup. 
+ +Upstream-Status: unlikely to be any interest in this upstream + +diff --git a/configure.in b/configure.in +index eedba50..a208b53 100644 +--- a/configure.in ++++ b/configure.in +@@ -484,6 +484,11 @@ getloadavg + dnl confirm that a void pointer is large enough to store a long integer + APACHE_CHECK_VOID_PTR_LEN + ++AC_CHECK_LIB(selinux, is_selinux_enabled, [ ++ AC_DEFINE(HAVE_SELINUX, 1, [Defined if SELinux is supported]) ++ APR_ADDTO(HTTPD_LIBS, [-lselinux]) ++]) ++ + AC_CACHE_CHECK([for gettid()], ac_cv_gettid, + [AC_TRY_RUN(#define _GNU_SOURCE + #include +diff --git a/server/core.c b/server/core.c +index ec74029..cb8e463 100644 +--- a/server/core.c ++++ b/server/core.c +@@ -59,6 +59,10 @@ + #include + #endif + ++#ifdef HAVE_SELINUX ++#include ++#endif ++ + /* LimitRequestBody handling */ + #define AP_LIMIT_REQ_BODY_UNSET ((apr_off_t) -1) + #define AP_DEFAULT_LIMIT_REQ_BODY ((apr_off_t) 0) +@@ -4971,6 +4975,28 @@ static int core_post_config(apr_pool_t *pconf, apr_pool_t *plog, apr_pool_t *pte + } + #endif + ++#ifdef HAVE_SELINUX ++ { ++ static int already_warned = 0; ++ int is_enabled = is_selinux_enabled() > 0; ++ ++ if (is_enabled && !already_warned) { ++ security_context_t con; ++ ++ if (getcon(&con) == 0) { ++ ++ ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, NULL, ++ "SELinux policy enabled; " ++ "httpd running as context %s", con); ++ ++ already_warned = 1; ++ ++ freecon(con); ++ } ++ } ++ } ++#endif ++ + return OK; + } + diff --git a/SOURCES/httpd-2.4.35-sslciphdefault.patch b/SOURCES/httpd-2.4.35-sslciphdefault.patch new file mode 100644 index 0000000..9e740dd --- /dev/null +++ b/SOURCES/httpd-2.4.35-sslciphdefault.patch @@ -0,0 +1,31 @@ +diff --git a/modules/ssl/ssl_engine_config.c b/modules/ssl/ssl_engine_config.c +index 4cfd2d0..6ac55bd 100644 +--- a/modules/ssl/ssl_engine_config.c ++++ b/modules/ssl/ssl_engine_config.c +@@ -776,9 +776,11 @@ const char *ssl_cmd_SSLCipherSuite(cmd_parms *cmd, + } + + if (!strcmp("SSL", arg1)) { +- /* always disable null 
and export ciphers */ +- arg2 = apr_pstrcat(cmd->pool, arg2, ":!aNULL:!eNULL:!EXP", NULL); + if (cmd->path) { ++ /* Disable null and export ciphers by default, except for PROFILE= ++ * configs where the parser doesn't cope. */ ++ if (strncmp(arg2, "PROFILE=", 8) != 0) ++ arg2 = apr_pstrcat(cmd->pool, arg2, ":!aNULL:!eNULL:!EXP", NULL); + dc->szCipherSuite = arg2; + } + else { +@@ -1542,8 +1544,10 @@ const char *ssl_cmd_SSLProxyCipherSuite(cmd_parms *cmd, + } + + if (!strcmp("SSL", arg1)) { +- /* always disable null and export ciphers */ +- arg2 = apr_pstrcat(cmd->pool, arg2, ":!aNULL:!eNULL:!EXP", NULL); ++ /* Disable null and export ciphers by default, except for PROFILE= ++ * configs where the parser doesn't cope. */ ++ if (strncmp(arg2, "PROFILE=", 8) != 0) ++ arg2 = apr_pstrcat(cmd->pool, arg2, ":!aNULL:!eNULL:!EXP", NULL); + dc->proxy->auth.cipher_suite = arg2; + return NULL; + } diff --git a/SOURCES/httpd-2.4.35-systemd.patch b/SOURCES/httpd-2.4.35-systemd.patch new file mode 100644 index 0000000..7f5ee3b --- /dev/null +++ b/SOURCES/httpd-2.4.35-systemd.patch @@ -0,0 +1,245 @@ +--- httpd-2.4.33/modules/arch/unix/config5.m4.systemd ++++ httpd-2.4.33/modules/arch/unix/config5.m4 +@@ -18,6 +18,16 @@ + fi + ]) + ++APACHE_MODULE(systemd, Systemd support, , , all, [ ++ if test "${ac_cv_header_systemd_sd_daemon_h}" = "no" || test -z "${SYSTEMD_LIBS}"; then ++ AC_MSG_WARN([Your system does not support systemd.]) ++ enable_systemd="no" ++ else ++ APR_ADDTO(MOD_SYSTEMD_LDADD, [$SYSTEMD_LIBS]) ++ enable_systemd="yes" ++ fi ++]) ++ + APR_ADDTO(INCLUDES, [-I\$(top_srcdir)/$modpath_current]) + + APACHE_MODPATH_FINISH +--- httpd-2.4.33/modules/arch/unix/mod_systemd.c.systemd ++++ httpd-2.4.33/modules/arch/unix/mod_systemd.c +@@ -0,0 +1,223 @@ ++/* Licensed to the Apache Software Foundation (ASF) under one or more ++ * contributor license agreements. See the NOTICE file distributed with ++ * this work for additional information regarding copyright ownership. 
++ * The ASF licenses this file to You under the Apache License, Version 2.0 ++ * (the "License"); you may not use this file except in compliance with ++ * the License. You may obtain a copy of the License at ++ * ++ * http://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, ++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ * See the License for the specific language governing permissions and ++ * limitations under the License. ++ * ++ */ ++ ++#include ++#include ++#include "ap_mpm.h" ++#include ++#include ++#include ++#include ++#include ++#include ++#include "unixd.h" ++#include "scoreboard.h" ++#include "mpm_common.h" ++ ++#include "systemd/sd-daemon.h" ++#include "systemd/sd-journal.h" ++ ++#if APR_HAVE_UNISTD_H ++#include ++#endif ++ ++static int shutdown_timer = 0; ++static int shutdown_counter = 0; ++static unsigned long bytes_served; ++static pid_t mainpid; ++static char describe_listeners[50]; ++ ++static int systemd_pre_config(apr_pool_t *pconf, apr_pool_t *plog, ++ apr_pool_t *ptemp) ++{ ++ sd_notify(0, ++ "RELOADING=1\n" ++ "STATUS=Reading configuration...\n"); ++ ap_extended_status = 1; ++ return OK; ++} ++ ++static char *dump_listener(ap_listen_rec *lr, apr_pool_t *p) ++{ ++ apr_sockaddr_t *sa = lr->bind_addr; ++ char addr[128]; ++ ++ if (apr_sockaddr_is_wildcard(sa)) { ++ return apr_pstrcat(p, "port ", apr_itoa(p, sa->port), NULL); ++ } ++ ++ apr_sockaddr_ip_getbuf(addr, sizeof addr, sa); ++ ++ return apr_psprintf(p, "%s port %u", addr, sa->port); ++} ++ ++static int systemd_post_config(apr_pool_t *pconf, apr_pool_t *plog, ++ apr_pool_t *ptemp, server_rec *s) ++{ ++ ap_listen_rec *lr; ++ apr_size_t plen = sizeof describe_listeners; ++ char *p = describe_listeners; ++ ++ if (ap_state_query(AP_SQ_MAIN_STATE) == AP_SQ_MS_CREATE_PRE_CONFIG) ++ return OK; ++ ++ for (lr = ap_listeners; lr; lr 
= lr->next) { ++ char *s = dump_listener(lr, ptemp); ++ ++ if (strlen(s) + 3 < plen) { ++ char *newp = apr_cpystrn(p, s, plen); ++ if (lr->next) ++ newp = apr_cpystrn(newp, ", ", 3); ++ plen -= newp - p; ++ p = newp; ++ } ++ else { ++ if (plen < 4) { ++ p = describe_listeners + sizeof describe_listeners - 4; ++ plen = 4; ++ } ++ apr_cpystrn(p, "...", plen); ++ break; ++ } ++ } ++ ++ sd_journal_print(LOG_INFO, "Server configured, listening on: %s", describe_listeners); ++ ++ return OK; ++} ++ ++static int systemd_pre_mpm(apr_pool_t *p, ap_scoreboard_e sb_type) ++{ ++ int rv; ++ ++ mainpid = getpid(); ++ ++ rv = sd_notifyf(0, "READY=1\n" ++ "STATUS=Started, listening on: %s\n" ++ "MAINPID=%" APR_PID_T_FMT, ++ describe_listeners, mainpid); ++ if (rv < 0) { ++ ap_log_perror(APLOG_MARK, APLOG_ERR, 0, p, APLOGNO(02395) ++ "sd_notifyf returned an error %d", rv); ++ } ++ ++ return OK; ++} ++ ++static int systemd_monitor(apr_pool_t *p, server_rec *s) ++{ ++ ap_sload_t sload; ++ apr_interval_time_t up_time; ++ char bps[5]; ++ int rv; ++ ++ if (!ap_extended_status) { ++ /* Nothing useful to report if ExtendedStatus disabled. 
*/ ++ return DECLINED; ++ } ++ ++ ap_get_sload(&sload); ++ ++ if (sload.access_count == 0) { ++ rv = sd_notifyf(0, "READY=1\n" ++ "STATUS=Running, listening on: %s\n", ++ describe_listeners); ++ } ++ else { ++ /* up_time in seconds */ ++ up_time = (apr_uint32_t) apr_time_sec(apr_time_now() - ++ ap_scoreboard_image->global->restart_time); ++ ++ apr_strfsize((unsigned long)((float) (sload.bytes_served) ++ / (float) up_time), bps); ++ ++ rv = sd_notifyf(0, "READY=1\n" ++ "STATUS=Total requests: %lu; Idle/Busy workers %d/%d;" ++ "Requests/sec: %.3g; Bytes served/sec: %sB/sec\n", ++ sload.access_count, sload.idle, sload.busy, ++ ((float) sload.access_count) / (float) up_time, bps); ++ } ++ ++ if (rv < 0) { ++ ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(02396) ++ "sd_notifyf returned an error %d", rv); ++ } ++ ++ /* Shutdown httpd when nothing is sent for shutdown_timer seconds. */ ++ if (sload.bytes_served == bytes_served) { ++ /* mpm_common.c: INTERVAL_OF_WRITABLE_PROBES is 10 */ ++ shutdown_counter += 10; ++ if (shutdown_timer > 0 && shutdown_counter >= shutdown_timer) { ++ rv = sd_notifyf(0, "READY=1\n" ++ "STATUS=Stopped as result of IdleShutdown " ++ "timeout."); ++ if (rv < 0) { ++ ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(02804) ++ "sd_notifyf returned an error %d", rv); ++ } ++ kill(mainpid, AP_SIG_GRACEFUL); ++ } ++ } ++ else { ++ shutdown_counter = 0; ++ } ++ ++ bytes_served = sload.bytes_served; ++ ++ return DECLINED; ++} ++ ++static void systemd_register_hooks(apr_pool_t *p) ++{ ++ /* Enable ap_extended_status. */ ++ ap_hook_pre_config(systemd_pre_config, NULL, NULL, APR_HOOK_LAST); ++ /* Grab the listener config. */ ++ ap_hook_post_config(systemd_post_config, NULL, NULL, APR_HOOK_LAST); ++ /* We know the PID in this hook ... 
*/ ++ ap_hook_pre_mpm(systemd_pre_mpm, NULL, NULL, APR_HOOK_LAST); ++ /* Used to update httpd's status line using sd_notifyf */ ++ ap_hook_monitor(systemd_monitor, NULL, NULL, APR_HOOK_MIDDLE); ++} ++ ++static const char *set_shutdown_timer(cmd_parms *cmd, void *dummy, ++ const char *arg) ++{ ++ const char *err = ap_check_cmd_context(cmd, GLOBAL_ONLY); ++ if (err != NULL) { ++ return err; ++ } ++ ++ shutdown_timer = atoi(arg); ++ return NULL; ++} ++ ++static const command_rec systemd_cmds[] = ++{ ++AP_INIT_TAKE1("IdleShutdown", set_shutdown_timer, NULL, RSRC_CONF, ++ "Number of seconds in idle-state after which httpd is shutdown"), ++ {NULL} ++}; ++ ++AP_DECLARE_MODULE(systemd) = { ++ STANDARD20_MODULE_STUFF, ++ NULL, ++ NULL, ++ NULL, ++ NULL, ++ systemd_cmds, ++ systemd_register_hooks, ++}; diff --git a/SOURCES/httpd-2.4.37-CVE-2019-0211.patch b/SOURCES/httpd-2.4.37-CVE-2019-0211.patch new file mode 100644 index 0000000..043476a --- /dev/null +++ b/SOURCES/httpd-2.4.37-CVE-2019-0211.patch @@ -0,0 +1,207 @@ +diff --git a/include/scoreboard.h b/include/scoreboard.h +index 9376da2..92d198d 100644 +--- a/include/scoreboard.h ++++ b/include/scoreboard.h +@@ -148,7 +148,9 @@ struct process_score { + apr_uint32_t lingering_close; /* async connections in lingering close */ + apr_uint32_t keep_alive; /* async connections in keep alive */ + apr_uint32_t suspended; /* connections suspended by some module */ +- int bucket; /* Listener bucket used by this child */ ++ int bucket; /* Listener bucket used by this child; this field is DEPRECATED ++ * and no longer updated by the MPMs (i.e. always zero). 
++ */ + }; + + /* Scoreboard is now in 'local' memory, since it isn't updated once created, +diff --git a/server/mpm/event/event.c b/server/mpm/event/event.c +index ffe8a23..048ae61 100644 +--- a/server/mpm/event/event.c ++++ b/server/mpm/event/event.c +@@ -2695,7 +2695,6 @@ static int make_child(server_rec * s, int slot, int bucket) + + ap_scoreboard_image->parent[slot].quiescing = 0; + ap_scoreboard_image->parent[slot].not_accepting = 0; +- ap_scoreboard_image->parent[slot].bucket = bucket; + event_note_child_started(slot, pid); + active_daemons++; + retained->total_daemons++; +@@ -2734,6 +2733,7 @@ static void perform_idle_server_maintenance(int child_bucket, int num_buckets) + * that threads_per_child is always > 0 */ + int status = SERVER_DEAD; + int child_threads_active = 0; ++ int bucket = i % num_buckets; + + if (i >= retained->max_daemons_limit && + free_length == retained->idle_spawn_rate[child_bucket]) { +@@ -2757,7 +2757,7 @@ static void perform_idle_server_maintenance(int child_bucket, int num_buckets) + */ + if (status <= SERVER_READY && !ps->quiescing && !ps->not_accepting + && ps->generation == retained->mpm->my_generation +- && ps->bucket == child_bucket) ++ && bucket == child_bucket) + { + ++idle_thread_count; + } +@@ -2768,7 +2768,9 @@ static void perform_idle_server_maintenance(int child_bucket, int num_buckets) + last_non_dead = i; + } + active_thread_count += child_threads_active; +- if (!ps->pid && free_length < retained->idle_spawn_rate[child_bucket]) ++ if (!ps->pid ++ && bucket == child_bucket ++ && free_length < retained->idle_spawn_rate[child_bucket]) + free_slots[free_length++] = i; + else if (child_threads_active == threads_per_child) + had_healthy_child = 1; +@@ -2951,13 +2953,14 @@ static void server_main_loop(int remaining_children_to_start, int num_buckets) + retained->total_daemons--; + if (processed_status == APEXIT_CHILDSICK) { + /* resource shortage, minimize the fork rate */ +- retained->idle_spawn_rate[ps->bucket] = 1; ++ 
retained->idle_spawn_rate[child_slot % num_buckets] = 1; + } + else if (remaining_children_to_start) { + /* we're still doing a 1-for-1 replacement of dead + * children with new children + */ +- make_child(ap_server_conf, child_slot, ps->bucket); ++ make_child(ap_server_conf, child_slot, ++ child_slot % num_buckets); + --remaining_children_to_start; + } + } +diff --git a/server/mpm/prefork/prefork.c b/server/mpm/prefork/prefork.c +index 8efda72..7c00625 100644 +--- a/server/mpm/prefork/prefork.c ++++ b/server/mpm/prefork/prefork.c +@@ -637,8 +637,9 @@ static void child_main(int child_num_arg, int child_bucket) + } + + +-static int make_child(server_rec *s, int slot, int bucket) ++static int make_child(server_rec *s, int slot) + { ++ int bucket = slot % retained->mpm->num_buckets; + int pid; + + if (slot + 1 > retained->max_daemons_limit) { +@@ -716,7 +717,6 @@ static int make_child(server_rec *s, int slot, int bucket) + child_main(slot, bucket); + } + +- ap_scoreboard_image->parent[slot].bucket = bucket; + prefork_note_child_started(slot, pid); + + return 0; +@@ -732,7 +732,7 @@ static void startup_children(int number_to_start) + if (ap_scoreboard_image->servers[i][0].status != SERVER_DEAD) { + continue; + } +- if (make_child(ap_server_conf, i, i % retained->mpm->num_buckets) < 0) { ++ if (make_child(ap_server_conf, i) < 0) { + break; + } + --number_to_start; +@@ -741,8 +741,6 @@ static void startup_children(int number_to_start) + + static void perform_idle_server_maintenance(apr_pool_t *p) + { +- static int bucket_make_child_record = -1; +- static int bucket_kill_child_record = -1; + int i; + int idle_count; + worker_score *ws; +@@ -789,6 +787,7 @@ static void perform_idle_server_maintenance(apr_pool_t *p) + } + retained->max_daemons_limit = last_non_dead + 1; + if (idle_count > ap_daemons_max_free) { ++ static int bucket_kill_child_record = -1; + /* kill off one child... 
we use the pod because that'll cause it to + * shut down gracefully, in case it happened to pick up a request + * while we were counting +@@ -819,10 +818,7 @@ static void perform_idle_server_maintenance(apr_pool_t *p) + idle_count, total_non_dead); + } + for (i = 0; i < free_length; ++i) { +- bucket_make_child_record++; +- bucket_make_child_record %= retained->mpm->num_buckets; +- make_child(ap_server_conf, free_slots[i], +- bucket_make_child_record); ++ make_child(ap_server_conf, free_slots[i]); + } + /* the next time around we want to spawn twice as many if this + * wasn't good enough, but not if we've just done a graceful +@@ -867,7 +863,7 @@ static int prefork_run(apr_pool_t *_pconf, apr_pool_t *plog, server_rec *s) + + if (one_process) { + AP_MONCONTROL(1); +- make_child(ap_server_conf, 0, 0); ++ make_child(ap_server_conf, 0); + /* NOTREACHED */ + ap_assert(0); + return !OK; +@@ -976,8 +972,7 @@ static int prefork_run(apr_pool_t *_pconf, apr_pool_t *plog, server_rec *s) + /* we're still doing a 1-for-1 replacement of dead + * children with new children + */ +- make_child(ap_server_conf, child_slot, +- ap_get_scoreboard_process(child_slot)->bucket); ++ make_child(ap_server_conf, child_slot); + --remaining_children_to_start; + } + #if APR_HAS_OTHER_CHILD +diff --git a/server/mpm/worker/worker.c b/server/mpm/worker/worker.c +index 8012fe2..a927942 100644 +--- a/server/mpm/worker/worker.c ++++ b/server/mpm/worker/worker.c +@@ -1339,7 +1339,6 @@ static int make_child(server_rec *s, int slot, int bucket) + worker_note_child_lost_slot(slot, pid); + } + ap_scoreboard_image->parent[slot].quiescing = 0; +- ap_scoreboard_image->parent[slot].bucket = bucket; + worker_note_child_started(slot, pid); + return 0; + } +@@ -1388,6 +1387,7 @@ static void perform_idle_server_maintenance(int child_bucket, int num_buckets) + int any_dead_threads = 0; + int all_dead_threads = 1; + int child_threads_active = 0; ++ int bucket = i % num_buckets; + + if (i >= retained->max_daemons_limit 
&& + totally_free_length == retained->idle_spawn_rate[child_bucket]) { +@@ -1420,7 +1420,7 @@ static void perform_idle_server_maintenance(int child_bucket, int num_buckets) + if (status <= SERVER_READY && + !ps->quiescing && + ps->generation == retained->mpm->my_generation && +- ps->bucket == child_bucket) { ++ bucket == child_bucket) { + ++idle_thread_count; + } + if (status >= SERVER_READY && status < SERVER_GRACEFUL) { +@@ -1430,6 +1430,7 @@ static void perform_idle_server_maintenance(int child_bucket, int num_buckets) + } + active_thread_count += child_threads_active; + if (any_dead_threads ++ && bucket == child_bucket + && totally_free_length < retained->idle_spawn_rate[child_bucket] + && free_length < MAX_SPAWN_RATE / num_buckets + && (!ps->pid /* no process in the slot */ +@@ -1615,14 +1616,15 @@ static void server_main_loop(int remaining_children_to_start, int num_buckets) + ps->quiescing = 0; + if (processed_status == APEXIT_CHILDSICK) { + /* resource shortage, minimize the fork rate */ +- retained->idle_spawn_rate[ps->bucket] = 1; ++ retained->idle_spawn_rate[child_slot % num_buckets] = 1; + } + else if (remaining_children_to_start + && child_slot < ap_daemons_limit) { + /* we're still doing a 1-for-1 replacement of dead + * children with new children + */ +- make_child(ap_server_conf, child_slot, ps->bucket); ++ make_child(ap_server_conf, child_slot, ++ child_slot % num_buckets); + --remaining_children_to_start; + } + } diff --git a/SOURCES/httpd-2.4.37-CVE-2019-0215.patch b/SOURCES/httpd-2.4.37-CVE-2019-0215.patch new file mode 100644 index 0000000..2967584 --- /dev/null +++ b/SOURCES/httpd-2.4.37-CVE-2019-0215.patch @@ -0,0 +1,20 @@ +diff --git a/modules/ssl/ssl_engine_kernel.c b/modules/ssl/ssl_engine_kernel.c +index de0ffb0..e6a9f67 100644 +--- a/modules/ssl/ssl_engine_kernel.c ++++ b/modules/ssl/ssl_engine_kernel.c +@@ -1154,6 +1154,7 @@ static int ssl_hook_Access_modern(request_rec *r, SSLSrvConfigRec *sc, SSLDirCon + ssl_log_ssl_error(SSLLOG_MARK, 
APLOG_ERR, r->server); + apr_table_setn(r->notes, "error-notes", + "Reason: Cannot perform Post-Handshake Authentication.
    "); ++ SSL_set_verify(ssl, vmode_inplace, NULL); + return HTTP_FORBIDDEN; + } + +@@ -1175,6 +1176,7 @@ static int ssl_hook_Access_modern(request_rec *r, SSLSrvConfigRec *sc, SSLDirCon + * Finally check for acceptable renegotiation results + */ + if (OK != (rc = ssl_check_post_client_verify(r, sc, dc, sslconn, ssl))) { ++ SSL_set_verify(ssl, vmode_inplace, NULL); + return rc; + } + } diff --git a/SOURCES/httpd-2.4.37-CVE-2019-0217.patch b/SOURCES/httpd-2.4.37-CVE-2019-0217.patch new file mode 100644 index 0000000..1614e72 --- /dev/null +++ b/SOURCES/httpd-2.4.37-CVE-2019-0217.patch @@ -0,0 +1,111 @@ +--- a/modules/aaa/mod_auth_digest.c 2019/03/12 09:24:19 1855297 ++++ b/modules/aaa/mod_auth_digest.c 2019/03/12 09:24:26 1855298 +@@ -92,7 +92,6 @@ + int check_nc; + const char *algorithm; + char *uri_list; +- const char *ha1; + } digest_config_rec; + + +@@ -153,6 +152,7 @@ + apr_time_t nonce_time; + enum hdr_sts auth_hdr_sts; + int needed_auth; ++ const char *ha1; + client_entry *client; + } digest_header_rec; + +@@ -1304,7 +1304,7 @@ + */ + + static authn_status get_hash(request_rec *r, const char *user, +- digest_config_rec *conf) ++ digest_config_rec *conf, const char **rethash) + { + authn_status auth_result; + char *password; +@@ -1356,7 +1356,7 @@ + } while (current_provider); + + if (auth_result == AUTH_USER_FOUND) { +- conf->ha1 = password; ++ *rethash = password; + } + + return auth_result; +@@ -1483,25 +1483,24 @@ + + /* RFC-2069 */ + static const char *old_digest(const request_rec *r, +- const digest_header_rec *resp, const char *ha1) ++ const digest_header_rec *resp) + { + const char *ha2; + + ha2 = ap_md5(r->pool, (unsigned char *)apr_pstrcat(r->pool, resp->method, ":", + resp->uri, NULL)); + return ap_md5(r->pool, +- (unsigned char *)apr_pstrcat(r->pool, ha1, ":", resp->nonce, +- ":", ha2, NULL)); ++ (unsigned char *)apr_pstrcat(r->pool, resp->ha1, ":", ++ resp->nonce, ":", ha2, NULL)); + } + + /* RFC-2617 */ + static const char *new_digest(const 
request_rec *r, +- digest_header_rec *resp, +- const digest_config_rec *conf) ++ digest_header_rec *resp) + { + const char *ha1, *ha2, *a2; + +- ha1 = conf->ha1; ++ ha1 = resp->ha1; + + a2 = apr_pstrcat(r->pool, resp->method, ":", resp->uri, NULL); + ha2 = ap_md5(r->pool, (const unsigned char *)a2); +@@ -1514,7 +1513,6 @@ + NULL)); + } + +- + static void copy_uri_components(apr_uri_t *dst, + apr_uri_t *src, request_rec *r) { + if (src->scheme && src->scheme[0] != '\0') { +@@ -1759,7 +1757,7 @@ + return HTTP_UNAUTHORIZED; + } + +- return_code = get_hash(r, r->user, conf); ++ return_code = get_hash(r, r->user, conf, &resp->ha1); + + if (return_code == AUTH_USER_NOT_FOUND) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01790) +@@ -1789,7 +1787,7 @@ + + if (resp->message_qop == NULL) { + /* old (rfc-2069) style digest */ +- if (strcmp(resp->digest, old_digest(r, resp, conf->ha1))) { ++ if (strcmp(resp->digest, old_digest(r, resp))) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01792) + "user %s: password mismatch: %s", r->user, + r->uri); +@@ -1819,7 +1817,7 @@ + return HTTP_UNAUTHORIZED; + } + +- exp_digest = new_digest(r, resp, conf); ++ exp_digest = new_digest(r, resp); + if (!exp_digest) { + /* we failed to allocate a client struct */ + return HTTP_INTERNAL_SERVER_ERROR; +@@ -1903,7 +1901,7 @@ + + /* calculate rspauth attribute + */ +- ha1 = conf->ha1; ++ ha1 = resp->ha1; + + a2 = apr_pstrcat(r->pool, ":", resp->uri, NULL); + ha2 = ap_md5(r->pool, (const unsigned char *)a2); diff --git a/SOURCES/httpd-2.4.37-CVE-2019-0220.patch b/SOURCES/httpd-2.4.37-CVE-2019-0220.patch new file mode 100644 index 0000000..1fcb68e --- /dev/null +++ b/SOURCES/httpd-2.4.37-CVE-2019-0220.patch @@ -0,0 +1,235 @@ +diff --git a/docs/manual/mod/core.html.en b/docs/manual/mod/core.html.en +index 0a24bc8..20d1e5a 100644 +--- a/docs/manual/mod/core.html.en ++++ b/docs/manual/mod/core.html.en +@@ -97,6 +97,7 @@ available +
  • MaxRangeOverlaps
  • +
  • MaxRangeReversals
  • +
  • MaxRanges
  • ++
  • MergeSlashes
  • +
  • MergeTrailers
  • +
  • Mutex
  • +
  • NameVirtualHost
  • +@@ -3465,6 +3466,30 @@ resource + + +
    top
    ++
    ++ ++ ++ ++ ++ ++ ++ ++
    Description:Controls whether the server merges consecutive slashes in URLs.
    Syntax:MergeSlashes ON | OFF
    Default:MergeSlashes ON
    Context:server config, virtual host
    Status:Core
    Module:core
    Compatibility:Available in Apache HTTP Server 2.4.6 in Red Hat Enterprise Linux 7
    ++

    By default, the server merges (or collapses) multiple consecutive slash ++ ('/') characters in the path component of the request URL.

    ++ ++

    When mapping URL's to the filesystem, these multiple slashes are not ++ significant. However, URL's handled other ways, such as by CGI or proxy, ++ might prefer to retain the significance of multiple consecutive slashes. ++ In these cases MergeSlashes can be set to ++ OFF to retain the multiple consecutive slashes. In these ++ configurations, regular expressions used in the configuration file that match ++ the path component of the URL (LocationMatch, ++ RewriteRule, ...) need to take into account multiple ++ consecutive slashes.

    ++
    ++
    top
    +

    MergeTrailers Directive

    + + +--- a/include/http_core.h 2019/03/18 08:49:19 1855736 ++++ b/include/http_core.h 2019/03/18 08:49:59 1855737 +@@ -740,7 +740,7 @@ + #define AP_HTTP_METHODS_LENIENT 1 + #define AP_HTTP_METHODS_REGISTERED 2 + char http_methods; +- ++ unsigned int merge_slashes; + } core_server_config; + + /* for AddOutputFiltersByType in core.c */ +diff --git a/include/httpd.h b/include/httpd.h +index 65392f8..99f7f04 100644 +--- a/include/httpd.h ++++ b/include/httpd.h +@@ -1697,11 +1697,21 @@ AP_DECLARE(int) ap_unescape_url_keep2f(char *url, int decode_slashes); + AP_DECLARE(int) ap_unescape_urlencoded(char *query); + + /** +- * Convert all double slashes to single slashes +- * @param name The string to convert ++ * Convert all double slashes to single slashes, except where significant ++ * to the filesystem on the current platform. ++ * @param name The string to convert, assumed to be a filesystem path + */ + AP_DECLARE(void) ap_no2slash(char *name); + ++/** ++ * Convert all double slashes to single slashes, except where significant ++ * to the filesystem on the current platform. ++ * @param name The string to convert ++ * @param is_fs_path if set to 0, the significance of any double-slashes is ++ * ignored. ++ */ ++AP_DECLARE(void) ap_no2slash_ex(char *name, int is_fs_path); ++ + /** + * Remove all ./ and xx/../ substrings from a file name. Also remove + * any leading ../ or /../ substrings. 
+diff --git a/server/request.c b/server/request.c +index dbe3e07..d5c558a 100644 +--- a/server/request.c ++++ b/server/request.c +@@ -167,6 +167,8 @@ AP_DECLARE(int) ap_process_request_internal(request_rec *r) + int file_req = (r->main && r->filename); + int access_status; + core_dir_config *d; ++ core_server_config *sconf = ++ ap_get_core_module_config(r->server->module_config); + + /* Ignore embedded %2F's in path for proxy requests */ + if (!r->proxyreq && r->parsed_uri.path) { +@@ -191,6 +193,12 @@ AP_DECLARE(int) ap_process_request_internal(request_rec *r) + } + + ap_getparents(r->uri); /* OK --- shrinking transformations... */ ++ if (sconf->merge_slashes != AP_CORE_CONFIG_OFF) { ++ ap_no2slash(r->uri); ++ if (r->parsed_uri.path) { ++ ap_no2slash(r->parsed_uri.path); ++ } ++ } + + /* All file subrequests are a huge pain... they cannot bubble through the + * next several steps. Only file subrequests are allowed an empty uri, +@@ -1411,20 +1419,7 @@ AP_DECLARE(int) ap_location_walk(request_rec *r) + + cache = prep_walk_cache(AP_NOTE_LOCATION_WALK, r); + cached = (cache->cached != NULL); +- +- /* Location and LocationMatch differ on their behaviour w.r.t. multiple +- * slashes. Location matches multiple slashes with a single slash, +- * LocationMatch doesn't. An exception, for backwards brokenness is +- * absoluteURIs... in which case neither match multiple slashes. 
+- */ +- if (r->uri[0] != '/') { +- entry_uri = r->uri; +- } +- else { +- char *uri = apr_pstrdup(r->pool, r->uri); +- ap_no2slash(uri); +- entry_uri = uri; +- } ++ entry_uri = r->uri; + + /* If we have an cache->cached location that matches r->uri, + * and the vhost's list of locations hasn't changed, we can skip +@@ -1491,7 +1486,7 @@ AP_DECLARE(int) ap_location_walk(request_rec *r) + pmatch = apr_palloc(rxpool, nmatch*sizeof(ap_regmatch_t)); + } + +- if (ap_regexec(entry_core->r, r->uri, nmatch, pmatch, 0)) { ++ if (ap_regexec(entry_core->r, entry_uri, nmatch, pmatch, 0)) { + continue; + } + +@@ -1501,7 +1496,7 @@ AP_DECLARE(int) ap_location_walk(request_rec *r) + apr_table_setn(r->subprocess_env, + ((const char **)entry_core->refs->elts)[i], + apr_pstrndup(r->pool, +- r->uri + pmatch[i].rm_so, ++ entry_uri + pmatch[i].rm_so, + pmatch[i].rm_eo - pmatch[i].rm_so)); + } + } +diff --git a/server/util.c b/server/util.c +index fd7a0a1..e0c558c 100644 +--- a/server/util.c ++++ b/server/util.c +@@ -561,16 +561,20 @@ AP_DECLARE(void) ap_getparents(char *name) + name[l] = '\0'; + } + } +- +-AP_DECLARE(void) ap_no2slash(char *name) ++AP_DECLARE(void) ap_no2slash_ex(char *name, int is_fs_path) + { ++ + char *d, *s; + ++ if (!*name) { ++ return; ++ } ++ + s = d = name; + + #ifdef HAVE_UNC_PATHS + /* Check for UNC names. Leave leading two slashes. 
*/ +- if (s[0] == '/' && s[1] == '/') ++ if (is_fs_path && s[0] == '/' && s[1] == '/') + *d++ = *s++; + #endif + +@@ -587,6 +591,10 @@ AP_DECLARE(void) ap_no2slash(char *name) + *d = '\0'; + } + ++AP_DECLARE(void) ap_no2slash(char *name) ++{ ++ ap_no2slash_ex(name, 1); ++} + + /* + * copy at most n leading directories of s into d +diff --git a/server/core.c b/server/core.c +index b5ab429..a31f1e4 100644 +--- a/server/core.c ++++ b/server/core.c +@@ -493,6 +493,7 @@ static void *create_core_server_config(apr_pool_t *a, server_rec *s) + */ + + conf->trace_enable = AP_TRACE_UNSET; ++ conf->merge_slashes = AP_CORE_CONFIG_UNSET; + + conf->protocols = apr_array_make(a, 5, sizeof(const char *)); + conf->protocols_honor_order = -1; +@@ -561,7 +562,9 @@ static void *merge_core_server_configs(apr_pool_t *p, void *basev, void *virtv) + conf->protocols_honor_order = ((virt->protocols_honor_order < 0)? + base->protocols_honor_order : + virt->protocols_honor_order); +- ++ ++ AP_CORE_MERGE_FLAG(merge_slashes, conf, base, virt); ++ + return conf; + } + +@@ -1872,6 +1875,13 @@ static const char *set_qualify_redirect_url(cmd_parms *cmd, void *d_, int flag) + return NULL; + } + ++static const char *set_core_server_flag(cmd_parms *cmd, void *s_, int flag) ++{ ++ core_server_config *conf = ++ ap_get_core_module_config(cmd->server->module_config); ++ return ap_set_flag_slot(cmd, conf, flag); ++} ++ + static const char *set_override_list(cmd_parms *cmd, void *d_, int argc, char *const argv[]) + { + core_dir_config *d = d_; +@@ -4598,6 +4608,10 @@ AP_INIT_ITERATE("HttpProtocolOptions", set_http_protocol_options, NULL, RSRC_CON + "'Unsafe' or 'Strict' (default). 
Sets HTTP acceptance rules"), + AP_INIT_ITERATE("RegisterHttpMethod", set_http_method, NULL, RSRC_CONF, + "Registers non-standard HTTP methods"), ++AP_INIT_FLAG("MergeSlashes", set_core_server_flag, ++ (void *)APR_OFFSETOF(core_server_config, merge_slashes), ++ RSRC_CONF, ++ "Controls whether consecutive slashes in the URI path are merged"), + { NULL } + }; + diff --git a/SOURCES/httpd-2.4.37-CVE-2019-10092.patch b/SOURCES/httpd-2.4.37-CVE-2019-10092.patch new file mode 100644 index 0000000..a06d9c2 --- /dev/null +++ b/SOURCES/httpd-2.4.37-CVE-2019-10092.patch @@ -0,0 +1,192 @@ +diff --git a/modules/http/http_protocol.c b/modules/http/http_protocol.c +index e419eb6..dcafa9c 100644 +--- a/modules/http/http_protocol.c ++++ b/modules/http/http_protocol.c +@@ -1132,13 +1132,10 @@ static const char *get_canned_error_string(int status, + "\">here.

    \n", + NULL)); + case HTTP_USE_PROXY: +- return(apr_pstrcat(p, +- "

    This resource is only accessible " +- "through the proxy\n", +- ap_escape_html(r->pool, location), +- "
    \nYou will need to configure " +- "your client to use that proxy.

    \n", +- NULL)); ++ return("

    This resource is only accessible " ++ "through the proxy\n" ++ "
    \nYou will need to configure " ++ "your client to use that proxy.

    \n"); + case HTTP_PROXY_AUTHENTICATION_REQUIRED: + case HTTP_UNAUTHORIZED: + return("

    This server could not verify that you\n" +@@ -1154,34 +1151,20 @@ static const char *get_canned_error_string(int status, + "error-notes", + "

    \n")); + case HTTP_FORBIDDEN: +- s1 = apr_pstrcat(p, +- "

    You don't have permission to access ", +- ap_escape_html(r->pool, r->uri), +- "\non this server.
    \n", +- NULL); +- return(add_optional_notes(r, s1, "error-notes", "

    \n")); ++ return(add_optional_notes(r, "

    You don't have permission to access this resource.", "error-notes", "

    \n")); + case HTTP_NOT_FOUND: +- return(apr_pstrcat(p, +- "

    The requested URL ", +- ap_escape_html(r->pool, r->uri), +- " was not found on this server.

    \n", +- NULL)); ++ return("

    The requested URL was not found on this server.

    \n"); + case HTTP_METHOD_NOT_ALLOWED: + return(apr_pstrcat(p, + "

    The requested method ", + ap_escape_html(r->pool, r->method), +- " is not allowed for the URL ", +- ap_escape_html(r->pool, r->uri), +- ".

    \n", ++ " is not allowed for this URL.

    \n", + NULL)); + case HTTP_NOT_ACCEPTABLE: +- s1 = apr_pstrcat(p, +- "

    An appropriate representation of the " +- "requested resource ", +- ap_escape_html(r->pool, r->uri), +- " could not be found on this server.

    \n", +- NULL); +- return(add_optional_notes(r, s1, "variant-list", "")); ++ return(add_optional_notes(r, ++ "

    An appropriate representation of the requested resource " ++ "could not be found on this server.

    \n", ++ "variant-list", "")); + case HTTP_MULTIPLE_CHOICES: + return(add_optional_notes(r, "", "variant-list", "")); + case HTTP_LENGTH_REQUIRED: +@@ -1192,18 +1175,13 @@ static const char *get_canned_error_string(int status, + NULL); + return(add_optional_notes(r, s1, "error-notes", "

    \n")); + case HTTP_PRECONDITION_FAILED: +- return(apr_pstrcat(p, +- "

    The precondition on the request " +- "for the URL ", +- ap_escape_html(r->pool, r->uri), +- " evaluated to false.

    \n", +- NULL)); ++ return("

    The precondition on the request " ++ "for this URL evaluated to false.

    \n"); + case HTTP_NOT_IMPLEMENTED: + s1 = apr_pstrcat(p, + "

    ", +- ap_escape_html(r->pool, r->method), " to ", +- ap_escape_html(r->pool, r->uri), +- " not supported.
    \n", ++ ap_escape_html(r->pool, r->method), " ", ++ " not supported for current URL.
    \n", + NULL); + return(add_optional_notes(r, s1, "error-notes", "

    \n")); + case HTTP_BAD_GATEWAY: +@@ -1211,29 +1189,19 @@ static const char *get_canned_error_string(int status, + "response from an upstream server.
    " CRLF; + return(add_optional_notes(r, s1, "error-notes", "

    \n")); + case HTTP_VARIANT_ALSO_VARIES: +- return(apr_pstrcat(p, +- "

    A variant for the requested " +- "resource\n

    \n",
    +-                           ap_escape_html(r->pool, r->uri),
    +-                           "\n
    \nis itself a negotiable resource. " +- "This indicates a configuration error.

    \n", +- NULL)); ++ return("

    A variant for the requested " ++ "resource\n

    \n"
    ++               "\n
    \nis itself a negotiable resource. " ++ "This indicates a configuration error.

    \n"); + case HTTP_REQUEST_TIME_OUT: + return("

    Server timeout waiting for the HTTP request from the client.

    \n"); + case HTTP_GONE: +- return(apr_pstrcat(p, +- "

    The requested resource
    ", +- ap_escape_html(r->pool, r->uri), +- "
    \nis no longer available on this server " +- "and there is no forwarding address.\n" +- "Please remove all references to this " +- "resource.

    \n", +- NULL)); ++ return("

    The requested resource is no longer available on this server" ++ " and there is no forwarding address.\n" ++ "Please remove all references to this resource.

    \n"); + case HTTP_REQUEST_ENTITY_TOO_LARGE: + return(apr_pstrcat(p, +- "The requested resource
    ", +- ap_escape_html(r->pool, r->uri), "
    \n", +- "does not allow request data with ", ++ "The requested resource does not allow request data with ", + ap_escape_html(r->pool, r->method), + " requests, or the amount of data provided in\n" + "the request exceeds the capacity limit.\n", +@@ -1317,11 +1285,9 @@ static const char *get_canned_error_string(int status, + "the Server Name Indication (SNI) in use for this\n" + "connection.

    \n"); + case HTTP_UNAVAILABLE_FOR_LEGAL_REASONS: +- s1 = apr_pstrcat(p, +- "

    Access to ", ap_escape_html(r->pool, r->uri), +- "\nhas been denied for legal reasons.
    \n", +- NULL); +- return(add_optional_notes(r, s1, "error-notes", "

    \n")); ++ return(add_optional_notes(r, ++ "

    Access to this URL has been denied for legal reasons.
    \n", ++ "error-notes", "

    \n")); + default: /* HTTP_INTERNAL_SERVER_ERROR */ + /* + * This comparison to expose error-notes could be modified to +diff --git a/modules/proxy/mod_proxy.c b/modules/proxy/mod_proxy.c +index 800ede1..de48735 100644 +--- a/modules/proxy/mod_proxy.c ++++ b/modules/proxy/mod_proxy.c +@@ -1055,9 +1055,10 @@ static int proxy_handler(request_rec *r) + char *end; + maxfwd = apr_strtoi64(str, &end, 10); + if (maxfwd < 0 || maxfwd == APR_INT64_MAX || *end) { +- return ap_proxyerror(r, HTTP_BAD_REQUEST, +- apr_psprintf(r->pool, +- "Max-Forwards value '%s' could not be parsed", str)); ++ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO() ++ "Max-Forwards value '%s' could not be parsed", str); ++ return ap_proxyerror(r, HTTP_BAD_REQUEST, ++ "Max-Forwards request header could not be parsed"); + } + else if (maxfwd == 0) { + switch (r->method_number) { +diff --git a/modules/proxy/mod_proxy_ftp.c b/modules/proxy/mod_proxy_ftp.c +index 4a10987..8f6f853 100644 +--- a/modules/proxy/mod_proxy_ftp.c ++++ b/modules/proxy/mod_proxy_ftp.c +@@ -1024,8 +1024,9 @@ static int proxy_ftp_handler(request_rec *r, proxy_worker *worker, + /* We break the URL into host, port, path-search */ + if (r->parsed_uri.hostname == NULL) { + if (APR_SUCCESS != apr_uri_parse(p, url, &uri)) { +- return ap_proxyerror(r, HTTP_BAD_REQUEST, +- apr_psprintf(p, "URI cannot be parsed: %s", url)); ++ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO() ++ "URI cannot be parsed: %s", url); ++ return ap_proxyerror(r, HTTP_BAD_REQUEST, "URI cannot be parsed"); + } + connectname = uri.hostname; + connectport = uri.port; +diff --git a/modules/proxy/proxy_util.c b/modules/proxy/proxy_util.c +index 6501c68..0bbfa59 100644 +--- a/modules/proxy/proxy_util.c ++++ b/modules/proxy/proxy_util.c +@@ -368,12 +368,9 @@ PROXY_DECLARE(char *) + + PROXY_DECLARE(int) ap_proxyerror(request_rec *r, int statuscode, const char *message) + { +- const char *uri = ap_escape_html(r->pool, r->uri); + apr_table_setn(r->notes, 
"error-notes", + apr_pstrcat(r->pool, +- "The proxy server could not handle the request ", ap_escape_html(r->pool, r->method), " ", uri, +- ".

    \n" ++ "The proxy server could not handle the request

    " + "Reason: ", ap_escape_html(r->pool, message), + "

    ", + NULL)); diff --git a/SOURCES/httpd-2.4.37-CVE-2019-10097.patch b/SOURCES/httpd-2.4.37-CVE-2019-10097.patch new file mode 100644 index 0000000..b0132a9 --- /dev/null +++ b/SOURCES/httpd-2.4.37-CVE-2019-10097.patch @@ -0,0 +1,66 @@ +diff --git a/modules/metadata/mod_remoteip.c b/modules/metadata/mod_remoteip.c +index 4572ce1..a0cbc0f 100644 +--- a/modules/metadata/mod_remoteip.c ++++ b/modules/metadata/mod_remoteip.c +@@ -987,15 +987,13 @@ static remoteip_parse_status_t remoteip_process_v2_header(conn_rec *c, + return HDR_ERROR; + #endif + default: +- /* unsupported protocol, keep local connection address */ +- return HDR_DONE; ++ /* unsupported protocol */ ++ ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, c, APLOGNO(10183) ++ "RemoteIPProxyProtocol: unsupported protocol %.2hx", ++ (unsigned short)hdr->v2.fam); ++ return HDR_ERROR; + } + break; /* we got a sockaddr now */ +- +- case 0x00: /* LOCAL command */ +- /* keep local connection address for LOCAL */ +- return HDR_DONE; +- + default: + /* not a supported command */ + ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, c, APLOGNO(03507) +@@ -1087,11 +1085,24 @@ static apr_status_t remoteip_input_filter(ap_filter_t *f, + /* try to read a header's worth of data */ + while (!ctx->done) { + if (APR_BRIGADE_EMPTY(ctx->bb)) { +- ret = ap_get_brigade(f->next, ctx->bb, ctx->mode, block, +- ctx->need - ctx->rcvd); ++ apr_off_t got, want = ctx->need - ctx->rcvd; ++ ++ ret = ap_get_brigade(f->next, ctx->bb, ctx->mode, block, want); + if (ret != APR_SUCCESS) { ++ ap_log_cerror(APLOG_MARK, APLOG_ERR, ret, f->c, APLOGNO(10184) ++ "failed reading input"); + return ret; + } ++ ++ ret = apr_brigade_length(ctx->bb, 1, &got); ++ if (ret || got > want) { ++ ap_log_cerror(APLOG_MARK, APLOG_ERR, ret, f->c, APLOGNO(10185) ++ "RemoteIPProxyProtocol header too long, " ++ "got %" APR_OFF_T_FMT " expected %" APR_OFF_T_FMT, ++ got, want); ++ f->c->aborted = 1; ++ return APR_ECONNABORTED; ++ } + } + if (APR_BRIGADE_EMPTY(ctx->bb)) { + return block == 
APR_NONBLOCK_READ ? APR_SUCCESS : APR_EOF; +@@ -1139,6 +1150,13 @@ static apr_status_t remoteip_input_filter(ap_filter_t *f, + if (ctx->rcvd >= MIN_V2_HDR_LEN) { + ctx->need = MIN_V2_HDR_LEN + + remoteip_get_v2_len((proxy_header *) ctx->header); ++ if (ctx->need > sizeof(proxy_v2)) { ++ ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, f->c, APLOGNO(10186) ++ "RemoteIPProxyProtocol protocol header length too long"); ++ f->c->aborted = 1; ++ apr_brigade_destroy(ctx->bb); ++ return APR_ECONNABORTED; ++ } + } + if (ctx->rcvd >= ctx->need) { + psts = remoteip_process_v2_header(f->c, conn_conf, diff --git a/SOURCES/httpd-2.4.37-CVE-2019-10098.patch b/SOURCES/httpd-2.4.37-CVE-2019-10098.patch new file mode 100644 index 0000000..c3a559f --- /dev/null +++ b/SOURCES/httpd-2.4.37-CVE-2019-10098.patch @@ -0,0 +1,91 @@ +diff --git a/include/ap_regex.h b/include/ap_regex.h +index 7d8df79..7af2f99 100644 +--- a/include/ap_regex.h ++++ b/include/ap_regex.h +@@ -84,7 +84,11 @@ extern "C" { + + #define AP_REG_DOLLAR_ENDONLY 0x200 /* '$' matches at end of subject string only */ + +-#define AP_REG_MATCH "MATCH_" /** suggested prefix for ap_regname */ ++#define AP_REG_NO_DEFAULT 0x400 /**< Don't implicitely add AP_REG_DEFAULT options */ ++ ++#define AP_REG_MATCH "MATCH_" /**< suggested prefix for ap_regname */ ++ ++#define AP_REG_DEFAULT (AP_REG_DOTALL|AP_REG_DOLLAR_ENDONLY) + + /* Error values: */ + enum { +diff --git a/modules/filters/mod_substitute.c b/modules/filters/mod_substitute.c +index b7d5296..e976c51 100644 +--- a/modules/filters/mod_substitute.c ++++ b/modules/filters/mod_substitute.c +@@ -667,8 +667,10 @@ static const char *set_pattern(cmd_parms *cmd, void *cfg, const char *line) + + /* first see if we can compile the regex */ + if (!is_pattern) { +- r = ap_pregcomp(cmd->pool, from, AP_REG_EXTENDED | +- (ignore_case ? AP_REG_ICASE : 0)); ++ int flags = AP_REG_NO_DEFAULT ++ | (ap_regcomp_get_default_cflags() & AP_REG_DOLLAR_ENDONLY) ++ | (ignore_case ? 
AP_REG_ICASE : 0); ++ r = ap_pregcomp(cmd->pool, from, flags); + if (!r) + return "Substitute could not compile regex"; + } +diff --git a/server/core.c b/server/core.c +index 76432ce..6d00777 100644 +--- a/server/core.c ++++ b/server/core.c +@@ -4973,7 +4973,7 @@ static int core_pre_config(apr_pool_t *pconf, apr_pool_t *plog, apr_pool_t *ptem + init_config_defines(pconf); + apr_pool_cleanup_register(pconf, NULL, reset_config, apr_pool_cleanup_null); + +- ap_regcomp_set_default_cflags(AP_REG_DOLLAR_ENDONLY); ++ ap_regcomp_set_default_cflags(AP_REG_DEFAULT); + + mpm_common_pre_config(pconf); + +diff --git a/server/util_pcre.c b/server/util_pcre.c +index f2cb1bb..2a665c8 100644 +--- a/server/util_pcre.c ++++ b/server/util_pcre.c +@@ -120,7 +120,7 @@ AP_DECLARE(void) ap_regfree(ap_regex_t *preg) + * Compile a regular expression * + *************************************************/ + +-static int default_cflags = AP_REG_DOLLAR_ENDONLY; ++static int default_cflags = AP_REG_DEFAULT; + + AP_DECLARE(int) ap_regcomp_get_default_cflags(void) + { +@@ -168,7 +168,8 @@ AP_DECLARE(int) ap_regcomp(ap_regex_t * preg, const char *pattern, int cflags) + int errcode = 0; + int options = PCRE_DUPNAMES; + +- cflags |= default_cflags; ++ if ((cflags & AP_REG_NO_DEFAULT) == 0) ++ cflags |= default_cflags; + if ((cflags & AP_REG_ICASE) != 0) + options |= PCRE_CASELESS; + if ((cflags & AP_REG_NEWLINE) != 0) +diff --git a/server/util_regex.c b/server/util_regex.c +index 2a30d68..5405f8d 100644 +--- a/server/util_regex.c ++++ b/server/util_regex.c +@@ -94,6 +94,7 @@ AP_DECLARE(ap_rxplus_t*) ap_rxplus_compile(apr_pool_t *pool, + } + + /* anything after the current delimiter is flags */ ++ ret->flags = ap_regcomp_get_default_cflags() & AP_REG_DOLLAR_ENDONLY; + while (*++endp) { + switch (*endp) { + case 'i': ret->flags |= AP_REG_ICASE; break; +@@ -106,7 +107,7 @@ AP_DECLARE(ap_rxplus_t*) ap_rxplus_compile(apr_pool_t *pool, + default: break; /* we should probably be stricter here */ + } + } +- 
if (ap_regcomp(&ret->rx, rxstr, ret->flags) == 0) { ++ if (ap_regcomp(&ret->rx, rxstr, AP_REG_NO_DEFAULT | ret->flags) == 0) { + apr_pool_cleanup_register(pool, &ret->rx, rxplus_cleanup, + apr_pool_cleanup_null); + } diff --git a/SOURCES/httpd-2.4.37-CVE-2020-1934.patch b/SOURCES/httpd-2.4.37-CVE-2020-1934.patch new file mode 100644 index 0000000..69088b9 --- /dev/null +++ b/SOURCES/httpd-2.4.37-CVE-2020-1934.patch @@ -0,0 +1,68 @@ +--- a/modules/proxy/mod_proxy_ftp.c 2020/02/07 17:01:07 1873744 ++++ b/modules/proxy/mod_proxy_ftp.c 2020/02/07 17:04:45 1873745 +@@ -218,7 +218,7 @@ + * (EBCDIC) machines either. + */ + static apr_status_t ftp_string_read(conn_rec *c, apr_bucket_brigade *bb, +- char *buff, apr_size_t bufflen, int *eos) ++ char *buff, apr_size_t bufflen, int *eos, apr_size_t *outlen) + { + apr_bucket *e; + apr_status_t rv; +@@ -230,6 +230,7 @@ + /* start with an empty string */ + buff[0] = 0; + *eos = 0; ++ *outlen = 0; + + /* loop through each brigade */ + while (!found) { +@@ -273,6 +274,7 @@ + if (len > 0) { + memcpy(pos, response, len); + pos += len; ++ *outlen += len; + } + } + apr_bucket_delete(e); +@@ -385,28 +387,36 @@ + char buff[5]; + char *mb = msgbuf, *me = &msgbuf[msglen]; + apr_status_t rv; ++ apr_size_t nread; ++ + int eos; + +- if (APR_SUCCESS != (rv = ftp_string_read(ftp_ctrl, bb, response, sizeof(response), &eos))) { ++ if (APR_SUCCESS != (rv = ftp_string_read(ftp_ctrl, bb, response, sizeof(response), &eos, &nread))) { + return -1; + } + /* + ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, NULL, APLOGNO(03233) + "<%s", response); + */ ++ if (nread < 4) { ++ ap_log_error(APLOG_MARK, APLOG_INFO, 0, NULL, APLOGNO(10229) "Malformed FTP response '%s'", response); ++ *mb = '\0'; ++ return -1; ++ } ++ + if (!apr_isdigit(response[0]) || !apr_isdigit(response[1]) || +- !apr_isdigit(response[2]) || (response[3] != ' ' && response[3] != '-')) ++ !apr_isdigit(response[2]) || (response[3] != ' ' && response[3] != '-')) + status = 0; + else + status = 100 
* response[0] + 10 * response[1] + response[2] - 111 * '0'; + + mb = apr_cpystrn(mb, response + 4, me - mb); + +- if (response[3] == '-') { ++ if (response[3] == '-') { /* multi-line reply "123-foo\nbar\n123 baz" */ + memcpy(buff, response, 3); + buff[3] = ' '; + do { +- if (APR_SUCCESS != (rv = ftp_string_read(ftp_ctrl, bb, response, sizeof(response), &eos))) { ++ if (APR_SUCCESS != (rv = ftp_string_read(ftp_ctrl, bb, response, sizeof(response), &eos, &nread))) { + return -1; + } + mb = apr_cpystrn(mb, response + (' ' == response[0] ? 1 : 4), me - mb); diff --git a/SOURCES/httpd-2.4.37-balancer-failover.patch b/SOURCES/httpd-2.4.37-balancer-failover.patch new file mode 100644 index 0000000..ca691f7 --- /dev/null +++ b/SOURCES/httpd-2.4.37-balancer-failover.patch @@ -0,0 +1,225 @@ +diff --git a/modules/proxy/mod_proxy_http.c b/modules/proxy/mod_proxy_http.c +index ec1e042..2c0500f 100644 +--- a/modules/proxy/mod_proxy_http.c ++++ b/modules/proxy/mod_proxy_http.c +@@ -310,16 +310,18 @@ static int stream_reqbody_read(proxy_http_req_t *req, apr_bucket_brigade *bb, + return OK; + } + +-static int stream_reqbody(proxy_http_req_t *req, rb_methods rb_method) ++static int stream_reqbody(proxy_http_req_t *req) + { + request_rec *r = req->r; + int seen_eos = 0, rv = OK; + apr_size_t hdr_len; + char chunk_hdr[20]; /* must be here due to transient bucket. */ ++ conn_rec *origin = req->origin; + proxy_conn_rec *p_conn = req->backend; + apr_bucket_alloc_t *bucket_alloc = req->bucket_alloc; + apr_bucket_brigade *header_brigade = req->header_brigade; + apr_bucket_brigade *input_brigade = req->input_brigade; ++ rb_methods rb_method = req->rb_method; + apr_off_t bytes, bytes_streamed = 0; + apr_bucket *e; + +@@ -333,7 +335,7 @@ static int stream_reqbody(proxy_http_req_t *req, rb_methods rb_method) + } + + if (!APR_BRIGADE_EMPTY(input_brigade)) { +- /* If this brigade contains EOS, either stop or remove it. */ ++ /* If this brigade contains EOS, remove it and be done. 
*/ + if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) { + seen_eos = 1; + +@@ -375,7 +377,8 @@ static int stream_reqbody(proxy_http_req_t *req, rb_methods rb_method) + APR_BRIGADE_INSERT_TAIL(input_brigade, e); + } + } +- else if (bytes_streamed > req->cl_val) { ++ else if (rb_method == RB_STREAM_CL ++ && bytes_streamed > req->cl_val) { + /* C-L < bytes streamed?!? + * We will error out after the body is completely + * consumed, but we can't stream more bytes at the +@@ -407,7 +410,7 @@ static int stream_reqbody(proxy_http_req_t *req, rb_methods rb_method) + APR_BRIGADE_PREPEND(input_brigade, header_brigade); + + /* Flush here on EOS because we won't stream_reqbody_read() again */ +- rv = ap_proxy_pass_brigade(bucket_alloc, r, p_conn, req->origin, ++ rv = ap_proxy_pass_brigade(bucket_alloc, r, p_conn, origin, + input_brigade, seen_eos); + if (rv != OK) { + return rv; +@@ -454,10 +457,6 @@ static int spool_reqbody_cl(proxy_http_req_t *req, apr_off_t *bytes_spooled) + /* If this brigade contains EOS, either stop or remove it. */ + if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) { + seen_eos = 1; +- +- /* We can't pass this EOS to the output_filters. */ +- e = APR_BRIGADE_LAST(input_brigade); +- apr_bucket_delete(e); + } + + apr_brigade_length(input_brigade, 1, &bytes); +@@ -644,7 +643,18 @@ static int ap_proxy_http_prefetch(proxy_http_req_t *req, + */ + temp_brigade = apr_brigade_create(p, bucket_alloc); + block = req->prefetch_nonblocking ? APR_NONBLOCK_READ : APR_BLOCK_READ; +- do { ++ ++ /* Account for saved input, if any. */ ++ apr_brigade_length(input_brigade, 0, &bytes_read); ++ ++ /* Ensure we don't hit a wall where we have a buffer too small ++ * for ap_get_brigade's filters to fetch us another bucket, ++ * surrender once we hit 80 bytes less than MAX_MEM_SPOOL ++ * (an arbitrary value). 
++ */ ++ while (bytes_read < MAX_MEM_SPOOL - 80 ++ && (APR_BRIGADE_EMPTY(input_brigade) ++ || !APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade)))) { + status = ap_get_brigade(r->input_filters, temp_brigade, + AP_MODE_READBYTES, block, + MAX_MEM_SPOOL - bytes_read); +@@ -686,15 +696,7 @@ static int ap_proxy_http_prefetch(proxy_http_req_t *req, + c->client_ip, c->remote_host ? c->remote_host: ""); + return HTTP_INTERNAL_SERVER_ERROR; + } +- +- /* Ensure we don't hit a wall where we have a buffer too small +- * for ap_get_brigade's filters to fetch us another bucket, +- * surrender once we hit 80 bytes less than MAX_MEM_SPOOL +- * (an arbitrary value.) +- */ +- } while ((bytes_read < MAX_MEM_SPOOL - 80) +- && !APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade)) +- && !req->prefetch_nonblocking); ++ } + + /* Use chunked request body encoding or send a content-length body? + * +@@ -838,35 +840,21 @@ static int ap_proxy_http_request(proxy_http_req_t *req) + { + int rv; + request_rec *r = req->r; +- apr_bucket_alloc_t *bucket_alloc = req->bucket_alloc; +- apr_bucket_brigade *header_brigade = req->header_brigade; +- apr_bucket_brigade *input_brigade = req->input_brigade; + + /* send the request header/body, if any. */ + switch (req->rb_method) { ++ case RB_SPOOL_CL: + case RB_STREAM_CL: + case RB_STREAM_CHUNKED: + if (req->do_100_continue) { +- rv = ap_proxy_pass_brigade(bucket_alloc, r, req->backend, +- req->origin, header_brigade, 1); ++ rv = ap_proxy_pass_brigade(req->bucket_alloc, r, req->backend, ++ req->origin, req->header_brigade, 1); + } + else { +- rv = stream_reqbody(req, req->rb_method); ++ rv = stream_reqbody(req); + } + break; + +- case RB_SPOOL_CL: +- /* Prefetch has built the header and spooled the whole body; +- * if we don't expect 100-continue we can flush both all at once, +- * otherwise flush the header only. 
+- */ +- if (!req->do_100_continue) { +- APR_BRIGADE_CONCAT(header_brigade, input_brigade); +- } +- rv = ap_proxy_pass_brigade(bucket_alloc, r, req->backend, +- req->origin, header_brigade, 1); +- break; +- + default: + /* shouldn't be possible */ + rv = HTTP_INTERNAL_SERVER_ERROR; +@@ -1577,15 +1565,10 @@ int ap_proxy_http_process_response(proxy_http_req_t *req) + + /* Send the request body (fully). */ + switch(req->rb_method) { ++ case RB_SPOOL_CL: + case RB_STREAM_CL: + case RB_STREAM_CHUNKED: +- status = stream_reqbody(req, req->rb_method); +- break; +- case RB_SPOOL_CL: +- /* Prefetch has spooled the whole body, flush it. */ +- status = ap_proxy_pass_brigade(req->bucket_alloc, r, +- backend, origin, +- req->input_brigade, 1); ++ status = stream_reqbody(req); + break; + default: + /* Shouldn't happen */ +@@ -1940,6 +1923,7 @@ static int proxy_http_handler(request_rec *r, proxy_worker *worker, + const char *u; + proxy_http_req_t *req = NULL; + proxy_conn_rec *backend = NULL; ++ apr_bucket_brigade *input_brigade = NULL; + int is_ssl = 0; + conn_rec *c = r->connection; + proxy_dir_conf *dconf; +@@ -2005,8 +1989,20 @@ static int proxy_http_handler(request_rec *r, proxy_worker *worker, + + dconf = ap_get_module_config(r->per_dir_config, &proxy_module); + ++ /* We possibly reuse input data prefetched in previous call(s), e.g. for a ++ * balancer fallback scenario, and in this case the 100 continue settings ++ * should be consistent between balancer members. If not, we need to ignore ++ * Proxy100Continue on=>off once we tried to prefetch already, otherwise ++ * the HTTP_IN filter won't send 100 Continue for us anymore, and we might ++ * deadlock with the client waiting for each other. Note that off=>on is ++ * not an issue because in this case r->expecting_100 is false (the 100 ++ * Continue is out already), but we make sure that prefetch will be ++ * nonblocking to avoid passing more time there. 
++ */ ++ apr_pool_userdata_get((void **)&input_brigade, "proxy-req-input", p); ++ + /* Should we handle end-to-end or ping 100-continue? */ +- if ((r->expecting_100 && dconf->forward_100_continue) ++ if ((r->expecting_100 && (dconf->forward_100_continue || input_brigade)) + || PROXY_DO_100_CONTINUE(worker, r)) { + /* We need to reset r->expecting_100 or prefetching will cause + * ap_http_filter() to send "100 Continue" response by itself. So +@@ -2023,7 +2019,8 @@ static int proxy_http_handler(request_rec *r, proxy_worker *worker, + /* Should we block while prefetching the body or try nonblocking and flush + * data to the backend ASAP? + */ +- else if (apr_table_get(r->subprocess_env, "proxy-prefetch-nonblocking")) { ++ else if (input_brigade || apr_table_get(r->subprocess_env, ++ "proxy-prefetch-nonblocking")) { + req->prefetch_nonblocking = 1; + } + +@@ -2048,6 +2045,17 @@ static int proxy_http_handler(request_rec *r, proxy_worker *worker, + sizeof(req->server_portstr)))) + goto cleanup; + ++ /* The header is always (re-)built since it depends on worker settings, ++ * but the body can be fetched only once (even partially), so it's saved ++ * in between proxy_http_handler() calls should we come back here. ++ */ ++ req->header_brigade = apr_brigade_create(p, req->bucket_alloc); ++ if (input_brigade == NULL) { ++ input_brigade = apr_brigade_create(p, req->bucket_alloc); ++ apr_pool_userdata_setn(input_brigade, "proxy-req-input", NULL, p); ++ } ++ req->input_brigade = input_brigade; ++ + /* Prefetch (nonlocking) the request body so to increase the chance to get + * the whole (or enough) body and determine Content-Length vs chunked or + * spooled. By doing this before connecting or reusing the backend, we want +@@ -2058,8 +2066,6 @@ static int proxy_http_handler(request_rec *r, proxy_worker *worker, + * to reduce to the minimum the unavoidable local is_socket_connected() vs + * remote keepalive race condition. 
+ */ +- req->input_brigade = apr_brigade_create(p, req->bucket_alloc); +- req->header_brigade = apr_brigade_create(p, req->bucket_alloc); + if ((status = ap_proxy_http_prefetch(req, uri, locurl)) != OK) + goto cleanup; + diff --git a/SOURCES/httpd-2.4.37-fips-segfault.patch b/SOURCES/httpd-2.4.37-fips-segfault.patch new file mode 100644 index 0000000..6039980 --- /dev/null +++ b/SOURCES/httpd-2.4.37-fips-segfault.patch @@ -0,0 +1,42 @@ +diff --git a/modules/ssl/mod_ssl.c b/modules/ssl/mod_ssl.c +index 37947e7..b50c259 100644 +--- a/modules/ssl/mod_ssl.c ++++ b/modules/ssl/mod_ssl.c +@@ -331,9 +331,6 @@ static apr_status_t ssl_cleanup_pre_config(void *data) + /* + * Try to kill the internals of the SSL library. + */ +-#ifdef HAVE_FIPS +- FIPS_mode_set(0); +-#endif + /* Corresponds to OBJ_create()s */ + OBJ_cleanup(); + /* Corresponds to OPENSSL_load_builtin_modules() */ +diff --git a/modules/ssl/ssl_engine_init.c b/modules/ssl/ssl_engine_init.c +index 5063a72..21e41e2 100644 +--- a/modules/ssl/ssl_engine_init.c ++++ b/modules/ssl/ssl_engine_init.c +@@ -183,6 +183,14 @@ int ssl_is_challenge(conn_rec *c, const char *servername, + return 0; + } + ++#ifdef HAVE_FIPS ++static apr_status_t ssl_fips_cleanup(void *data) ++{ ++ FIPS_mode_set(0); ++ return APR_SUCCESS; ++} ++#endif ++ + /* + * Per-module initialization + */ +@@ -316,6 +324,8 @@ apr_status_t ssl_init_Module(apr_pool_t *p, apr_pool_t *plog, + if (FIPS_mode_set(1)) { + ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, s, APLOGNO(01884) + "Operating in SSL FIPS mode"); ++ apr_pool_cleanup_register(p, NULL, ssl_fips_cleanup, ++ apr_pool_cleanup_null); + } + else { + ap_log_error(APLOG_MARK, APLOG_EMERG, 0, s, APLOGNO(01885) "FIPS mode failed"); diff --git a/SOURCES/httpd-2.4.37-logjournal.patch b/SOURCES/httpd-2.4.37-logjournal.patch new file mode 100644 index 0000000..721911c --- /dev/null +++ b/SOURCES/httpd-2.4.37-logjournal.patch @@ -0,0 +1,87 @@ +diff --git a/modules/loggers/config.m4 b/modules/loggers/config.m4 
+index 762e773e94..0848d2e377 100644 +--- a/modules/loggers/config.m4 ++++ b/modules/loggers/config.m4 +@@ -5,6 +5,8 @@ dnl APACHE_MODULE(name, helptext[, objects[, structname[, default[, config]]]]) + APACHE_MODPATH_INIT(loggers) + + APACHE_MODULE(log_config, logging configuration. You won't be able to log requests to the server without this module., , , yes) ++APR_ADDTO(MOD_LOG_CONFIG_LDADD, [$SYSTEMD_LIBS]) ++ + APACHE_MODULE(log_debug, configurable debug logging, , , most) + APACHE_MODULE(log_forensic, forensic logging) + +diff --git a/modules/loggers/mod_log_config.c b/modules/loggers/mod_log_config.c +index 996c09cf49..50a056a2f8 100644 +--- a/modules/loggers/mod_log_config.c ++++ b/modules/loggers/mod_log_config.c +@@ -172,6 +172,10 @@ + #include + #endif + ++#ifdef HAVE_SYSTEMD ++#include ++#endif ++ + #define DEFAULT_LOG_FORMAT "%h %l %u %t \"%r\" %>s %b" + + module AP_MODULE_DECLARE_DATA log_config_module; +@@ -1638,6 +1642,25 @@ static apr_status_t ap_default_log_writer( request_rec *r, + + return rv; + } ++ ++static apr_status_t wrap_journal_stream(apr_pool_t *p, apr_file_t **outfd, ++ int priority) ++{ ++#ifdef HAVE_SYSTEMD ++ int fd; ++ ++ fd = sd_journal_stream_fd("httpd", priority, 0); ++ if (fd < 0) return fd; ++ ++ /* This is an AF_UNIX socket fd so is more pipe-like than ++ * file-like (the fd is neither seekable or readable), and use of ++ * apr_os_pipe_put_ex() allows cleanup registration. 
*/ ++ return apr_os_pipe_put_ex(outfd, &fd, 1, p); ++#else ++ return APR_ENOTIMPL; ++#endif ++} ++ + static void *ap_default_log_writer_init(apr_pool_t *p, server_rec *s, + const char* name) + { +@@ -1650,6 +1673,32 @@ static void *ap_default_log_writer_init(apr_pool_t *p, server_rec *s, + } + return ap_piped_log_write_fd(pl); + } ++ else if (strncasecmp(name, "journald:", 9) == 0) { ++ int priority; ++ const char *err = ap_parse_log_level(name + 9, &priority); ++ apr_status_t rv; ++ apr_file_t *fd; ++ ++ if (err == NULL && priority > LOG_DEBUG) { ++ err = "TRACE level debugging not supported with journald"; ++ } ++ ++ if (err) { ++ ap_log_error(APLOG_MARK, APLOG_ERR, APR_EBADPATH, s, ++ "invalid journald log priority name %s: %s", ++ name, err); ++ return NULL; ++ } ++ ++ rv = wrap_journal_stream(p, &fd, priority); ++ if (rv) { ++ ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, ++ "could not open journald log stream"); ++ return NULL; ++ } ++ ++ return fd; ++ } + else { + const char *fname = ap_server_root_relative(p, name); + apr_file_t *fd; diff --git a/SOURCES/httpd-2.4.37-mod-md-mod-ssl-hooks.patch b/SOURCES/httpd-2.4.37-mod-md-mod-ssl-hooks.patch new file mode 100644 index 0000000..d7df65a --- /dev/null +++ b/SOURCES/httpd-2.4.37-mod-md-mod-ssl-hooks.patch @@ -0,0 +1,544 @@ +diff --git a/modules/ssl/mod_ssl.h b/modules/ssl/mod_ssl.h +index 24a65a0..a360911 100644 +--- a/modules/ssl/mod_ssl.h ++++ b/modules/ssl/mod_ssl.h +@@ -29,6 +29,7 @@ + #include "httpd.h" + #include "http_config.h" + #include "apr_optional.h" ++#include "apr_tables.h" /* for apr_array_header_t */ + + /* Create a set of SSL_DECLARE(type), SSL_DECLARE_NONSTD(type) and + * SSL_DECLARE_DATA with appropriate export and import tags for the platform +@@ -86,6 +87,34 @@ APR_DECLARE_OPTIONAL_FN(int, ssl_engine_disable, (conn_rec *)); + APR_DECLARE_OPTIONAL_FN(int, ssl_engine_set, (conn_rec *, + ap_conf_vector_t *, + int proxy, int enable)); ++ ++/* Check for availability of new hooks */ ++#define 
SSL_CERT_HOOKS ++#ifdef SSL_CERT_HOOKS ++ ++/** Lets others add certificate and key files to the given server. ++ * For each cert a key must also be added. ++ * @param cert_file and array of const char* with the path to the certificate chain ++ * @param key_file and array of const char* with the path to the private key file ++ */ ++APR_DECLARE_EXTERNAL_HOOK(ssl, SSL, int, add_cert_files, ++ (server_rec *s, apr_pool_t *p, ++ apr_array_header_t *cert_files, ++ apr_array_header_t *key_files)) ++ ++/** In case no certificates are available for a server, this ++ * lets other modules add a fallback certificate for the time ++ * being. Regular requests against this server will be answered ++ * with a 503. ++ * @param cert_file and array of const char* with the path to the certificate chain ++ * @param key_file and array of const char* with the path to the private key file ++ */ ++APR_DECLARE_EXTERNAL_HOOK(ssl, SSL, int, add_fallback_cert_files, ++ (server_rec *s, apr_pool_t *p, ++ apr_array_header_t *cert_files, ++ apr_array_header_t *key_files)) ++ ++#endif /* SSL_CERT_HOOKS */ + + #endif /* __MOD_SSL_H__ */ + /** @} */ +diff --git a/modules/ssl/mod_ssl_openssl.h b/modules/ssl/mod_ssl_openssl.h +index 0fa654a..d4f684f 100644 +--- a/modules/ssl/mod_ssl_openssl.h ++++ b/modules/ssl/mod_ssl_openssl.h +@@ -69,5 +69,45 @@ APR_DECLARE_EXTERNAL_HOOK(ssl, SSL, int, pre_handshake, + APR_DECLARE_EXTERNAL_HOOK(ssl, SSL, int, proxy_post_handshake, + (conn_rec *c, SSL *ssl)) + ++/** On TLS connections that do not relate to a configured virtual host, ++ * allow other modules to provide a X509 certificate and EVP_PKEY to ++ * be used on the connection. This first hook which does not ++ * return DECLINED will determine the outcome. 
*/ ++APR_DECLARE_EXTERNAL_HOOK(ssl, SSL, int, answer_challenge, ++ (conn_rec *c, const char *server_name, ++ X509 **pcert, EVP_PKEY **pkey)) ++ ++/** During post_config phase, ask around if someone wants to provide ++ * OCSP stapling status information for the given cert (with the also ++ * provided issuer certificate). The first hook which does not ++ * return DECLINED promises to take responsibility (and respond ++ * in later calls via hook ssl_get_stapling_status). ++ * If no hook takes over, mod_ssl's own stapling implementation will ++ * be applied (if configured). ++ */ ++APR_DECLARE_EXTERNAL_HOOK(ssl, SSL, int, init_stapling_status, ++ (server_rec *s, apr_pool_t *p, ++ X509 *cert, X509 *issuer)) ++ ++/** Anyone answering positive to ssl_init_stapling_status for a ++ * certificate, needs to register here and supply the actual OCSP stapling ++ * status data (OCSP_RESP) for a new connection. ++ * A hook supplying the response data must return APR_SUCCESS. ++ * The data is returned in DER encoded bytes via pder and pderlen. The ++ * returned pointer may be NULL, which indicates that data is (currently) ++ * unavailable. ++ * If DER data is returned, it MUST come from a response with ++ * status OCSP_RESPONSE_STATUS_SUCCESSFUL and V_OCSP_CERTSTATUS_GOOD ++ * or V_OCSP_CERTSTATUS_REVOKED, not V_OCSP_CERTSTATUS_UNKNOWN. This means ++ * errors in OCSP retrieval are to be handled/logged by the hook and ++ * are not done by mod_ssl. ++ * Any DER bytes returned MUST be allocated via malloc() and ownership ++ * passes to mod_ssl. Meaning, the hook must return a malloced copy of ++ * the data it has. mod_ssl (or OpenSSL) will free it. 
++ */ ++APR_DECLARE_EXTERNAL_HOOK(ssl, SSL, int, get_stapling_status, ++ (unsigned char **pder, int *pderlen, ++ conn_rec *c, server_rec *s, X509 *cert)) ++ + #endif /* __MOD_SSL_OPENSSL_H__ */ + /** @} */ +diff --git a/modules/ssl/ssl_engine_init.c b/modules/ssl/ssl_engine_init.c +index 21e41e2..ef631c1 100644 +--- a/modules/ssl/ssl_engine_init.c ++++ b/modules/ssl/ssl_engine_init.c +@@ -36,6 +36,25 @@ APR_IMPLEMENT_OPTIONAL_HOOK_RUN_ALL(ssl, SSL, int, init_server, + (server_rec *s,apr_pool_t *p,int is_proxy,SSL_CTX *ctx), + (s,p,is_proxy,ctx), OK, DECLINED) + ++APR_IMPLEMENT_OPTIONAL_HOOK_RUN_ALL(ssl, SSL, int, add_cert_files, ++ (server_rec *s, apr_pool_t *p, ++ apr_array_header_t *cert_files, apr_array_header_t *key_files), ++ (s, p, cert_files, key_files), ++ OK, DECLINED) ++ ++APR_IMPLEMENT_OPTIONAL_HOOK_RUN_ALL(ssl, SSL, int, add_fallback_cert_files, ++ (server_rec *s, apr_pool_t *p, ++ apr_array_header_t *cert_files, apr_array_header_t *key_files), ++ (s, p, cert_files, key_files), ++ OK, DECLINED) ++ ++APR_IMPLEMENT_OPTIONAL_HOOK_RUN_ALL(ssl, SSL, int, answer_challenge, ++ (conn_rec *c, const char *server_name, ++ X509 **pcert, EVP_PKEY **pkey), ++ (c, server_name, pcert, pkey), ++ DECLINED, DECLINED) ++ ++ + /* _________________________________________________________________ + ** + ** Module Initialization +@@ -165,18 +184,18 @@ static void ssl_add_version_components(apr_pool_t *p, + modver, AP_SERVER_BASEVERSION, incver); + } + +-/**************************************************************************************************/ +-/* Managed Domains Interface */ +- +-static APR_OPTIONAL_FN_TYPE(md_is_managed) *md_is_managed; +-static APR_OPTIONAL_FN_TYPE(md_get_certificate) *md_get_certificate; +-static APR_OPTIONAL_FN_TYPE(md_is_challenge) *md_is_challenge; ++/* _________________________________________________________________ ++** ++** Let other answer special connection attempts. ++** Used in ACME challenge handling by mod_md. 
++** _________________________________________________________________ ++*/ + + int ssl_is_challenge(conn_rec *c, const char *servername, + X509 **pcert, EVP_PKEY **pkey) + { +- if (md_is_challenge) { +- return md_is_challenge(c, servername, pcert, pkey); ++ if (APR_SUCCESS == ssl_run_answer_challenge(c, servername, pcert, pkey)) { ++ return 1; + } + *pcert = NULL; + *pkey = NULL; +@@ -231,16 +250,6 @@ apr_status_t ssl_init_Module(apr_pool_t *p, apr_pool_t *plog, + ssl_config_global_create(base_server); /* just to avoid problems */ + ssl_config_global_fix(mc); + +- /* Initialize our interface to mod_md, if it is loaded +- */ +- md_is_managed = APR_RETRIEVE_OPTIONAL_FN(md_is_managed); +- md_get_certificate = APR_RETRIEVE_OPTIONAL_FN(md_get_certificate); +- md_is_challenge = APR_RETRIEVE_OPTIONAL_FN(md_is_challenge); +- if (!md_is_managed || !md_get_certificate) { +- md_is_managed = NULL; +- md_get_certificate = NULL; +- } +- + /* + * try to fix the configuration and open the dedicated SSL + * logfile as early as possible +@@ -1392,8 +1401,7 @@ static apr_status_t ssl_init_server_certs(server_rec *s, + * loaded via SSLOpenSSLConfCmd Certificate), so for 1.0.2 and + * later, we defer to the code in ssl_init_server_ctx. 
+ */ +- if ((mctx->stapling_enabled == TRUE) && +- !ssl_stapling_init_cert(s, p, ptemp, mctx, cert)) { ++ if (!ssl_stapling_init_cert(s, p, ptemp, mctx, cert)) { + ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(02567) + "Unable to configure certificate %s for stapling", + key_id); +@@ -1788,11 +1796,13 @@ static apr_status_t ssl_init_server_ctx(server_rec *s, + apr_array_header_t *pphrases) + { + apr_status_t rv; ++ modssl_pk_server_t *pks; + #ifdef HAVE_SSL_CONF_CMD + ssl_ctx_param_t *param = (ssl_ctx_param_t *)sc->server->ssl_ctx_param->elts; + SSL_CONF_CTX *cctx = sc->server->ssl_ctx_config; + int i; + #endif ++ int n; + + /* + * Check for problematic re-initializations +@@ -1804,50 +1814,24 @@ static apr_status_t ssl_init_server_ctx(server_rec *s, + return APR_EGENERAL; + } + +- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(10083) +- "Init: (%s) mod_md support is %s.", ssl_util_vhostid(p, s), +- md_is_managed? "available" : "unavailable"); +- if (md_is_managed && md_is_managed(s)) { +- modssl_pk_server_t *const pks = sc->server->pks; +- if (pks->cert_files->nelts > 0 || pks->key_files->nelts > 0) { +- ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(10084) +- "Init: (%s) You configured certificate/key files on this host, but " +- "is is covered by a Managed Domain. 
You need to remove these directives " +- "for the Managed Domain to take over.", ssl_util_vhostid(p, s)); +- } +- else { +- const char *key_file, *cert_file, *chain_file; +- +- key_file = cert_file = chain_file = NULL; +- +- if (md_get_certificate) { +- rv = md_get_certificate(s, p, &key_file, &cert_file); +- } +- else { +- rv = APR_ENOTIMPL; +- } +- +- if (key_file && cert_file) { +- ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, s, +- "%s: installing key=%s, cert=%s, chain=%s", +- ssl_util_vhostid(p, s), key_file, cert_file, chain_file); +- APR_ARRAY_PUSH(pks->key_files, const char *) = key_file; +- APR_ARRAY_PUSH(pks->cert_files, const char *) = cert_file; +- sc->server->cert_chain = chain_file; +- } +- +- if (APR_STATUS_IS_EAGAIN(rv)) { +- /* Managed Domain not ready yet. This is not a reason to fail the config */ +- ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(10085) +- "Init: %s will respond with '503 Service Unavailable' for now. This " +- "host is part of a Managed Domain, but no SSL certificate is " +- "available (yet).", ssl_util_vhostid(p, s)); +- pks->service_unavailable = 1; +- } +- else if (rv != APR_SUCCESS) { +- return rv; +- } +- } ++ /* Allow others to provide certificate files */ ++ pks = sc->server->pks; ++ n = pks->cert_files->nelts; ++ ssl_run_add_cert_files(s, p, pks->cert_files, pks->key_files); ++ ++ if (n < pks->cert_files->nelts) { ++ /* this overrides any old chain configuration */ ++ sc->server->cert_chain = NULL; ++ } ++ ++ if (apr_is_empty_array(pks->cert_files) && !sc->server->cert_chain) { ++ ssl_run_add_fallback_cert_files(s, p, pks->cert_files, pks->key_files); ++ ++ pks->service_unavailable = 1; ++ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(10085) ++ "Init: %s will respond with '503 Service Unavailable' for now. 
There " ++ "are no SSL certificates configured and no other module contributed any.", ++ ssl_util_vhostid(p, s)); + } + + if ((rv = ssl_init_ctx(s, p, ptemp, sc->server)) != APR_SUCCESS) { +@@ -1900,7 +1884,7 @@ static apr_status_t ssl_init_server_ctx(server_rec *s, + * (late) point makes sure that we catch both certificates loaded + * via SSLCertificateFile and SSLOpenSSLConfCmd Certificate. + */ +- if (sc->server->stapling_enabled == TRUE) { ++ do { + X509 *cert; + int i = 0; + int ret = SSL_CTX_set_current_cert(sc->server->ssl_ctx, +@@ -1917,7 +1901,7 @@ static apr_status_t ssl_init_server_ctx(server_rec *s, + SSL_CERT_SET_NEXT); + i++; + } +- } ++ } while(0); + #endif + + #ifdef HAVE_TLS_SESSION_TICKETS +diff --git a/modules/ssl/ssl_engine_kernel.c b/modules/ssl/ssl_engine_kernel.c +index e6a9f67..a5e86e4 100644 +--- a/modules/ssl/ssl_engine_kernel.c ++++ b/modules/ssl/ssl_engine_kernel.c +@@ -2303,6 +2303,37 @@ void ssl_callback_Info(const SSL *ssl, int where, int rc) + } + + #ifdef HAVE_TLSEXT ++ ++static apr_status_t set_challenge_creds(conn_rec *c, const char *servername, ++ SSL *ssl, X509 *cert, EVP_PKEY *key) ++{ ++ SSLConnRec *sslcon = myConnConfig(c); ++ ++ sslcon->service_unavailable = 1; ++ if ((SSL_use_certificate(ssl, cert) < 1)) { ++ ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, c, APLOGNO(10086) ++ "Failed to configure challenge certificate %s", ++ servername); ++ return APR_EGENERAL; ++ } ++ ++ if (!SSL_use_PrivateKey(ssl, key)) { ++ ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, c, APLOGNO(10087) ++ "error '%s' using Challenge key: %s", ++ ERR_error_string(ERR_peek_last_error(), NULL), ++ servername); ++ return APR_EGENERAL; ++ } ++ ++ if (SSL_check_private_key(ssl) < 1) { ++ ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, c, APLOGNO(10088) ++ "Challenge certificate and private key %s " ++ "do not match", servername); ++ return APR_EGENERAL; ++ } ++ return APR_SUCCESS; ++} ++ + /* + * This function sets the virtual host from an extended + * client hello 
with a server name indication extension ("SNI", cf. RFC 6066). +@@ -2332,30 +2363,12 @@ static apr_status_t init_vhost(conn_rec *c, SSL *ssl) + return APR_SUCCESS; + } + else if (ssl_is_challenge(c, servername, &cert, &key)) { +- +- sslcon->service_unavailable = 1; +- if ((SSL_use_certificate(ssl, cert) < 1)) { +- ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, c, APLOGNO(10086) +- "Failed to configure challenge certificate %s", +- servername); ++ /* With ACMEv1 we can have challenge connections to a unknown domains ++ * that need to be answered with a special certificate and will ++ * otherwise not answer any requests. */ ++ if (set_challenge_creds(c, servername, ssl, cert, key) != APR_SUCCESS) { + return APR_EGENERAL; + } +- +- if (!SSL_use_PrivateKey(ssl, key)) { +- ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, c, APLOGNO(10087) +- "error '%s' using Challenge key: %s", +- ERR_error_string(ERR_peek_last_error(), NULL), +- servername); +- return APR_EGENERAL; +- } +- +- if (SSL_check_private_key(ssl) < 1) { +- ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, c, APLOGNO(10088) +- "Challenge certificate and private key %s " +- "do not match", servername); +- return APR_EGENERAL; +- } +- + } + else { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(02044) +@@ -2648,6 +2661,23 @@ int ssl_callback_alpn_select(SSL *ssl, + proposed); + return SSL_TLSEXT_ERR_ALERT_FATAL; + } ++ ++ /* protocol was switched, this could be a challenge protocol such as "acme-tls/1". ++ * For that to work, we need to allow overrides to our ssl certificate. ++ * However, exclude challenge checks on our best known traffic protocol. ++ * (http/1.1 is the default, we never switch to it anyway.) 
++ */ ++ if (strcmp("h2", proposed)) { ++ const char *servername = SSL_get_servername(ssl, TLSEXT_NAMETYPE_host_name); ++ X509 *cert; ++ EVP_PKEY *key; ++ ++ if (ssl_is_challenge(c, servername, &cert, &key)) { ++ if (set_challenge_creds(c, servername, ssl, cert, key) != APR_SUCCESS) { ++ return SSL_TLSEXT_ERR_ALERT_FATAL; ++ } ++ } ++ } + } + + return SSL_TLSEXT_ERR_OK; +diff --git a/modules/ssl/ssl_util_stapling.c b/modules/ssl/ssl_util_stapling.c +index c3e2cfa..4df0a9a 100644 +--- a/modules/ssl/ssl_util_stapling.c ++++ b/modules/ssl/ssl_util_stapling.c +@@ -31,12 +31,28 @@ + #include "ssl_private.h" + #include "ap_mpm.h" + #include "apr_thread_mutex.h" ++#include "mod_ssl_openssl.h" ++ ++APR_IMPLEMENT_OPTIONAL_HOOK_RUN_ALL(ssl, SSL, int, init_stapling_status, ++ (server_rec *s, apr_pool_t *p, ++ X509 *cert, X509 *issuer), ++ (s, p, cert, issuer), ++ DECLINED, DECLINED) ++ ++APR_IMPLEMENT_OPTIONAL_HOOK_RUN_ALL(ssl, SSL, int, get_stapling_status, ++ (unsigned char **pder, int *pderlen, ++ conn_rec *c, server_rec *s, X509 *cert), ++ (pder, pderlen, c, s, cert), ++ DECLINED, DECLINED) ++ + + #ifdef HAVE_OCSP_STAPLING + + static int stapling_cache_mutex_on(server_rec *s); + static int stapling_cache_mutex_off(server_rec *s); + ++static int stapling_cb(SSL *ssl, void *arg); ++ + /** + * Maxiumum OCSP stapling response size. This should be the response for a + * single certificate and will typically include the responder certificate chain +@@ -119,7 +135,38 @@ int ssl_stapling_init_cert(server_rec *s, apr_pool_t *p, apr_pool_t *ptemp, + OCSP_CERTID *cid = NULL; + STACK_OF(OPENSSL_STRING) *aia = NULL; + +- if ((x == NULL) || (X509_digest(x, EVP_sha1(), idx, NULL) != 1)) ++ if (x == NULL) ++ return 0; ++ ++ if (!(issuer = stapling_get_issuer(mctx, x))) { ++ /* In Apache pre 2.4.40, we use to come here only when mod_ssl stapling ++ * was enabled. With the new hooks, we give other modules the chance ++ * to provide stapling status. 
However, we do not want to log ssl errors ++ * where we did not do so in the past. */ ++ if (mctx->stapling_enabled == TRUE) { ++ ssl_log_xerror(SSLLOG_MARK, APLOG_ERR, 0, ptemp, s, x, APLOGNO(02217) ++ "ssl_stapling_init_cert: can't retrieve issuer " ++ "certificate!"); ++ return 0; ++ } ++ return 1; ++ } ++ ++ if (ssl_run_init_stapling_status(s, p, x, issuer) == APR_SUCCESS) { ++ /* Someone's taken over or mod_ssl's own implementation is not enabled */ ++ if (mctx->stapling_enabled != TRUE) { ++ SSL_CTX_set_tlsext_status_cb(mctx->ssl_ctx, stapling_cb); ++ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO() "OCSP stapling added via hook"); ++ } ++ return 1; ++ } ++ ++ if (mctx->stapling_enabled != TRUE) { ++ /* mod_ssl's own implementation is not enabled */ ++ return 1; ++ } ++ ++ if (X509_digest(x, EVP_sha1(), idx, NULL) != 1) + return 0; + + cinf = apr_hash_get(stapling_certinfo, idx, sizeof(idx)); +@@ -139,13 +186,6 @@ int ssl_stapling_init_cert(server_rec *s, apr_pool_t *p, apr_pool_t *ptemp, + return 1; + } + +- if (!(issuer = stapling_get_issuer(mctx, x))) { +- ssl_log_xerror(SSLLOG_MARK, APLOG_ERR, 0, ptemp, s, x, APLOGNO(02217) +- "ssl_stapling_init_cert: can't retrieve issuer " +- "certificate!"); +- return 0; +- } +- + cid = OCSP_cert_to_id(NULL, x, issuer); + X509_free(issuer); + if (!cid) { +@@ -182,18 +222,16 @@ int ssl_stapling_init_cert(server_rec *s, apr_pool_t *p, apr_pool_t *ptemp, + mctx->sc->vhost_id); + + apr_hash_set(stapling_certinfo, cinf->idx, sizeof(cinf->idx), cinf); +- ++ + return 1; + } + +-static certinfo *stapling_get_certinfo(server_rec *s, modssl_ctx_t *mctx, ++static certinfo *stapling_get_certinfo(server_rec *s, X509 *x, modssl_ctx_t *mctx, + SSL *ssl) + { + certinfo *cinf; +- X509 *x; + UCHAR idx[SHA_DIGEST_LENGTH]; +- x = SSL_get_certificate(ssl); +- if ((x == NULL) || (X509_digest(x, EVP_sha1(), idx, NULL) != 1)) ++ if (X509_digest(x, EVP_sha1(), idx, NULL) != 1) + return NULL; + cinf = apr_hash_get(stapling_certinfo, idx, 
sizeof(idx)); + if (cinf && cinf->cid) +@@ -750,18 +788,34 @@ static int stapling_cb(SSL *ssl, void *arg) + OCSP_RESPONSE *rsp = NULL; + int rv; + BOOL ok = TRUE; ++ X509 *x; ++ unsigned char *rspder = NULL; ++ int rspderlen; + ++ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(01951) ++ "stapling_cb: OCSP Stapling callback called"); ++ ++ x = SSL_get_certificate(ssl); ++ if (x == NULL) { ++ return SSL_TLSEXT_ERR_NOACK; ++ } ++ ++ if (ssl_run_get_stapling_status(&rspder, &rspderlen, conn, s, x) == APR_SUCCESS) { ++ /* a hook handles stapling for this certicate and determines the response */ ++ if (rspder == NULL || rspderlen <= 0) { ++ return SSL_TLSEXT_ERR_NOACK; ++ } ++ SSL_set_tlsext_status_ocsp_resp(ssl, rspder, rspderlen); ++ return SSL_TLSEXT_ERR_OK; ++ } ++ + if (sc->server->stapling_enabled != TRUE) { + ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(01950) + "stapling_cb: OCSP Stapling disabled"); + return SSL_TLSEXT_ERR_NOACK; + } + +- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(01951) +- "stapling_cb: OCSP Stapling callback called"); +- +- cinf = stapling_get_certinfo(s, mctx, ssl); +- if (cinf == NULL) { ++ if ((cinf = stapling_get_certinfo(s, x, mctx, ssl)) == NULL) { + return SSL_TLSEXT_ERR_NOACK; + } + +@@ -864,9 +918,10 @@ apr_status_t modssl_init_stapling(server_rec *s, apr_pool_t *p, + if (mctx->stapling_responder_timeout == UNSET) { + mctx->stapling_responder_timeout = 10 * APR_USEC_PER_SEC; + } ++ + SSL_CTX_set_tlsext_status_cb(ctx, stapling_cb); + ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(01960) "OCSP stapling initialized"); +- ++ + return APR_SUCCESS; + } + diff --git a/SOURCES/httpd-2.4.37-mod-md-perms.patch b/SOURCES/httpd-2.4.37-mod-md-perms.patch new file mode 100644 index 0000000..78c0fc3 --- /dev/null +++ b/SOURCES/httpd-2.4.37-mod-md-perms.patch @@ -0,0 +1,44 @@ +diff --git a/modules/md/mod_md_os.c b/modules/md/mod_md_os.c +index f96d566..8df0248 100644 +--- a/modules/md/mod_md_os.c ++++ 
b/modules/md/mod_md_os.c +@@ -41,14 +41,20 @@ + + apr_status_t md_try_chown(const char *fname, unsigned int uid, int gid, apr_pool_t *p) + { +-#if AP_NEED_SET_MUTEX_PERMS +- if (-1 == chown(fname, (uid_t)uid, (gid_t)gid)) { +- apr_status_t rv = APR_FROM_OS_ERROR(errno); +- if (!APR_STATUS_IS_ENOENT(rv)) { +- ap_log_perror(APLOG_MARK, APLOG_ERR, rv, p, APLOGNO(10082) +- "Can't change owner of %s", fname); ++#if AP_NEED_SET_MUTEX_PERMS && HAVE_UNISTD_H ++ /* Since we only switch user when running as root, we only need to chown directories ++ * in that case. Otherwise, the server will ignore any "user/group" directives and ++ * child processes have the same privileges as the parent. ++ */ ++ if (!geteuid()) { ++ if (-1 == chown(fname, (uid_t)uid, (gid_t)gid)) { ++ apr_status_t rv = APR_FROM_OS_ERROR(errno); ++ if (!APR_STATUS_IS_ENOENT(rv)) { ++ ap_log_perror(APLOG_MARK, APLOG_ERR, rv, p, APLOGNO(10082) ++ "Can't change owner of %s", fname); ++ } ++ return rv; + } +- return rv; + } + return APR_SUCCESS; + #else +@@ -58,11 +64,7 @@ apr_status_t md_try_chown(const char *fname, unsigned int uid, int gid, apr_pool + + apr_status_t md_make_worker_accessible(const char *fname, apr_pool_t *p) + { +-#if AP_NEED_SET_MUTEX_PERMS + return md_try_chown(fname, ap_unixd_config.user_id, -1, p); +-#else +- return APR_ENOTIMPL; +-#endif + } + + #ifdef WIN32 diff --git a/SOURCES/httpd-2.4.37-mod-mime-magic-strdup.patch b/SOURCES/httpd-2.4.37-mod-mime-magic-strdup.patch new file mode 100644 index 0000000..e093818 --- /dev/null +++ b/SOURCES/httpd-2.4.37-mod-mime-magic-strdup.patch @@ -0,0 +1,24 @@ +diff --git a/docs/conf/magic b/docs/conf/magic +index 7c56119..bc891d9 100644 +--- a/docs/conf/magic ++++ b/docs/conf/magic +@@ -87,7 +87,7 @@ + # Microsoft WAVE format (*.wav) + # [GRR 950115: probably all of the shorts and longs should be leshort/lelong] + # Microsoft RIFF +-0 string RIFF audio/unknown ++0 string RIFF + # - WAVE format + >8 string WAVE audio/x-wav + # MPEG audio. 
+--- a/modules/metadata/mod_mime_magic.c 2013/06/11 07:36:13 1491699 ++++ b/modules/metadata/mod_mime_magic.c 2013/06/11 07:41:40 1491700 +@@ -606,7 +606,7 @@ + /* high overhead for 1 char - just hope they don't do this much */ + str[0] = c; + str[1] = '\0'; +- return magic_rsl_add(r, str); ++ return magic_rsl_add(r, apr_pstrdup(r->pool, str)); + } + + /* allocate and copy a contiguous string from a result string list */ diff --git a/SOURCES/httpd-2.4.37-proxy-continue.patch b/SOURCES/httpd-2.4.37-proxy-continue.patch new file mode 100644 index 0000000..932b043 --- /dev/null +++ b/SOURCES/httpd-2.4.37-proxy-continue.patch @@ -0,0 +1,1713 @@ +diff --git a/modules/proxy/mod_proxy.c b/modules/proxy/mod_proxy.c +index de48735..d13c249 100644 +--- a/modules/proxy/mod_proxy.c ++++ b/modules/proxy/mod_proxy.c +@@ -1574,6 +1574,8 @@ static void *create_proxy_dir_config(apr_pool_t *p, char *dummy) + new->error_override_set = 0; + new->add_forwarded_headers = 1; + new->add_forwarded_headers_set = 0; ++ new->forward_100_continue = 1; ++ new->forward_100_continue_set = 0; + + return (void *) new; + } +@@ -1610,6 +1612,11 @@ static void *merge_proxy_dir_config(apr_pool_t *p, void *basev, void *addv) + : add->add_forwarded_headers; + new->add_forwarded_headers_set = add->add_forwarded_headers_set + || base->add_forwarded_headers_set; ++ new->forward_100_continue = ++ (add->forward_100_continue_set == 0) ? 
base->forward_100_continue ++ : add->forward_100_continue; ++ new->forward_100_continue_set = add->forward_100_continue_set ++ || base->forward_100_continue_set; + + return new; + } +@@ -2110,6 +2117,14 @@ static const char * + conf->preserve_host_set = 1; + return NULL; + } ++static const char * ++ forward_100_continue(cmd_parms *parms, void *dconf, int flag) ++{ ++ proxy_dir_conf *conf = dconf; ++ conf->forward_100_continue = flag; ++ conf->forward_100_continue_set = 1; ++ return NULL; ++} + + static const char * + set_recv_buffer_size(cmd_parms *parms, void *dummy, const char *arg) +@@ -2683,6 +2698,9 @@ static const command_rec proxy_cmds[] = + "Configure local source IP used for request forward"), + AP_INIT_FLAG("ProxyAddHeaders", add_proxy_http_headers, NULL, RSRC_CONF|ACCESS_CONF, + "on if X-Forwarded-* headers should be added or completed"), ++ AP_INIT_FLAG("Proxy100Continue", forward_100_continue, NULL, RSRC_CONF|ACCESS_CONF, ++ "on if 100-Continue should be forwarded to the origin server, off if the " ++ "proxy should handle it by itself"), + {NULL} + }; + +diff --git a/modules/proxy/mod_proxy.h b/modules/proxy/mod_proxy.h +index 3419023..288c5d4 100644 +--- a/modules/proxy/mod_proxy.h ++++ b/modules/proxy/mod_proxy.h +@@ -240,6 +240,8 @@ typedef struct { + /** Named back references */ + apr_array_header_t *refs; + ++ unsigned int forward_100_continue:1; ++ unsigned int forward_100_continue_set:1; + } proxy_dir_conf; + + /* if we interpolate env vars per-request, we'll need a per-request +@@ -380,6 +382,12 @@ do { \ + (w)->s->io_buffer_size_set = (c)->io_buffer_size_set; \ + } while (0) + ++#define PROXY_DO_100_CONTINUE(w, r) \ ++((w)->s->ping_timeout_set \ ++ && (PROXYREQ_REVERSE == (r)->proxyreq) \ ++ && !(apr_table_get((r)->subprocess_env, "force-proxy-request-1.0")) \ ++ && ap_request_has_body((r))) ++ + /* use 2 hashes */ + typedef struct { + unsigned int def; +diff --git a/modules/proxy/mod_proxy_ftp.c b/modules/proxy/mod_proxy_ftp.c +index 
8f6f853..8d66b4a 100644 +--- a/modules/proxy/mod_proxy_ftp.c ++++ b/modules/proxy/mod_proxy_ftp.c +@@ -1181,12 +1181,10 @@ static int proxy_ftp_handler(request_rec *r, proxy_worker *worker, + return HTTP_SERVICE_UNAVAILABLE; + } + +- if (!backend->connection) { +- status = ap_proxy_connection_create_ex("FTP", backend, r); +- if (status != OK) { +- proxy_ftp_cleanup(r, backend); +- return status; +- } ++ status = ap_proxy_connection_create_ex("FTP", backend, r); ++ if (status != OK) { ++ proxy_ftp_cleanup(r, backend); ++ return status; + } + + /* Use old naming */ +diff --git a/modules/proxy/mod_proxy_hcheck.c b/modules/proxy/mod_proxy_hcheck.c +index 2783a58..dd8e407 100644 +--- a/modules/proxy/mod_proxy_hcheck.c ++++ b/modules/proxy/mod_proxy_hcheck.c +@@ -762,10 +762,8 @@ static apr_status_t hc_check_http(baton_t *baton) + } + + r = create_request_rec(ptemp, ctx->s, baton->balancer, wctx->method); +- if (!backend->connection) { +- if ((status = ap_proxy_connection_create_ex("HCOH", backend, r)) != OK) { +- return backend_cleanup("HCOH", backend, ctx->s, status); +- } ++ if ((status = ap_proxy_connection_create_ex("HCOH", backend, r)) != OK) { ++ return backend_cleanup("HCOH", backend, ctx->s, status); + } + set_request_connection(r, backend->connection); + +diff --git a/modules/proxy/mod_proxy_http.c b/modules/proxy/mod_proxy_http.c +index 56af9a8..f007ad6 100644 +--- a/modules/proxy/mod_proxy_http.c ++++ b/modules/proxy/mod_proxy_http.c +@@ -216,8 +216,12 @@ static void add_cl(apr_pool_t *p, + APR_BRIGADE_INSERT_TAIL(header_brigade, e); + } + +-#define ASCII_CRLF "\015\012" +-#define ASCII_ZERO "\060" ++#ifndef CRLF_ASCII ++#define CRLF_ASCII "\015\012" ++#endif ++#ifndef ZERO_ASCII ++#define ZERO_ASCII "\060" ++#endif + + static void terminate_headers(apr_bucket_alloc_t *bucket_alloc, + apr_bucket_brigade *header_brigade) +@@ -225,304 +229,228 @@ static void terminate_headers(apr_bucket_alloc_t *bucket_alloc, + apr_bucket *e; + + /* add empty line at the end of 
the headers */ +- e = apr_bucket_immortal_create(ASCII_CRLF, 2, bucket_alloc); ++ e = apr_bucket_immortal_create(CRLF_ASCII, 2, bucket_alloc); + APR_BRIGADE_INSERT_TAIL(header_brigade, e); + } + + + #define MAX_MEM_SPOOL 16384 + +-static int stream_reqbody_chunked(apr_pool_t *p, +- request_rec *r, +- proxy_conn_rec *p_conn, +- conn_rec *origin, +- apr_bucket_brigade *header_brigade, +- apr_bucket_brigade *input_brigade) +-{ +- int seen_eos = 0, rv = OK; +- apr_size_t hdr_len; +- apr_off_t bytes; +- apr_status_t status; +- apr_bucket_alloc_t *bucket_alloc = r->connection->bucket_alloc; +- apr_bucket_brigade *bb; +- apr_bucket *e; +- +- add_te_chunked(p, bucket_alloc, header_brigade); +- terminate_headers(bucket_alloc, header_brigade); ++typedef enum { ++ RB_INIT = 0, ++ RB_STREAM_CL, ++ RB_STREAM_CHUNKED, ++ RB_SPOOL_CL ++} rb_methods; + +- while (!APR_BUCKET_IS_EOS(APR_BRIGADE_FIRST(input_brigade))) +- { +- char chunk_hdr[20]; /* must be here due to transient bucket. */ ++typedef struct { ++ apr_pool_t *p; ++ request_rec *r; ++ proxy_worker *worker; ++ proxy_server_conf *sconf; + +- /* If this brigade contains EOS, either stop or remove it. */ +- if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) { +- seen_eos = 1; +- +- /* We can't pass this EOS to the output_filters. 
*/ +- e = APR_BRIGADE_LAST(input_brigade); +- apr_bucket_delete(e); +- } +- +- apr_brigade_length(input_brigade, 1, &bytes); ++ char server_portstr[32]; ++ proxy_conn_rec *backend; ++ conn_rec *origin; + +- hdr_len = apr_snprintf(chunk_hdr, sizeof(chunk_hdr), +- "%" APR_UINT64_T_HEX_FMT CRLF, +- (apr_uint64_t)bytes); ++ apr_bucket_alloc_t *bucket_alloc; ++ apr_bucket_brigade *header_brigade; ++ apr_bucket_brigade *input_brigade; ++ char *old_cl_val, *old_te_val; ++ apr_off_t cl_val; + +- ap_xlate_proto_to_ascii(chunk_hdr, hdr_len); +- e = apr_bucket_transient_create(chunk_hdr, hdr_len, +- bucket_alloc); +- APR_BRIGADE_INSERT_HEAD(input_brigade, e); ++ rb_methods rb_method; + +- /* +- * Append the end-of-chunk CRLF +- */ +- e = apr_bucket_immortal_create(ASCII_CRLF, 2, bucket_alloc); +- APR_BRIGADE_INSERT_TAIL(input_brigade, e); ++ int expecting_100; ++ unsigned int do_100_continue:1, ++ prefetch_nonblocking:1; ++} proxy_http_req_t; + +- if (header_brigade) { +- /* we never sent the header brigade, so go ahead and +- * take care of that now +- */ +- bb = header_brigade; +- +- /* +- * Save input_brigade in bb brigade. (At least) in the SSL case +- * input_brigade contains transient buckets whose data would get +- * overwritten during the next call of ap_get_brigade in the loop. +- * ap_save_brigade ensures these buckets to be set aside. +- * Calling ap_save_brigade with NULL as filter is OK, because +- * bb brigade already has been created and does not need to get +- * created by ap_save_brigade. +- */ +- status = ap_save_brigade(NULL, &bb, &input_brigade, p); +- if (status != APR_SUCCESS) { +- return HTTP_INTERNAL_SERVER_ERROR; +- } +- +- header_brigade = NULL; +- } +- else { +- bb = input_brigade; ++/* Read what's in the client pipe. If nonblocking is set and read is EAGAIN, ++ * pass a FLUSH bucket to the backend and read again in blocking mode. 
++ */ ++static int stream_reqbody_read(proxy_http_req_t *req, apr_bucket_brigade *bb, ++ int nonblocking) ++{ ++ request_rec *r = req->r; ++ proxy_conn_rec *p_conn = req->backend; ++ apr_bucket_alloc_t *bucket_alloc = req->bucket_alloc; ++ apr_read_type_e block = nonblocking ? APR_NONBLOCK_READ : APR_BLOCK_READ; ++ apr_status_t status; ++ int rv; ++ ++ for (;;) { ++ status = ap_get_brigade(r->input_filters, bb, AP_MODE_READBYTES, ++ block, HUGE_STRING_LEN); ++ if (block == APR_BLOCK_READ ++ || (!APR_STATUS_IS_EAGAIN(status) ++ && (status != APR_SUCCESS || !APR_BRIGADE_EMPTY(bb)))) { ++ break; + } + +- /* The request is flushed below this loop with chunk EOS header */ +- rv = ap_proxy_pass_brigade(bucket_alloc, r, p_conn, origin, bb, 0); ++ /* Flush and retry (blocking) */ ++ apr_brigade_cleanup(bb); ++ rv = ap_proxy_pass_brigade(bucket_alloc, r, p_conn, req->origin, bb, 1); + if (rv != OK) { + return rv; + } +- +- if (seen_eos) { +- break; +- } +- +- status = ap_get_brigade(r->input_filters, input_brigade, +- AP_MODE_READBYTES, APR_BLOCK_READ, +- HUGE_STRING_LEN); +- +- if (status != APR_SUCCESS) { +- conn_rec *c = r->connection; +- ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(02608) +- "read request body failed to %pI (%s)" +- " from %s (%s)", p_conn->addr, +- p_conn->hostname ? p_conn->hostname: "", +- c->client_ip, c->remote_host ? c->remote_host: ""); +- return ap_map_http_request_error(status, HTTP_BAD_REQUEST); +- } ++ block = APR_BLOCK_READ; + } + +- if (header_brigade) { +- /* we never sent the header brigade because there was no request body; +- * send it now +- */ +- bb = header_brigade; +- } +- else { +- if (!APR_BRIGADE_EMPTY(input_brigade)) { +- /* input brigade still has an EOS which we can't pass to the output_filters. 
*/ +- e = APR_BRIGADE_LAST(input_brigade); +- AP_DEBUG_ASSERT(APR_BUCKET_IS_EOS(e)); +- apr_bucket_delete(e); +- } +- bb = input_brigade; +- } +- +- e = apr_bucket_immortal_create(ASCII_ZERO ASCII_CRLF +- /* */ +- ASCII_CRLF, +- 5, bucket_alloc); +- APR_BRIGADE_INSERT_TAIL(bb, e); +- +- if (apr_table_get(r->subprocess_env, "proxy-sendextracrlf")) { +- e = apr_bucket_immortal_create(ASCII_CRLF, 2, bucket_alloc); +- APR_BRIGADE_INSERT_TAIL(bb, e); ++ if (status != APR_SUCCESS) { ++ conn_rec *c = r->connection; ++ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(02608) ++ "read request body failed to %pI (%s)" ++ " from %s (%s)", p_conn->addr, ++ p_conn->hostname ? p_conn->hostname: "", ++ c->client_ip, c->remote_host ? c->remote_host: ""); ++ return ap_map_http_request_error(status, HTTP_BAD_REQUEST); + } + +- /* Now we have headers-only, or the chunk EOS mark; flush it */ +- rv = ap_proxy_pass_brigade(bucket_alloc, r, p_conn, origin, bb, 1); +- return rv; ++ return OK; + } + +-static int stream_reqbody_cl(apr_pool_t *p, +- request_rec *r, +- proxy_conn_rec *p_conn, +- conn_rec *origin, +- apr_bucket_brigade *header_brigade, +- apr_bucket_brigade *input_brigade, +- char *old_cl_val) ++static int stream_reqbody(proxy_http_req_t *req, rb_methods rb_method) + { +- int seen_eos = 0, rv = 0; +- apr_status_t status = APR_SUCCESS; +- apr_bucket_alloc_t *bucket_alloc = r->connection->bucket_alloc; +- apr_bucket_brigade *bb; ++ request_rec *r = req->r; ++ int seen_eos = 0, rv = OK; ++ apr_size_t hdr_len; ++ char chunk_hdr[20]; /* must be here due to transient bucket. 
*/ ++ proxy_conn_rec *p_conn = req->backend; ++ apr_bucket_alloc_t *bucket_alloc = req->bucket_alloc; ++ apr_bucket_brigade *header_brigade = req->header_brigade; ++ apr_bucket_brigade *input_brigade = req->input_brigade; ++ apr_off_t bytes, bytes_streamed = 0; + apr_bucket *e; +- apr_off_t cl_val = 0; +- apr_off_t bytes; +- apr_off_t bytes_streamed = 0; +- +- if (old_cl_val) { +- char *endstr; + +- add_cl(p, bucket_alloc, header_brigade, old_cl_val); +- status = apr_strtoff(&cl_val, old_cl_val, &endstr, 10); +- +- if (status || *endstr || endstr == old_cl_val || cl_val < 0) { +- ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01085) +- "could not parse request Content-Length (%s)", +- old_cl_val); +- return HTTP_BAD_REQUEST; ++ do { ++ if (APR_BRIGADE_EMPTY(input_brigade) ++ && APR_BRIGADE_EMPTY(header_brigade)) { ++ rv = stream_reqbody_read(req, input_brigade, 1); ++ if (rv != OK) { ++ return rv; ++ } + } +- } +- terminate_headers(bucket_alloc, header_brigade); +- +- while (!APR_BUCKET_IS_EOS(APR_BRIGADE_FIRST(input_brigade))) +- { +- apr_brigade_length(input_brigade, 1, &bytes); +- bytes_streamed += bytes; +- +- /* If this brigade contains EOS, either stop or remove it. */ +- if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) { +- seen_eos = 1; + +- /* We can't pass this EOS to the output_filters. */ +- e = APR_BRIGADE_LAST(input_brigade); +- apr_bucket_delete(e); ++ if (!APR_BRIGADE_EMPTY(input_brigade)) { ++ /* If this brigade contains EOS, either stop or remove it. */ ++ if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) { ++ seen_eos = 1; + +- if (apr_table_get(r->subprocess_env, "proxy-sendextracrlf")) { +- e = apr_bucket_immortal_create(ASCII_CRLF, 2, bucket_alloc); +- APR_BRIGADE_INSERT_TAIL(input_brigade, e); ++ /* We can't pass this EOS to the output_filters. */ ++ e = APR_BRIGADE_LAST(input_brigade); ++ apr_bucket_delete(e); + } +- } + +- /* C-L < bytes streamed?!? 
+- * We will error out after the body is completely +- * consumed, but we can't stream more bytes at the +- * back end since they would in part be interpreted +- * as another request! If nothing is sent, then +- * just send nothing. +- * +- * Prevents HTTP Response Splitting. +- */ +- if (bytes_streamed > cl_val) { +- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01086) +- "read more bytes of request body than expected " +- "(got %" APR_OFF_T_FMT ", expected %" APR_OFF_T_FMT ")", +- bytes_streamed, cl_val); +- return HTTP_INTERNAL_SERVER_ERROR; +- } +- +- if (header_brigade) { +- /* we never sent the header brigade, so go ahead and +- * take care of that now +- */ +- bb = header_brigade; ++ apr_brigade_length(input_brigade, 1, &bytes); ++ bytes_streamed += bytes; + +- /* +- * Save input_brigade in bb brigade. (At least) in the SSL case +- * input_brigade contains transient buckets whose data would get +- * overwritten during the next call of ap_get_brigade in the loop. +- * ap_save_brigade ensures these buckets to be set aside. +- * Calling ap_save_brigade with NULL as filter is OK, because +- * bb brigade already has been created and does not need to get +- * created by ap_save_brigade. 
+- */ +- status = ap_save_brigade(NULL, &bb, &input_brigade, p); +- if (status != APR_SUCCESS) { ++ if (rb_method == RB_STREAM_CHUNKED) { ++ if (bytes) { ++ /* ++ * Prepend the size of the chunk ++ */ ++ hdr_len = apr_snprintf(chunk_hdr, sizeof(chunk_hdr), ++ "%" APR_UINT64_T_HEX_FMT CRLF, ++ (apr_uint64_t)bytes); ++ ap_xlate_proto_to_ascii(chunk_hdr, hdr_len); ++ e = apr_bucket_transient_create(chunk_hdr, hdr_len, ++ bucket_alloc); ++ APR_BRIGADE_INSERT_HEAD(input_brigade, e); ++ ++ /* ++ * Append the end-of-chunk CRLF ++ */ ++ e = apr_bucket_immortal_create(CRLF_ASCII, 2, bucket_alloc); ++ APR_BRIGADE_INSERT_TAIL(input_brigade, e); ++ } ++ if (seen_eos) { ++ /* ++ * Append the tailing 0-size chunk ++ */ ++ e = apr_bucket_immortal_create(ZERO_ASCII CRLF_ASCII ++ /* */ ++ CRLF_ASCII, ++ 5, bucket_alloc); ++ APR_BRIGADE_INSERT_TAIL(input_brigade, e); ++ } ++ } ++ else if (bytes_streamed > req->cl_val) { ++ /* C-L < bytes streamed?!? ++ * We will error out after the body is completely ++ * consumed, but we can't stream more bytes at the ++ * back end since they would in part be interpreted ++ * as another request! If nothing is sent, then ++ * just send nothing. ++ * ++ * Prevents HTTP Response Splitting. ++ */ ++ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01086) ++ "read more bytes of request body than expected " ++ "(got %" APR_OFF_T_FMT ", expected " ++ "%" APR_OFF_T_FMT ")", ++ bytes_streamed, req->cl_val); + return HTTP_INTERNAL_SERVER_ERROR; + } + +- header_brigade = NULL; +- } +- else { +- bb = input_brigade; +- } +- +- /* Once we hit EOS, we are ready to flush. 
*/ +- rv = ap_proxy_pass_brigade(bucket_alloc, r, p_conn, origin, bb, seen_eos); +- if (rv != OK) { +- return rv ; +- } +- +- if (seen_eos) { +- break; ++ if (seen_eos && apr_table_get(r->subprocess_env, ++ "proxy-sendextracrlf")) { ++ e = apr_bucket_immortal_create(CRLF_ASCII, 2, bucket_alloc); ++ APR_BRIGADE_INSERT_TAIL(input_brigade, e); ++ } + } + +- status = ap_get_brigade(r->input_filters, input_brigade, +- AP_MODE_READBYTES, APR_BLOCK_READ, +- HUGE_STRING_LEN); ++ /* If we never sent the header brigade, go ahead and take care of ++ * that now by prepending it (once only since header_brigade will be ++ * empty afterward). ++ */ ++ APR_BRIGADE_PREPEND(input_brigade, header_brigade); + +- if (status != APR_SUCCESS) { +- conn_rec *c = r->connection; +- ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(02609) +- "read request body failed to %pI (%s)" +- " from %s (%s)", p_conn->addr, +- p_conn->hostname ? p_conn->hostname: "", +- c->client_ip, c->remote_host ? c->remote_host: ""); +- return ap_map_http_request_error(status, HTTP_BAD_REQUEST); ++ /* Flush here on EOS because we won't stream_reqbody_read() again */ ++ rv = ap_proxy_pass_brigade(bucket_alloc, r, p_conn, req->origin, ++ input_brigade, seen_eos); ++ if (rv != OK) { ++ return rv; + } +- } ++ } while (!seen_eos); + +- if (bytes_streamed != cl_val) { ++ if (rb_method == RB_STREAM_CL && bytes_streamed != req->cl_val) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01087) + "client %s given Content-Length did not match" + " number of body bytes read", r->connection->client_ip); + return HTTP_BAD_REQUEST; + } + +- if (header_brigade) { +- /* we never sent the header brigade since there was no request +- * body; send it now with the flush flag +- */ +- bb = header_brigade; +- return(ap_proxy_pass_brigade(bucket_alloc, r, p_conn, origin, bb, 1)); +- } +- + return OK; + } + +-static int spool_reqbody_cl(apr_pool_t *p, +- request_rec *r, +- proxy_conn_rec *p_conn, +- conn_rec *origin, +- 
apr_bucket_brigade *header_brigade, +- apr_bucket_brigade *input_brigade, +- int force_cl) ++static int spool_reqbody_cl(proxy_http_req_t *req, apr_off_t *bytes_spooled) + { +- int seen_eos = 0; +- apr_status_t status; +- apr_bucket_alloc_t *bucket_alloc = r->connection->bucket_alloc; ++ apr_pool_t *p = req->p; ++ request_rec *r = req->r; ++ int seen_eos = 0, rv = OK; ++ apr_status_t status = APR_SUCCESS; ++ apr_bucket_alloc_t *bucket_alloc = req->bucket_alloc; ++ apr_bucket_brigade *input_brigade = req->input_brigade; + apr_bucket_brigade *body_brigade; + apr_bucket *e; +- apr_off_t bytes, bytes_spooled = 0, fsize = 0; ++ apr_off_t bytes, fsize = 0; + apr_file_t *tmpfile = NULL; + apr_off_t limit; + + body_brigade = apr_brigade_create(p, bucket_alloc); ++ *bytes_spooled = 0; + + limit = ap_get_limit_req_body(r); + +- while (!APR_BUCKET_IS_EOS(APR_BRIGADE_FIRST(input_brigade))) +- { ++ do { ++ if (APR_BRIGADE_EMPTY(input_brigade)) { ++ rv = stream_reqbody_read(req, input_brigade, 0); ++ if (rv != OK) { ++ return rv; ++ } ++ } ++ + /* If this brigade contains EOS, either stop or remove it. */ + if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) { + seen_eos = 1; +@@ -534,13 +462,13 @@ static int spool_reqbody_cl(apr_pool_t *p, + + apr_brigade_length(input_brigade, 1, &bytes); + +- if (bytes_spooled + bytes > MAX_MEM_SPOOL) { ++ if (*bytes_spooled + bytes > MAX_MEM_SPOOL) { + /* + * LimitRequestBody does not affect Proxy requests (Should it?). + * Let it take effect if we decide to store the body in a + * temporary file on disk. 
+ */ +- if (limit && (bytes_spooled + bytes > limit)) { ++ if (limit && (*bytes_spooled + bytes > limit)) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01088) + "Request body is larger than the configured " + "limit of %" APR_OFF_T_FMT, limit); +@@ -610,69 +538,42 @@ static int spool_reqbody_cl(apr_pool_t *p, + + } + +- bytes_spooled += bytes; +- +- if (seen_eos) { +- break; +- } +- +- status = ap_get_brigade(r->input_filters, input_brigade, +- AP_MODE_READBYTES, APR_BLOCK_READ, +- HUGE_STRING_LEN); +- +- if (status != APR_SUCCESS) { +- conn_rec *c = r->connection; +- ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(02610) +- "read request body failed to %pI (%s)" +- " from %s (%s)", p_conn->addr, +- p_conn->hostname ? p_conn->hostname: "", +- c->client_ip, c->remote_host ? c->remote_host: ""); +- return ap_map_http_request_error(status, HTTP_BAD_REQUEST); +- } +- } ++ *bytes_spooled += bytes; ++ } while (!seen_eos); + +- if (bytes_spooled || force_cl) { +- add_cl(p, bucket_alloc, header_brigade, apr_off_t_toa(p, bytes_spooled)); +- } +- terminate_headers(bucket_alloc, header_brigade); +- APR_BRIGADE_CONCAT(header_brigade, body_brigade); ++ APR_BRIGADE_CONCAT(input_brigade, body_brigade); + if (tmpfile) { +- apr_brigade_insert_file(header_brigade, tmpfile, 0, fsize, p); ++ apr_brigade_insert_file(input_brigade, tmpfile, 0, fsize, p); + } + if (apr_table_get(r->subprocess_env, "proxy-sendextracrlf")) { +- e = apr_bucket_immortal_create(ASCII_CRLF, 2, bucket_alloc); +- APR_BRIGADE_INSERT_TAIL(header_brigade, e); ++ e = apr_bucket_immortal_create(CRLF_ASCII, 2, bucket_alloc); ++ APR_BRIGADE_INSERT_TAIL(input_brigade, e); + } +- /* This is all a single brigade, pass with flush flagged */ +- return(ap_proxy_pass_brigade(bucket_alloc, r, p_conn, origin, header_brigade, 1)); ++ return OK; + } + +-static +-int ap_proxy_http_request(apr_pool_t *p, request_rec *r, +- proxy_conn_rec *p_conn, proxy_worker *worker, +- proxy_server_conf *conf, +- apr_uri_t *uri, 
+- char *url, char *server_portstr) ++static int ap_proxy_http_prefetch(proxy_http_req_t *req, ++ apr_uri_t *uri, char *url) + { ++ apr_pool_t *p = req->p; ++ request_rec *r = req->r; + conn_rec *c = r->connection; +- apr_bucket_alloc_t *bucket_alloc = c->bucket_alloc; +- apr_bucket_brigade *header_brigade; +- apr_bucket_brigade *input_brigade; ++ proxy_conn_rec *p_conn = req->backend; ++ apr_bucket_alloc_t *bucket_alloc = req->bucket_alloc; ++ apr_bucket_brigade *header_brigade = req->header_brigade; ++ apr_bucket_brigade *input_brigade = req->input_brigade; + apr_bucket_brigade *temp_brigade; + apr_bucket *e; + char *buf; + apr_status_t status; +- enum rb_methods {RB_INIT, RB_STREAM_CL, RB_STREAM_CHUNKED, RB_SPOOL_CL}; +- enum rb_methods rb_method = RB_INIT; +- char *old_cl_val = NULL; +- char *old_te_val = NULL; + apr_off_t bytes_read = 0; + apr_off_t bytes; + int force10, rv; ++ apr_read_type_e block; + conn_rec *origin = p_conn->connection; + + if (apr_table_get(r->subprocess_env, "force-proxy-request-1.0")) { +- if (r->expecting_100) { ++ if (req->expecting_100) { + return HTTP_EXPECTATION_FAILED; + } + force10 = 1; +@@ -680,17 +581,14 @@ int ap_proxy_http_request(apr_pool_t *p, request_rec *r, + force10 = 0; + } + +- header_brigade = apr_brigade_create(p, bucket_alloc); + rv = ap_proxy_create_hdrbrgd(p, header_brigade, r, p_conn, +- worker, conf, uri, url, server_portstr, +- &old_cl_val, &old_te_val); ++ req->worker, req->sconf, ++ uri, url, req->server_portstr, ++ &req->old_cl_val, &req->old_te_val); + if (rv != OK) { + return rv; + } + +- /* We have headers, let's figure out our request body... */ +- input_brigade = apr_brigade_create(p, bucket_alloc); +- + /* sub-requests never use keepalives, and mustn't pass request bodies. + * Because the new logic looks at input_brigade, we will self-terminate + * input_brigade and jump past all of the request body logic... 
+@@ -703,9 +601,9 @@ int ap_proxy_http_request(apr_pool_t *p, request_rec *r, + if (!r->kept_body && r->main) { + /* XXX: Why DON'T sub-requests use keepalives? */ + p_conn->close = 1; +- old_cl_val = NULL; +- old_te_val = NULL; +- rb_method = RB_STREAM_CL; ++ req->old_te_val = NULL; ++ req->old_cl_val = NULL; ++ req->rb_method = RB_STREAM_CL; + e = apr_bucket_eos_create(input_brigade->bucket_alloc); + APR_BRIGADE_INSERT_TAIL(input_brigade, e); + goto skip_body; +@@ -719,18 +617,19 @@ int ap_proxy_http_request(apr_pool_t *p, request_rec *r, + * encoding has been done by the extensions' handler, and + * do not modify add_te_chunked's logic + */ +- if (old_te_val && strcasecmp(old_te_val, "chunked") != 0) { ++ if (req->old_te_val && ap_cstr_casecmp(req->old_te_val, "chunked") != 0) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01093) +- "%s Transfer-Encoding is not supported", old_te_val); ++ "%s Transfer-Encoding is not supported", ++ req->old_te_val); + return HTTP_INTERNAL_SERVER_ERROR; + } + +- if (old_cl_val && old_te_val) { ++ if (req->old_cl_val && req->old_te_val) { + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01094) + "client %s (%s) requested Transfer-Encoding " + "chunked body with Content-Length (C-L ignored)", + c->client_ip, c->remote_host ? c->remote_host: ""); +- old_cl_val = NULL; ++ req->old_cl_val = NULL; + origin->keepalive = AP_CONN_CLOSE; + p_conn->close = 1; + } +@@ -744,10 +643,19 @@ int ap_proxy_http_request(apr_pool_t *p, request_rec *r, + * reasonable size. + */ + temp_brigade = apr_brigade_create(p, bucket_alloc); ++ block = req->prefetch_nonblocking ? 
APR_NONBLOCK_READ : APR_BLOCK_READ; + do { + status = ap_get_brigade(r->input_filters, temp_brigade, +- AP_MODE_READBYTES, APR_BLOCK_READ, ++ AP_MODE_READBYTES, block, + MAX_MEM_SPOOL - bytes_read); ++ /* ap_get_brigade may return success with an empty brigade ++ * for a non-blocking read which would block ++ */ ++ if (block == APR_NONBLOCK_READ ++ && ((status == APR_SUCCESS && APR_BRIGADE_EMPTY(temp_brigade)) ++ || APR_STATUS_IS_EAGAIN(status))) { ++ break; ++ } + if (status != APR_SUCCESS) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01095) + "prefetch request body failed to %pI (%s)" +@@ -785,7 +693,8 @@ int ap_proxy_http_request(apr_pool_t *p, request_rec *r, + * (an arbitrary value.) + */ + } while ((bytes_read < MAX_MEM_SPOOL - 80) +- && !APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))); ++ && !APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade)) ++ && !req->prefetch_nonblocking); + + /* Use chunked request body encoding or send a content-length body? + * +@@ -822,7 +731,8 @@ int ap_proxy_http_request(apr_pool_t *p, request_rec *r, + * is absent, and the filters are unchanged (the body won't + * be resized by another content filter). + */ +- if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) { ++ if (!APR_BRIGADE_EMPTY(input_brigade) ++ && APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) { + /* The whole thing fit, so our decision is trivial, use + * the filtered bytes read from the client for the request + * body Content-Length. +@@ -830,34 +740,43 @@ int ap_proxy_http_request(apr_pool_t *p, request_rec *r, + * If we expected no body, and read no body, do not set + * the Content-Length. 
+ */ +- if (old_cl_val || old_te_val || bytes_read) { +- old_cl_val = apr_off_t_toa(r->pool, bytes_read); ++ if (req->old_cl_val || req->old_te_val || bytes_read) { ++ req->old_cl_val = apr_off_t_toa(r->pool, bytes_read); ++ req->cl_val = bytes_read; + } +- rb_method = RB_STREAM_CL; ++ req->rb_method = RB_STREAM_CL; + } +- else if (old_te_val) { ++ else if (req->old_te_val) { + if (force10 + || (apr_table_get(r->subprocess_env, "proxy-sendcl") + && !apr_table_get(r->subprocess_env, "proxy-sendchunks") + && !apr_table_get(r->subprocess_env, "proxy-sendchunked"))) { +- rb_method = RB_SPOOL_CL; ++ req->rb_method = RB_SPOOL_CL; + } + else { +- rb_method = RB_STREAM_CHUNKED; ++ req->rb_method = RB_STREAM_CHUNKED; + } + } +- else if (old_cl_val) { ++ else if (req->old_cl_val) { + if (r->input_filters == r->proto_input_filters) { +- rb_method = RB_STREAM_CL; ++ char *endstr; ++ status = apr_strtoff(&req->cl_val, req->old_cl_val, &endstr, 10); ++ if (status != APR_SUCCESS || *endstr || req->cl_val < 0) { ++ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01085) ++ "could not parse request Content-Length (%s)", ++ req->old_cl_val); ++ return HTTP_BAD_REQUEST; ++ } ++ req->rb_method = RB_STREAM_CL; + } + else if (!force10 + && (apr_table_get(r->subprocess_env, "proxy-sendchunks") + || apr_table_get(r->subprocess_env, "proxy-sendchunked")) + && !apr_table_get(r->subprocess_env, "proxy-sendcl")) { +- rb_method = RB_STREAM_CHUNKED; ++ req->rb_method = RB_STREAM_CHUNKED; + } + else { +- rb_method = RB_SPOOL_CL; ++ req->rb_method = RB_SPOOL_CL; + } + } + else { +@@ -865,7 +784,31 @@ int ap_proxy_http_request(apr_pool_t *p, request_rec *r, + * requests, and has the behavior that it will not add any C-L + * when the old_cl_val is NULL. 
+ */ +- rb_method = RB_SPOOL_CL; ++ req->rb_method = RB_SPOOL_CL; ++ } ++ ++ switch (req->rb_method) { ++ case RB_STREAM_CHUNKED: ++ add_te_chunked(req->p, bucket_alloc, header_brigade); ++ break; ++ ++ case RB_STREAM_CL: ++ if (req->old_cl_val) { ++ add_cl(req->p, bucket_alloc, header_brigade, req->old_cl_val); ++ } ++ break; ++ ++ default: /* => RB_SPOOL_CL */ ++ /* If we have to spool the body, do it now, before connecting or ++ * reusing the backend connection. ++ */ ++ rv = spool_reqbody_cl(req, &bytes); ++ if (rv != OK) { ++ return rv; ++ } ++ if (bytes || req->old_te_val || req->old_cl_val) { ++ add_cl(p, bucket_alloc, header_brigade, apr_off_t_toa(p, bytes)); ++ } + } + + /* Yes I hate gotos. This is the subrequest shortcut */ +@@ -886,23 +829,44 @@ skip_body: + e = apr_bucket_pool_create(buf, strlen(buf), p, c->bucket_alloc); + APR_BRIGADE_INSERT_TAIL(header_brigade, e); + } ++ terminate_headers(bucket_alloc, header_brigade); + +- /* send the request body, if any. */ +- switch(rb_method) { +- case RB_STREAM_CHUNKED: +- rv = stream_reqbody_chunked(p, r, p_conn, origin, header_brigade, +- input_brigade); +- break; ++ return OK; ++} ++ ++static int ap_proxy_http_request(proxy_http_req_t *req) ++{ ++ int rv; ++ request_rec *r = req->r; ++ apr_bucket_alloc_t *bucket_alloc = req->bucket_alloc; ++ apr_bucket_brigade *header_brigade = req->header_brigade; ++ apr_bucket_brigade *input_brigade = req->input_brigade; ++ ++ /* send the request header/body, if any. 
*/ ++ switch (req->rb_method) { + case RB_STREAM_CL: +- rv = stream_reqbody_cl(p, r, p_conn, origin, header_brigade, +- input_brigade, old_cl_val); ++ case RB_STREAM_CHUNKED: ++ if (req->do_100_continue) { ++ rv = ap_proxy_pass_brigade(bucket_alloc, r, req->backend, ++ req->origin, header_brigade, 1); ++ } ++ else { ++ rv = stream_reqbody(req, req->rb_method); ++ } + break; ++ + case RB_SPOOL_CL: +- rv = spool_reqbody_cl(p, r, p_conn, origin, header_brigade, +- input_brigade, (old_cl_val != NULL) +- || (old_te_val != NULL) +- || (bytes_read > 0)); ++ /* Prefetch has built the header and spooled the whole body; ++ * if we don't expect 100-continue we can flush both all at once, ++ * otherwise flush the header only. ++ */ ++ if (!req->do_100_continue) { ++ APR_BRIGADE_CONCAT(header_brigade, input_brigade); ++ } ++ rv = ap_proxy_pass_brigade(bucket_alloc, r, req->backend, ++ req->origin, header_brigade, 1); + break; ++ + default: + /* shouldn't be possible */ + rv = HTTP_INTERNAL_SERVER_ERROR; +@@ -910,10 +874,12 @@ skip_body: + } + + if (rv != OK) { ++ conn_rec *c = r->connection; + /* apr_status_t value has been logged in lower level method */ + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01097) + "pass request body failed to %pI (%s) from %s (%s)", +- p_conn->addr, p_conn->hostname ? p_conn->hostname: "", ++ req->backend->addr, ++ req->backend->hostname ? req->backend->hostname: "", + c->client_ip, c->remote_host ? 
c->remote_host: ""); + return rv; + } +@@ -1189,12 +1155,16 @@ static int add_trailers(void *data, const char *key, const char *val) + } + + static +-apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, +- proxy_conn_rec **backend_ptr, +- proxy_worker *worker, +- proxy_server_conf *conf, +- char *server_portstr) { ++int ap_proxy_http_process_response(proxy_http_req_t *req) ++{ ++ apr_pool_t *p = req->p; ++ request_rec *r = req->r; + conn_rec *c = r->connection; ++ proxy_worker *worker = req->worker; ++ proxy_conn_rec *backend = req->backend; ++ conn_rec *origin = req->origin; ++ int do_100_continue = req->do_100_continue; ++ + char *buffer; + char fixed_buffer[HUGE_STRING_LEN]; + const char *buf; +@@ -1217,19 +1187,11 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, + int proxy_status = OK; + const char *original_status_line = r->status_line; + const char *proxy_status_line = NULL; +- proxy_conn_rec *backend = *backend_ptr; +- conn_rec *origin = backend->connection; + apr_interval_time_t old_timeout = 0; + proxy_dir_conf *dconf; +- int do_100_continue; + + dconf = ap_get_module_config(r->per_dir_config, &proxy_module); + +- do_100_continue = (worker->s->ping_timeout_set +- && ap_request_has_body(r) +- && (PROXYREQ_REVERSE == r->proxyreq) +- && !(apr_table_get(r->subprocess_env, "force-proxy-request-1.0"))); +- + bb = apr_brigade_create(p, c->bucket_alloc); + pass_bb = apr_brigade_create(p, c->bucket_alloc); + +@@ -1248,7 +1210,7 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, + } + + /* Setup for 100-Continue timeout if appropriate */ +- if (do_100_continue) { ++ if (do_100_continue && worker->s->ping_timeout_set) { + apr_socket_timeout_get(backend->sock, &old_timeout); + if (worker->s->ping_timeout != old_timeout) { + apr_status_t rc; +@@ -1273,6 +1235,8 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, + origin->local_addr->port)); + do { + 
apr_status_t rc; ++ int major = 0, minor = 0; ++ int toclose = 0; + + apr_brigade_cleanup(bb); + +@@ -1360,9 +1324,6 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, + * This is buggy if we ever see an HTTP/1.10 + */ + if (apr_date_checkmask(buffer, "HTTP/#.# ###*")) { +- int major, minor; +- int toclose; +- + major = buffer[5] - '0'; + minor = buffer[7] - '0'; + +@@ -1412,8 +1373,8 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, + "Set-Cookie", NULL); + + /* shove the headers direct into r->headers_out */ +- ap_proxy_read_headers(r, backend->r, buffer, response_field_size, origin, +- &pread_len); ++ ap_proxy_read_headers(r, backend->r, buffer, response_field_size, ++ origin, &pread_len); + + if (r->headers_out == NULL) { + ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(01106) +@@ -1491,7 +1452,8 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, + r->headers_out = ap_proxy_clean_warnings(p, r->headers_out); + + /* handle Via header in response */ +- if (conf->viaopt != via_off && conf->viaopt != via_block) { ++ if (req->sconf->viaopt != via_off ++ && req->sconf->viaopt != via_block) { + const char *server_name = ap_get_server_name(r); + /* If USE_CANONICAL_NAME_OFF was configured for the proxy virtual host, + * then the server name returned by ap_get_server_name() is the +@@ -1502,18 +1464,18 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, + server_name = r->server->server_hostname; + /* create a "Via:" response header entry and merge it */ + apr_table_addn(r->headers_out, "Via", +- (conf->viaopt == via_full) ++ (req->sconf->viaopt == via_full) + ? 
apr_psprintf(p, "%d.%d %s%s (%s)", + HTTP_VERSION_MAJOR(r->proto_num), + HTTP_VERSION_MINOR(r->proto_num), + server_name, +- server_portstr, ++ req->server_portstr, + AP_SERVER_BASEVERSION) + : apr_psprintf(p, "%d.%d %s%s", + HTTP_VERSION_MAJOR(r->proto_num), + HTTP_VERSION_MINOR(r->proto_num), + server_name, +- server_portstr) ++ req->server_portstr) + ); + } + +@@ -1531,18 +1493,6 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, + } + + if (ap_is_HTTP_INFO(proxy_status)) { +- interim_response++; +- /* Reset to old timeout iff we've adjusted it */ +- if (do_100_continue +- && (r->status == HTTP_CONTINUE) +- && (worker->s->ping_timeout != old_timeout)) { +- apr_socket_timeout_set(backend->sock, old_timeout); +- } +- } +- else { +- interim_response = 0; +- } +- if (interim_response) { + /* RFC2616 tells us to forward this. + * + * OTOH, an interim response here may mean the backend +@@ -1563,7 +1513,13 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, + ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, + "HTTP: received interim %d response", r->status); + if (!policy +- || (!strcasecmp(policy, "RFC") && ((r->expecting_100 = 1)))) { ++ || (!strcasecmp(policy, "RFC") ++ && (proxy_status != HTTP_CONTINUE ++ || (req->expecting_100 = 1)))) { ++ if (proxy_status == HTTP_CONTINUE) { ++ r->expecting_100 = req->expecting_100; ++ req->expecting_100 = 0; ++ } + ap_send_interim_response(r, 1); + } + /* FIXME: refine this to be able to specify per-response-status +@@ -1573,7 +1529,106 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, + ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(01108) + "undefined proxy interim response policy"); + } ++ interim_response++; + } ++ else { ++ interim_response = 0; ++ } ++ ++ /* If we still do 100-continue (end-to-end or ping), either the ++ * current response is the expected "100 Continue" and we are done ++ * with this mode, or this is another interim 
response and we'll wait ++ * for the next one, or this is a final response and hence the backend ++ * did not honor our expectation. ++ */ ++ if (do_100_continue && (!interim_response ++ || proxy_status == HTTP_CONTINUE)) { ++ /* RFC 7231 - Section 5.1.1 - Expect - Requirement for servers ++ * A server that responds with a final status code before ++ * reading the entire message body SHOULD indicate in that ++ * response whether it intends to close the connection or ++ * continue reading and discarding the request message. ++ * ++ * So, if this response is not an interim 100 Continue, we can ++ * avoid sending the request body if the backend responded with ++ * "Connection: close" or HTTP < 1.1, and either let the core ++ * discard it or the caller try another balancer member with the ++ * same body (given status 503, though not implemented yet). ++ */ ++ int do_send_body = (proxy_status == HTTP_CONTINUE ++ || (!toclose && major > 0 && minor > 0)); ++ ++ /* Reset to old timeout iff we've adjusted it. */ ++ if (worker->s->ping_timeout_set) { ++ apr_socket_timeout_set(backend->sock, old_timeout); ++ } ++ ++ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(10153) ++ "HTTP: %s100 continue sent by %pI (%s): " ++ "%ssending body (response: HTTP/%i.%i %s)", ++ proxy_status != HTTP_CONTINUE ? "no " : "", ++ backend->addr, ++ backend->hostname ? backend->hostname : "", ++ do_send_body ? "" : "not ", ++ major, minor, proxy_status_line); ++ ++ if (do_send_body) { ++ int status; ++ ++ /* Send the request body (fully). */ ++ switch(req->rb_method) { ++ case RB_STREAM_CL: ++ case RB_STREAM_CHUNKED: ++ status = stream_reqbody(req, req->rb_method); ++ break; ++ case RB_SPOOL_CL: ++ /* Prefetch has spooled the whole body, flush it. 
*/ ++ status = ap_proxy_pass_brigade(req->bucket_alloc, r, ++ backend, origin, ++ req->input_brigade, 1); ++ break; ++ default: ++ /* Shouldn't happen */ ++ status = HTTP_INTERNAL_SERVER_ERROR; ++ break; ++ } ++ if (status != OK) { ++ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, ++ APLOGNO(10154) "pass request body failed " ++ "to %pI (%s) from %s (%s) with status %i", ++ backend->addr, ++ backend->hostname ? backend->hostname : "", ++ c->client_ip, ++ c->remote_host ? c->remote_host : "", ++ status); ++ backend->close = 1; ++ return status; ++ } ++ } ++ else { ++ /* If we don't read the client connection any further, since ++ * there are pending data it should be "Connection: close"d to ++ * prevent reuse. We don't exactly c->keepalive = AP_CONN_CLOSE ++ * here though, because error_override or a potential retry on ++ * another backend could finally read that data and finalize ++ * the request processing, making keep-alive possible. So what ++ * we do is restoring r->expecting_100 for ap_set_keepalive() ++ * to do the right thing according to the final response and ++ * any later update of r->expecting_100. ++ */ ++ r->expecting_100 = req->expecting_100; ++ req->expecting_100 = 0; ++ } ++ ++ /* Once only! 
*/ ++ do_100_continue = 0; ++ } ++ ++ if (interim_response) { ++ /* Already forwarded above, read next response */ ++ continue; ++ } ++ + /* Moved the fixups of Date headers and those affected by + * ProxyPassReverse/etc from here to ap_proxy_read_headers + */ +@@ -1648,7 +1703,6 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, + + /* send body - but only if a body is expected */ + if ((!r->header_only) && /* not HEAD request */ +- !interim_response && /* not any 1xx response */ + (proxy_status != HTTP_NO_CONTENT) && /* not 204 */ + (proxy_status != HTTP_NOT_MODIFIED)) { /* not 304 */ + +@@ -1697,7 +1751,7 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, + + rv = ap_get_brigade(backend->r->input_filters, bb, + AP_MODE_READBYTES, mode, +- conf->io_buffer_size); ++ req->sconf->io_buffer_size); + + /* ap_get_brigade will return success with an empty brigade + * for a non-blocking read which would block: */ +@@ -1789,7 +1843,7 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, + ap_proxy_release_connection(backend->worker->s->scheme, + backend, r->server); + /* Ensure that the backend is not reused */ +- *backend_ptr = NULL; ++ req->backend = NULL; + + } + +@@ -1798,12 +1852,13 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, + || c->aborted) { + /* Ack! Phbtt! Die! User aborted! */ + /* Only close backend if we haven't got all from the +- * backend. Furthermore if *backend_ptr is NULL it is no ++ * backend. Furthermore if req->backend is NULL it is no + * longer safe to fiddle around with backend as it might + * be already in use by another thread. 
+ */ +- if (*backend_ptr) { +- backend->close = 1; /* this causes socket close below */ ++ if (req->backend) { ++ /* this causes socket close below */ ++ req->backend->close = 1; + } + finish = TRUE; + } +@@ -1816,7 +1871,7 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, + } + ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, "end body send"); + } +- else if (!interim_response) { ++ else { + ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, "header only"); + + /* make sure we release the backend connection as soon +@@ -1826,7 +1881,7 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, + */ + ap_proxy_release_connection(backend->worker->s->scheme, + backend, r->server); +- *backend_ptr = NULL; ++ req->backend = NULL; + + /* Pass EOS bucket down the filter chain. */ + e = apr_bucket_eos_create(c->bucket_alloc); +@@ -1880,14 +1935,17 @@ static int proxy_http_handler(request_rec *r, proxy_worker *worker, + apr_port_t proxyport) + { + int status; +- char server_portstr[32]; + char *scheme; + const char *proxy_function; + const char *u; ++ proxy_http_req_t *req = NULL; + proxy_conn_rec *backend = NULL; + int is_ssl = 0; + conn_rec *c = r->connection; ++ proxy_dir_conf *dconf; + int retry = 0; ++ char *locurl = url; ++ int toclose = 0; + /* + * Use a shorter-lived pool to reduce memory usage + * and avoid a memory leak +@@ -1928,14 +1986,47 @@ static int proxy_http_handler(request_rec *r, proxy_worker *worker, + } + ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, "HTTP: serving URL %s", url); + +- + /* create space for state information */ + if ((status = ap_proxy_acquire_connection(proxy_function, &backend, +- worker, r->server)) != OK) +- goto cleanup; ++ worker, r->server)) != OK) { ++ return status; ++ } + + backend->is_ssl = is_ssl; + ++ req = apr_pcalloc(p, sizeof(*req)); ++ req->p = p; ++ req->r = r; ++ req->sconf = conf; ++ req->worker = worker; ++ req->backend = backend; ++ req->bucket_alloc = c->bucket_alloc; ++ 
req->rb_method = RB_INIT; ++ ++ dconf = ap_get_module_config(r->per_dir_config, &proxy_module); ++ ++ /* Should we handle end-to-end or ping 100-continue? */ ++ if ((r->expecting_100 && dconf->forward_100_continue) ++ || PROXY_DO_100_CONTINUE(worker, r)) { ++ /* We need to reset r->expecting_100 or prefetching will cause ++ * ap_http_filter() to send "100 Continue" response by itself. So ++ * we'll use req->expecting_100 in mod_proxy_http to determine whether ++ * the client should be forwarded "100 continue", and r->expecting_100 ++ * will be restored at the end of the function with the actual value of ++ * req->expecting_100 (i.e. cleared only if mod_proxy_http sent the ++ * "100 Continue" according to its policy). ++ */ ++ req->do_100_continue = req->prefetch_nonblocking = 1; ++ req->expecting_100 = r->expecting_100; ++ r->expecting_100 = 0; ++ } ++ /* Should we block while prefetching the body or try nonblocking and flush ++ * data to the backend ASAP? ++ */ ++ else if (apr_table_get(r->subprocess_env, "proxy-prefetch-nonblocking")) { ++ req->prefetch_nonblocking = 1; ++ } ++ + /* + * In the case that we are handling a reverse proxy connection and this + * is not a request that is coming over an already kept alive connection +@@ -1949,15 +2040,53 @@ static int proxy_http_handler(request_rec *r, proxy_worker *worker, + backend->close = 1; + } + ++ /* Step One: Determine Who To Connect To */ ++ if ((status = ap_proxy_determine_connection(p, r, conf, worker, backend, ++ uri, &locurl, proxyname, ++ proxyport, req->server_portstr, ++ sizeof(req->server_portstr)))) ++ goto cleanup; ++ ++ /* Prefetch (nonlocking) the request body so to increase the chance to get ++ * the whole (or enough) body and determine Content-Length vs chunked or ++ * spooled. 
By doing this before connecting or reusing the backend, we want ++ * to minimize the delay between this connection is considered alive and ++ * the first bytes sent (should the client's link be slow or some input ++ * filter retain the data). This is a best effort to prevent the backend ++ * from closing (from under us) what it thinks is an idle connection, hence ++ * to reduce to the minimum the unavoidable local is_socket_connected() vs ++ * remote keepalive race condition. ++ */ ++ req->input_brigade = apr_brigade_create(p, req->bucket_alloc); ++ req->header_brigade = apr_brigade_create(p, req->bucket_alloc); ++ if ((status = ap_proxy_http_prefetch(req, uri, locurl)) != OK) ++ goto cleanup; ++ ++ /* We need to reset backend->close now, since ap_proxy_http_prefetch() set ++ * it to disable the reuse of the connection *after* this request (no keep- ++ * alive), not to close any reusable connection before this request. However ++ * assure what is expected later by using a local flag and do the right thing ++ * when ap_proxy_connect_backend() below provides the connection to close. ++ */ ++ toclose = backend->close; ++ backend->close = 0; ++ + while (retry < 2) { +- char *locurl = url; ++ if (retry) { ++ char *newurl = url; + +- /* Step One: Determine Who To Connect To */ +- if ((status = ap_proxy_determine_connection(p, r, conf, worker, backend, +- uri, &locurl, proxyname, +- proxyport, server_portstr, +- sizeof(server_portstr))) != OK) +- break; ++ /* Step One (again): (Re)Determine Who To Connect To */ ++ if ((status = ap_proxy_determine_connection(p, r, conf, worker, ++ backend, uri, &newurl, proxyname, proxyport, ++ req->server_portstr, sizeof(req->server_portstr)))) ++ break; ++ ++ /* The code assumes locurl is not changed during the loop, or ++ * ap_proxy_http_prefetch() would have to be called every time, ++ * and header_brigade be changed accordingly... 
++ */ ++ AP_DEBUG_ASSERT(strcmp(newurl, locurl) == 0); ++ } + + /* Step Two: Make the Connection */ + if (ap_proxy_check_connection(proxy_function, backend, r->server, 1, +@@ -1972,54 +2101,64 @@ static int proxy_http_handler(request_rec *r, proxy_worker *worker, + } + + /* Step Three: Create conn_rec */ +- if (!backend->connection) { +- if ((status = ap_proxy_connection_create_ex(proxy_function, +- backend, r)) != OK) +- break; +- /* +- * On SSL connections set a note on the connection what CN is +- * requested, such that mod_ssl can check if it is requested to do +- * so. +- */ +- if (backend->ssl_hostname) { +- apr_table_setn(backend->connection->notes, +- "proxy-request-hostname", +- backend->ssl_hostname); +- } ++ if ((status = ap_proxy_connection_create_ex(proxy_function, ++ backend, r)) != OK) ++ break; ++ req->origin = backend->connection; ++ ++ /* Don't recycle the connection if prefetch (above) told not to do so */ ++ if (toclose) { ++ backend->close = 1; ++ req->origin->keepalive = AP_CONN_CLOSE; ++ } ++ ++ /* ++ * On SSL connections set a note on the connection what CN is ++ * requested, such that mod_ssl can check if it is requested to do ++ * so. 
++ * ++ * https://github.com/apache/httpd/commit/7d272e2628b4ae05f68cdc74b070707250896a34 ++ */ ++ if (backend->ssl_hostname) { ++ apr_table_setn(backend->connection->notes, ++ "proxy-request-hostname", ++ backend->ssl_hostname); + } + + /* Step Four: Send the Request + * On the off-chance that we forced a 100-Continue as a + * kinda HTTP ping test, allow for retries + */ +- if ((status = ap_proxy_http_request(p, r, backend, worker, +- conf, uri, locurl, server_portstr)) != OK) { +- if ((status == HTTP_SERVICE_UNAVAILABLE) && worker->s->ping_timeout_set) { +- backend->close = 1; ++ status = ap_proxy_http_request(req); ++ if (status != OK) { ++ if (req->do_100_continue && status == HTTP_SERVICE_UNAVAILABLE) { + ap_log_rerror(APLOG_MARK, APLOG_INFO, status, r, APLOGNO(01115) + "HTTP: 100-Continue failed to %pI (%s)", + worker->cp->addr, worker->s->hostname_ex); ++ backend->close = 1; + retry++; + continue; +- } else { +- break; +- } ++ } + ++ break; + } + + /* Step Five: Receive the Response... 
Fall thru to cleanup */ +- status = ap_proxy_http_process_response(p, r, &backend, worker, +- conf, server_portstr); ++ status = ap_proxy_http_process_response(req); + + break; + } + + /* Step Six: Clean Up */ + cleanup: +- if (backend) { ++ if (req->backend) { + if (status != OK) +- backend->close = 1; +- ap_proxy_http_cleanup(proxy_function, r, backend); ++ req->backend->close = 1; ++ ap_proxy_http_cleanup(proxy_function, r, req->backend); ++ } ++ if (req->expecting_100) { ++ /* Restore r->expecting_100 if we didn't touch it */ ++ r->expecting_100 = req->expecting_100; + } + return status; + } +diff --git a/modules/proxy/mod_proxy_uwsgi.c b/modules/proxy/mod_proxy_uwsgi.c +index c5d4f8e..e1253e4 100644 +--- a/modules/proxy/mod_proxy_uwsgi.c ++++ b/modules/proxy/mod_proxy_uwsgi.c +@@ -509,12 +509,11 @@ static int uwsgi_handler(request_rec *r, proxy_worker * worker, + } + + /* Step Three: Create conn_rec */ +- if (!backend->connection) { +- if ((status = ap_proxy_connection_create(UWSGI_SCHEME, backend, +- r->connection, +- r->server)) != OK) +- goto cleanup; +- } ++ if ((status = ap_proxy_connection_create(UWSGI_SCHEME, backend, ++ r->connection, ++ r->server)) != OK) ++ goto cleanup; ++ + + /* Step Four: Process the Request */ + if (((status = ap_setup_client_block(r, REQUEST_CHUNKED_ERROR)) != OK) +diff --git a/modules/proxy/mod_proxy_wstunnel.c b/modules/proxy/mod_proxy_wstunnel.c +index 9dda010..4aadbab 100644 +--- a/modules/proxy/mod_proxy_wstunnel.c ++++ b/modules/proxy/mod_proxy_wstunnel.c +@@ -284,8 +284,8 @@ static int proxy_wstunnel_handler(request_rec *r, proxy_worker *worker, + char server_portstr[32]; + proxy_conn_rec *backend = NULL; + char *scheme; +- int retry; + apr_pool_t *p = r->pool; ++ char *locurl = url; + apr_uri_t *uri; + int is_ssl = 0; + const char *upgrade_method = *worker->s->upgrade ? 
worker->s->upgrade : "WebSocket"; +@@ -318,59 +318,51 @@ static int proxy_wstunnel_handler(request_rec *r, proxy_worker *worker, + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02451) "serving URL %s", url); + + /* create space for state information */ +- status = ap_proxy_acquire_connection(scheme, &backend, worker, +- r->server); ++ status = ap_proxy_acquire_connection(scheme, &backend, worker, r->server); + if (status != OK) { +- if (backend) { +- backend->close = 1; +- ap_proxy_release_connection(scheme, backend, r->server); +- } +- return status; ++ goto cleanup; + } + + backend->is_ssl = is_ssl; + backend->close = 0; + +- retry = 0; +- while (retry < 2) { +- char *locurl = url; +- /* Step One: Determine Who To Connect To */ +- status = ap_proxy_determine_connection(p, r, conf, worker, backend, +- uri, &locurl, proxyname, proxyport, +- server_portstr, +- sizeof(server_portstr)); +- +- if (status != OK) +- break; +- +- /* Step Two: Make the Connection */ +- if (ap_proxy_connect_backend(scheme, backend, worker, r->server)) { +- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02452) +- "failed to make connection to backend: %s", +- backend->hostname); +- status = HTTP_SERVICE_UNAVAILABLE; +- break; +- } +- +- /* Step Three: Create conn_rec */ +- if (!backend->connection) { +- status = ap_proxy_connection_create_ex(scheme, backend, r); +- if (status != OK) { +- break; +- } +- } +- +- backend->close = 1; /* must be after ap_proxy_determine_connection */ +- ++ /* Step One: Determine Who To Connect To */ ++ status = ap_proxy_determine_connection(p, r, conf, worker, backend, ++ uri, &locurl, proxyname, proxyport, ++ server_portstr, ++ sizeof(server_portstr)); ++ ++ if (status != OK) { ++ goto cleanup; ++ } ++ ++ /* Step Two: Make the Connection */ ++ if (ap_proxy_connect_backend(scheme, backend, worker, r->server)) { ++ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02452) ++ "failed to make connection to backend: %s", ++ backend->hostname); ++ status = 
HTTP_SERVICE_UNAVAILABLE; ++ goto cleanup; ++ } + +- /* Step Three: Process the Request */ +- status = proxy_wstunnel_request(p, r, backend, worker, conf, uri, locurl, +- server_portstr); +- break; ++ /* Step Three: Create conn_rec */ ++ /* keep it because of */ ++ /* https://github.com/apache/httpd/commit/313d5ee40f390da1a6ee2c2752864ad3aad0a1c3 */ ++ status = ap_proxy_connection_create_ex(scheme, backend, r); ++ if (status != OK) { ++ goto cleanup; + } ++ ++ /* Step Four: Process the Request */ ++ status = proxy_wstunnel_request(p, r, backend, worker, conf, uri, locurl, ++ server_portstr); + ++cleanup: + /* Do not close the socket */ +- ap_proxy_release_connection(scheme, backend, r->server); ++ if (backend) { ++ backend->close = 1; ++ ap_proxy_release_connection(scheme, backend, r->server); ++ } + return status; + } + +diff --git a/modules/proxy/proxy_util.c b/modules/proxy/proxy_util.c +index 0bbfa59..0759dac 100644 +--- a/modules/proxy/proxy_util.c ++++ b/modules/proxy/proxy_util.c +@@ -3573,10 +3573,7 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p, + * To be compliant, we only use 100-Continue for requests with bodies. + * We also make sure we won't be talking HTTP/1.0 as well. 
+ */ +- do_100_continue = (worker->s->ping_timeout_set +- && ap_request_has_body(r) +- && (PROXYREQ_REVERSE == r->proxyreq) +- && !(apr_table_get(r->subprocess_env, "force-proxy-request-1.0"))); ++ do_100_continue = PROXY_DO_100_CONTINUE(worker, r); + + if (apr_table_get(r->subprocess_env, "force-proxy-request-1.0")) { + /* +@@ -3593,7 +3590,9 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p, + buf = apr_pstrcat(p, r->method, " ", url, " HTTP/1.1" CRLF, NULL); + } + if (apr_table_get(r->subprocess_env, "proxy-nokeepalive")) { +- origin->keepalive = AP_CONN_CLOSE; ++ if (origin) { ++ origin->keepalive = AP_CONN_CLOSE; ++ } + p_conn->close = 1; + } + ap_xlate_proto_to_ascii(buf, strlen(buf)); +@@ -3685,14 +3684,6 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p, + if (do_100_continue) { + const char *val; + +- if (!r->expecting_100) { +- /* Don't forward any "100 Continue" response if the client is +- * not expecting it. +- */ +- apr_table_setn(r->subprocess_env, "proxy-interim-response", +- "Suppress"); +- } +- + /* Add the Expect header if not already there. */ + if (((val = apr_table_get(r->headers_in, "Expect")) == NULL) + || (strcasecmp(val, "100-Continue") != 0 /* fast path */ +diff --git a/server/protocol.c b/server/protocol.c +index 8d90055..8d1fdd2 100644 +--- a/server/protocol.c ++++ b/server/protocol.c +@@ -2188,21 +2188,23 @@ AP_DECLARE(void) ap_send_interim_response(request_rec *r, int send_headers) + "Status is %d - not sending interim response", r->status); + return; + } +- if ((r->status == HTTP_CONTINUE) && !r->expecting_100) { +- /* +- * Don't send 100-Continue when there was no Expect: 100-continue +- * in the request headers. For origin servers this is a SHOULD NOT +- * for proxies it is a MUST NOT according to RFC 2616 8.2.3 +- */ +- return; +- } ++ if (r->status == HTTP_CONTINUE) { ++ if (!r->expecting_100) { ++ /* ++ * Don't send 100-Continue when there was no Expect: 100-continue ++ * in the request headers. 
For origin servers this is a SHOULD NOT ++ * for proxies it is a MUST NOT according to RFC 2616 8.2.3 ++ */ ++ return; ++ } + +- /* if we send an interim response, we're no longer in a state of +- * expecting one. Also, this could feasibly be in a subrequest, +- * so we need to propagate the fact that we responded. +- */ +- for (rr = r; rr != NULL; rr = rr->main) { +- rr->expecting_100 = 0; ++ /* if we send an interim response, we're no longer in a state of ++ * expecting one. Also, this could feasibly be in a subrequest, ++ * so we need to propagate the fact that we responded. ++ */ ++ for (rr = r; rr != NULL; rr = rr->main) { ++ rr->expecting_100 = 0; ++ } + } + + status_line = apr_pstrcat(r->pool, AP_SERVER_PROTOCOL, " ", r->status_line, CRLF, NULL); diff --git a/SOURCES/httpd-2.4.37-r1828172+.patch b/SOURCES/httpd-2.4.37-r1828172+.patch new file mode 100644 index 0000000..72b124b --- /dev/null +++ b/SOURCES/httpd-2.4.37-r1828172+.patch @@ -0,0 +1,1420 @@ +# ./pullrev.sh 1828172 1862968 1863191 1867878 1867882 1867968 1867970 1867971 +http://svn.apache.org/viewvc?view=revision&revision=1828172 +http://svn.apache.org/viewvc?view=revision&revision=1862968 +http://svn.apache.org/viewvc?view=revision&revision=1863191 +http://svn.apache.org/viewvc?view=revision&revision=1867878 +http://svn.apache.org/viewvc?view=revision&revision=1867882 +http://svn.apache.org/viewvc?view=revision&revision=1867968 +http://svn.apache.org/viewvc?view=revision&revision=1867970 +http://svn.apache.org/viewvc?view=revision&revision=1867971 + +--- httpd-2.4.41/modules/generators/mod_cgi.c ++++ httpd-2.4.41/modules/generators/mod_cgi.c +@@ -92,6 +92,10 @@ + apr_size_t bufbytes; + } cgi_server_conf; + ++typedef struct { ++ apr_interval_time_t timeout; ++} cgi_dirconf; ++ + static void *create_cgi_config(apr_pool_t *p, server_rec *s) + { + cgi_server_conf *c = +@@ -112,6 +116,12 @@ + return overrides->logname ? 
overrides : base; + } + ++static void *create_cgi_dirconf(apr_pool_t *p, char *dummy) ++{ ++ cgi_dirconf *c = (cgi_dirconf *) apr_pcalloc(p, sizeof(cgi_dirconf)); ++ return c; ++} ++ + static const char *set_scriptlog(cmd_parms *cmd, void *dummy, const char *arg) + { + server_rec *s = cmd->server; +@@ -150,6 +160,17 @@ + return NULL; + } + ++static const char *set_script_timeout(cmd_parms *cmd, void *dummy, const char *arg) ++{ ++ cgi_dirconf *dc = dummy; ++ ++ if (ap_timeout_parameter_parse(arg, &dc->timeout, "s") != APR_SUCCESS) { ++ return "CGIScriptTimeout has wrong format"; ++ } ++ ++ return NULL; ++} ++ + static const command_rec cgi_cmds[] = + { + AP_INIT_TAKE1("ScriptLog", set_scriptlog, NULL, RSRC_CONF, +@@ -158,6 +179,9 @@ + "the maximum length (in bytes) of the script debug log"), + AP_INIT_TAKE1("ScriptLogBuffer", set_scriptlog_buffer, NULL, RSRC_CONF, + "the maximum size (in bytes) to record of a POST request"), ++AP_INIT_TAKE1("CGIScriptTimeout", set_script_timeout, NULL, RSRC_CONF | ACCESS_CONF, ++ "The amount of time to wait between successful reads from " ++ "the CGI script, in seconds."), + {NULL} + }; + +@@ -471,23 +495,26 @@ + apr_filepath_name_get(r->filename)); + } + else { ++ cgi_dirconf *dc = ap_get_module_config(r->per_dir_config, &cgi_module); ++ apr_interval_time_t timeout = dc->timeout > 0 ? 
dc->timeout : r->server->timeout; ++ + apr_pool_note_subprocess(p, procnew, APR_KILL_AFTER_TIMEOUT); + + *script_in = procnew->out; + if (!*script_in) + return APR_EBADF; +- apr_file_pipe_timeout_set(*script_in, r->server->timeout); ++ apr_file_pipe_timeout_set(*script_in, timeout); + + if (e_info->prog_type == RUN_AS_CGI) { + *script_out = procnew->in; + if (!*script_out) + return APR_EBADF; +- apr_file_pipe_timeout_set(*script_out, r->server->timeout); ++ apr_file_pipe_timeout_set(*script_out, timeout); + + *script_err = procnew->err; + if (!*script_err) + return APR_EBADF; +- apr_file_pipe_timeout_set(*script_err, r->server->timeout); ++ apr_file_pipe_timeout_set(*script_err, timeout); + } + } + } +@@ -541,212 +568,10 @@ + return APR_SUCCESS; + } + +-static void discard_script_output(apr_bucket_brigade *bb) +-{ +- apr_bucket *e; +- const char *buf; +- apr_size_t len; +- apr_status_t rv; +- +- for (e = APR_BRIGADE_FIRST(bb); +- e != APR_BRIGADE_SENTINEL(bb); +- e = APR_BUCKET_NEXT(e)) +- { +- if (APR_BUCKET_IS_EOS(e)) { +- break; +- } +- rv = apr_bucket_read(e, &buf, &len, APR_BLOCK_READ); +- if (rv != APR_SUCCESS) { +- break; +- } +- } +-} +- + #if APR_FILES_AS_SOCKETS +- +-/* A CGI bucket type is needed to catch any output to stderr from the +- * script; see PR 22030. */ +-static const apr_bucket_type_t bucket_type_cgi; +- +-struct cgi_bucket_data { +- apr_pollset_t *pollset; +- request_rec *r; +-}; +- +-/* Create a CGI bucket using pipes from script stdout 'out' +- * and stderr 'err', for request 'r'. 
*/ +-static apr_bucket *cgi_bucket_create(request_rec *r, +- apr_file_t *out, apr_file_t *err, +- apr_bucket_alloc_t *list) +-{ +- apr_bucket *b = apr_bucket_alloc(sizeof(*b), list); +- apr_status_t rv; +- apr_pollfd_t fd; +- struct cgi_bucket_data *data = apr_palloc(r->pool, sizeof *data); +- +- APR_BUCKET_INIT(b); +- b->free = apr_bucket_free; +- b->list = list; +- b->type = &bucket_type_cgi; +- b->length = (apr_size_t)(-1); +- b->start = -1; +- +- /* Create the pollset */ +- rv = apr_pollset_create(&data->pollset, 2, r->pool, 0); +- if (rv != APR_SUCCESS) { +- ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01217) +- "apr_pollset_create(); check system or user limits"); +- return NULL; +- } +- +- fd.desc_type = APR_POLL_FILE; +- fd.reqevents = APR_POLLIN; +- fd.p = r->pool; +- fd.desc.f = out; /* script's stdout */ +- fd.client_data = (void *)1; +- rv = apr_pollset_add(data->pollset, &fd); +- if (rv != APR_SUCCESS) { +- ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01218) +- "apr_pollset_add(); check system or user limits"); +- return NULL; +- } +- +- fd.desc.f = err; /* script's stderr */ +- fd.client_data = (void *)2; +- rv = apr_pollset_add(data->pollset, &fd); +- if (rv != APR_SUCCESS) { +- ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01219) +- "apr_pollset_add(); check system or user limits"); +- return NULL; +- } +- +- data->r = r; +- b->data = data; +- return b; +-} +- +-/* Create a duplicate CGI bucket using given bucket data */ +-static apr_bucket *cgi_bucket_dup(struct cgi_bucket_data *data, +- apr_bucket_alloc_t *list) +-{ +- apr_bucket *b = apr_bucket_alloc(sizeof(*b), list); +- APR_BUCKET_INIT(b); +- b->free = apr_bucket_free; +- b->list = list; +- b->type = &bucket_type_cgi; +- b->length = (apr_size_t)(-1); +- b->start = -1; +- b->data = data; +- return b; +-} +- +-/* Handle stdout from CGI child. Duplicate of logic from the _read +- * method of the real APR pipe bucket implementation. 
*/ +-static apr_status_t cgi_read_stdout(apr_bucket *a, apr_file_t *out, +- const char **str, apr_size_t *len) +-{ +- char *buf; +- apr_status_t rv; +- +- *str = NULL; +- *len = APR_BUCKET_BUFF_SIZE; +- buf = apr_bucket_alloc(*len, a->list); /* XXX: check for failure? */ +- +- rv = apr_file_read(out, buf, len); +- +- if (rv != APR_SUCCESS && rv != APR_EOF) { +- apr_bucket_free(buf); +- return rv; +- } +- +- if (*len > 0) { +- struct cgi_bucket_data *data = a->data; +- apr_bucket_heap *h; +- +- /* Change the current bucket to refer to what we read */ +- a = apr_bucket_heap_make(a, buf, *len, apr_bucket_free); +- h = a->data; +- h->alloc_len = APR_BUCKET_BUFF_SIZE; /* note the real buffer size */ +- *str = buf; +- APR_BUCKET_INSERT_AFTER(a, cgi_bucket_dup(data, a->list)); +- } +- else { +- apr_bucket_free(buf); +- a = apr_bucket_immortal_make(a, "", 0); +- *str = a->data; +- } +- return rv; +-} +- +-/* Read method of CGI bucket: polls on stderr and stdout of the child, +- * sending any stderr output immediately away to the error log. */ +-static apr_status_t cgi_bucket_read(apr_bucket *b, const char **str, +- apr_size_t *len, apr_read_type_e block) +-{ +- struct cgi_bucket_data *data = b->data; +- apr_interval_time_t timeout; +- apr_status_t rv; +- int gotdata = 0; +- +- timeout = block == APR_NONBLOCK_READ ? 
0 : data->r->server->timeout; +- +- do { +- const apr_pollfd_t *results; +- apr_int32_t num; +- +- rv = apr_pollset_poll(data->pollset, timeout, &num, &results); +- if (APR_STATUS_IS_TIMEUP(rv)) { +- if (timeout) { +- ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, data->r, APLOGNO(01220) +- "Timeout waiting for output from CGI script %s", +- data->r->filename); +- return rv; +- } +- else { +- return APR_EAGAIN; +- } +- } +- else if (APR_STATUS_IS_EINTR(rv)) { +- continue; +- } +- else if (rv != APR_SUCCESS) { +- ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, data->r, APLOGNO(01221) +- "poll failed waiting for CGI child"); +- return rv; +- } +- +- for (; num; num--, results++) { +- if (results[0].client_data == (void *)1) { +- /* stdout */ +- rv = cgi_read_stdout(b, results[0].desc.f, str, len); +- if (APR_STATUS_IS_EOF(rv)) { +- rv = APR_SUCCESS; +- } +- gotdata = 1; +- } else { +- /* stderr */ +- apr_status_t rv2 = log_script_err(data->r, results[0].desc.f); +- if (APR_STATUS_IS_EOF(rv2)) { +- apr_pollset_remove(data->pollset, &results[0]); +- } +- } +- } +- +- } while (!gotdata); +- +- return rv; +-} +- +-static const apr_bucket_type_t bucket_type_cgi = { +- "CGI", 5, APR_BUCKET_DATA, +- apr_bucket_destroy_noop, +- cgi_bucket_read, +- apr_bucket_setaside_notimpl, +- apr_bucket_split_notimpl, +- apr_bucket_copy_notimpl +-}; +- ++#define WANT_CGI_BUCKET + #endif ++#include "cgi_common.h" + + static int cgi_handler(request_rec *r) + { +@@ -766,6 +591,8 @@ + apr_status_t rv; + cgi_exec_info_t e_info; + conn_rec *c; ++ cgi_dirconf *dc = ap_get_module_config(r->per_dir_config, &cgi_module); ++ apr_interval_time_t timeout = dc->timeout > 0 ? 
dc->timeout : r->server->timeout; + + if (strcmp(r->handler, CGI_MAGIC_TYPE) && strcmp(r->handler, "cgi-script")) { + return DECLINED; +@@ -925,10 +752,7 @@ + AP_DEBUG_ASSERT(script_in != NULL); + + #if APR_FILES_AS_SOCKETS +- apr_file_pipe_timeout_set(script_in, 0); +- apr_file_pipe_timeout_set(script_err, 0); +- +- b = cgi_bucket_create(r, script_in, script_err, c->bucket_alloc); ++ b = cgi_bucket_create(r, dc->timeout, script_in, script_err, c->bucket_alloc); + if (b == NULL) + return HTTP_INTERNAL_SERVER_ERROR; + #else +@@ -938,111 +762,7 @@ + b = apr_bucket_eos_create(c->bucket_alloc); + APR_BRIGADE_INSERT_TAIL(bb, b); + +- /* Handle script return... */ +- if (!nph) { +- const char *location; +- char sbuf[MAX_STRING_LEN]; +- int ret; +- +- if ((ret = ap_scan_script_header_err_brigade_ex(r, bb, sbuf, +- APLOG_MODULE_INDEX))) +- { +- ret = log_script(r, conf, ret, dbuf, sbuf, bb, script_err); +- +- /* +- * ret could be HTTP_NOT_MODIFIED in the case that the CGI script +- * does not set an explicit status and ap_meets_conditions, which +- * is called by ap_scan_script_header_err_brigade, detects that +- * the conditions of the requests are met and the response is +- * not modified. +- * In this case set r->status and return OK in order to prevent +- * running through the error processing stack as this would +- * break with mod_cache, if the conditions had been set by +- * mod_cache itself to validate a stale entity. +- * BTW: We circumvent the error processing stack anyway if the +- * CGI script set an explicit status code (whatever it is) and +- * the only possible values for ret here are: +- * +- * HTTP_NOT_MODIFIED (set by ap_meets_conditions) +- * HTTP_PRECONDITION_FAILED (set by ap_meets_conditions) +- * HTTP_INTERNAL_SERVER_ERROR (if something went wrong during the +- * processing of the response of the CGI script, e.g broken headers +- * or a crashed CGI process). 
+- */ +- if (ret == HTTP_NOT_MODIFIED) { +- r->status = ret; +- return OK; +- } +- +- return ret; +- } +- +- location = apr_table_get(r->headers_out, "Location"); +- +- if (location && r->status == 200) { +- /* For a redirect whether internal or not, discard any +- * remaining stdout from the script, and log any remaining +- * stderr output, as normal. */ +- discard_script_output(bb); +- apr_brigade_destroy(bb); +- apr_file_pipe_timeout_set(script_err, r->server->timeout); +- log_script_err(r, script_err); +- } +- +- if (location && location[0] == '/' && r->status == 200) { +- /* This redirect needs to be a GET no matter what the original +- * method was. +- */ +- r->method = "GET"; +- r->method_number = M_GET; +- +- /* We already read the message body (if any), so don't allow +- * the redirected request to think it has one. We can ignore +- * Transfer-Encoding, since we used REQUEST_CHUNKED_ERROR. +- */ +- apr_table_unset(r->headers_in, "Content-Length"); +- +- ap_internal_redirect_handler(location, r); +- return OK; +- } +- else if (location && r->status == 200) { +- /* XXX: Note that if a script wants to produce its own Redirect +- * body, it now has to explicitly *say* "Status: 302" +- */ +- return HTTP_MOVED_TEMPORARILY; +- } +- +- rv = ap_pass_brigade(r->output_filters, bb); +- } +- else /* nph */ { +- struct ap_filter_t *cur; +- +- /* get rid of all filters up through protocol... since we +- * haven't parsed off the headers, there is no way they can +- * work +- */ +- +- cur = r->proto_output_filters; +- while (cur && cur->frec->ftype < AP_FTYPE_CONNECTION) { +- cur = cur->next; +- } +- r->output_filters = r->proto_output_filters = cur; +- +- rv = ap_pass_brigade(r->output_filters, bb); +- } +- +- /* don't soak up script output if errors occurred writing it +- * out... 
otherwise, we prolong the life of the script when the +- * connection drops or we stopped sending output for some other +- * reason */ +- if (rv == APR_SUCCESS && !r->connection->aborted) { +- apr_file_pipe_timeout_set(script_err, r->server->timeout); +- log_script_err(r, script_err); +- } +- +- apr_file_close(script_err); +- +- return OK; /* NOT r->status, even if it has changed. */ ++ return cgi_handle_response(r, nph, bb, timeout, conf, dbuf, script_err); + } + + /*============================================================================ +@@ -1277,7 +997,7 @@ + AP_DECLARE_MODULE(cgi) = + { + STANDARD20_MODULE_STUFF, +- NULL, /* dir config creater */ ++ create_cgi_dirconf, /* dir config creater */ + NULL, /* dir merger --- default is to override */ + create_cgi_config, /* server config */ + merge_cgi_config, /* merge server config */ +--- httpd-2.4.41/modules/generators/config5.m4 ++++ httpd-2.4.41/modules/generators/config5.m4 +@@ -78,4 +78,15 @@ + + APR_ADDTO(INCLUDES, [-I\$(top_srcdir)/$modpath_current]) + ++AC_ARG_ENABLE(cgid-fdpassing, ++ [APACHE_HELP_STRING(--enable-cgid-fdpassing,Enable experimental mod_cgid support for fd passing)], ++ [if test "$enableval" = "yes"; then ++ AC_CHECK_DECL(CMSG_DATA, ++ [AC_DEFINE([HAVE_CGID_FDPASSING], 1, [Enable FD passing support in mod_cgid])], ++ [AC_MSG_ERROR([cannot support mod_cgid fd-passing on this system])], [ ++#include ++#include ]) ++ fi ++]) ++ + APACHE_MODPATH_FINISH +--- httpd-2.4.41/modules/generators/mod_cgid.c ++++ httpd-2.4.41/modules/generators/mod_cgid.c +@@ -342,15 +342,19 @@ + return close(fd); + } + +-/* deal with incomplete reads and signals +- * assume you really have to read buf_size bytes +- */ +-static apr_status_t sock_read(int fd, void *vbuf, size_t buf_size) ++/* Read from the socket dealing with incomplete messages and signals. ++ * Returns 0 on success or errno on failure. 
Stderr fd passed as ++ * auxiliary data from other end is written to *errfd, or else stderr ++ * fileno if not present. */ ++static apr_status_t sock_readhdr(int fd, int *errfd, void *vbuf, size_t buf_size) + { +- char *buf = vbuf; + int rc; ++#ifndef HAVE_CGID_FDPASSING ++ char *buf = vbuf; + size_t bytes_read = 0; + ++ if (errfd) *errfd = 0; ++ + do { + do { + rc = read(fd, buf + bytes_read, buf_size - bytes_read); +@@ -365,9 +369,60 @@ + } + } while (bytes_read < buf_size); + ++ ++#else /* with FD passing */ ++ struct msghdr msg = {0}; ++ struct iovec vec = {vbuf, buf_size}; ++ struct cmsghdr *cmsg; ++ union { /* union to ensure alignment */ ++ struct cmsghdr cm; ++ char buf[CMSG_SPACE(sizeof(int))]; ++ } u; ++ ++ msg.msg_iov = &vec; ++ msg.msg_iovlen = 1; ++ ++ if (errfd) { ++ msg.msg_control = u.buf; ++ msg.msg_controllen = sizeof(u.buf); ++ *errfd = 0; ++ } ++ ++ /* use MSG_WAITALL to skip loop on truncated reads */ ++ do { ++ rc = recvmsg(fd, &msg, MSG_WAITALL); ++ } while (rc < 0 && errno == EINTR); ++ ++ if (rc == 0) { ++ return ECONNRESET; ++ } ++ else if (rc < 0) { ++ return errno; ++ } ++ else if (rc != buf_size) { ++ /* MSG_WAITALL should ensure the recvmsg blocks until the ++ * entire length is read, but let's be paranoid. */ ++ return APR_INCOMPLETE; ++ } ++ ++ if (errfd ++ && (cmsg = CMSG_FIRSTHDR(&msg)) != NULL ++ && cmsg->cmsg_len == CMSG_LEN(sizeof(*errfd)) ++ && cmsg->cmsg_level == SOL_SOCKET ++ && cmsg->cmsg_type == SCM_RIGHTS) { ++ *errfd = *((int *) CMSG_DATA(cmsg)); ++ } ++#endif ++ + return APR_SUCCESS; + } + ++/* As sock_readhdr but without auxiliary fd passing. */ ++static apr_status_t sock_read(int fd, void *vbuf, size_t buf_size) ++{ ++ return sock_readhdr(fd, NULL, vbuf, buf_size); ++} ++ + /* deal with signals + */ + static apr_status_t sock_write(int fd, const void *buf, size_t buf_size) +@@ -384,7 +439,7 @@ + return APR_SUCCESS; + } + +-static apr_status_t sock_writev(int fd, request_rec *r, int count, ...) 
++static apr_status_t sock_writev(int fd, int auxfd, request_rec *r, int count, ...) + { + va_list ap; + int rc; +@@ -399,9 +454,39 @@ + } + va_end(ap); + ++#ifndef HAVE_CGID_FDPASSING + do { + rc = writev(fd, vec, count); + } while (rc < 0 && errno == EINTR); ++#else ++ { ++ struct msghdr msg = { 0 }; ++ struct cmsghdr *cmsg; ++ union { /* union for alignment */ ++ char buf[CMSG_SPACE(sizeof(int))]; ++ struct cmsghdr align; ++ } u; ++ ++ msg.msg_iov = vec; ++ msg.msg_iovlen = count; ++ ++ if (auxfd) { ++ msg.msg_control = u.buf; ++ msg.msg_controllen = sizeof(u.buf); ++ ++ cmsg = CMSG_FIRSTHDR(&msg); ++ cmsg->cmsg_level = SOL_SOCKET; ++ cmsg->cmsg_type = SCM_RIGHTS; ++ cmsg->cmsg_len = CMSG_LEN(sizeof(int)); ++ *((int *) CMSG_DATA(cmsg)) = auxfd; ++ } ++ ++ do { ++ rc = sendmsg(fd, &msg, 0); ++ } while (rc < 0 && errno == EINTR); ++ } ++#endif ++ + if (rc < 0) { + return errno; + } +@@ -410,7 +495,7 @@ + } + + static apr_status_t get_req(int fd, request_rec *r, char **argv0, char ***env, +- cgid_req_t *req) ++ int *errfd, cgid_req_t *req) + { + int i; + char **environ; +@@ -421,7 +506,7 @@ + r->server = apr_pcalloc(r->pool, sizeof(server_rec)); + + /* read the request header */ +- stat = sock_read(fd, req, sizeof(*req)); ++ stat = sock_readhdr(fd, errfd, req, sizeof(*req)); + if (stat != APR_SUCCESS) { + return stat; + } +@@ -479,14 +564,15 @@ + return APR_SUCCESS; + } + +-static apr_status_t send_req(int fd, request_rec *r, char *argv0, char **env, +- int req_type) ++static apr_status_t send_req(int fd, apr_file_t *errpipe, request_rec *r, ++ char *argv0, char **env, int req_type) + { + int i; + cgid_req_t req = {0}; + apr_status_t stat; + ap_unix_identity_t * ugid = ap_run_get_suexec_identity(r); + core_dir_config *core_conf = ap_get_core_module_config(r->per_dir_config); ++ int errfd; + + + if (ugid == NULL) { +@@ -507,16 +593,21 @@ + req.args_len = r->args ? 
strlen(r->args) : 0; + req.loglevel = r->server->log.level; + ++ if (errpipe) ++ apr_os_file_get(&errfd, errpipe); ++ else ++ errfd = 0; ++ + /* Write the request header */ + if (req.args_len) { +- stat = sock_writev(fd, r, 5, ++ stat = sock_writev(fd, errfd, r, 5, + &req, sizeof(req), + r->filename, req.filename_len, + argv0, req.argv0_len, + r->uri, req.uri_len, + r->args, req.args_len); + } else { +- stat = sock_writev(fd, r, 4, ++ stat = sock_writev(fd, errfd, r, 4, + &req, sizeof(req), + r->filename, req.filename_len, + argv0, req.argv0_len, +@@ -531,7 +622,7 @@ + for (i = 0; i < req.env_count; i++) { + apr_size_t curlen = strlen(env[i]); + +- if ((stat = sock_writev(fd, r, 2, &curlen, sizeof(curlen), ++ if ((stat = sock_writev(fd, 0, r, 2, &curlen, sizeof(curlen), + env[i], curlen)) != APR_SUCCESS) { + return stat; + } +@@ -582,20 +673,34 @@ + } + } + ++/* Callback executed in the forked child process if exec of the CGI ++ * script fails. For the fd-passing case, output to stderr goes to ++ * the client (request handling thread) and is logged via ++ * ap_log_rerror there. For the non-fd-passing case, the "fake" ++ * request_rec passed via userdata is used to log. 
*/ + static void cgid_child_errfn(apr_pool_t *pool, apr_status_t err, + const char *description) + { +- request_rec *r; + void *vr; + + apr_pool_userdata_get(&vr, ERRFN_USERDATA_KEY, pool); +- r = vr; +- +- /* sure we got r, but don't call ap_log_rerror() because we don't +- * have r->headers_in and possibly other storage referenced by +- * ap_log_rerror() +- */ +- ap_log_error(APLOG_MARK, APLOG_ERR, err, r->server, APLOGNO(01241) "%s", description); ++ if (vr) { ++ request_rec *r = vr; ++ ++ /* sure we got r, but don't call ap_log_rerror() because we don't ++ * have r->headers_in and possibly other storage referenced by ++ * ap_log_rerror() ++ */ ++ ap_log_error(APLOG_MARK, APLOG_ERR, err, r->server, APLOGNO(01241) "%s", description); ++ } ++ else { ++ const char *logstr; ++ ++ logstr = apr_psprintf(pool, APLOGNO(01241) "error spawning CGI child: %s (%pm)\n", ++ description, &err); ++ fputs(logstr, stderr); ++ fflush(stderr); ++ } + } + + static int cgid_server(void *data) +@@ -669,7 +774,7 @@ + } + + while (!daemon_should_exit) { +- int errfileno = STDERR_FILENO; ++ int errfileno; + char *argv0 = NULL; + char **env = NULL; + const char * const *argv; +@@ -709,7 +814,7 @@ + r = apr_pcalloc(ptrans, sizeof(request_rec)); + procnew = apr_pcalloc(ptrans, sizeof(*procnew)); + r->pool = ptrans; +- stat = get_req(sd2, r, &argv0, &env, &cgid_req); ++ stat = get_req(sd2, r, &argv0, &env, &errfileno, &cgid_req); + if (stat != APR_SUCCESS) { + ap_log_error(APLOG_MARK, APLOG_ERR, stat, + main_server, APLOGNO(01248) +@@ -741,6 +846,16 @@ + continue; + } + ++ if (errfileno == 0) { ++ errfileno = STDERR_FILENO; ++ } ++ else { ++ ap_log_error(APLOG_MARK, APLOG_DEBUG, rv, main_server, ++ "using passed fd %d as stderr", errfileno); ++ /* Limit the received fd lifetime to pool lifetime */ ++ apr_pool_cleanup_register(ptrans, (void *)((long)errfileno), ++ close_unix_socket, close_unix_socket); ++ } + apr_os_file_put(&r->server->error_log, &errfileno, 0, r->pool); + 
apr_os_file_put(&inout, &sd2, 0, r->pool); + +@@ -800,7 +915,10 @@ + close(sd2); + } + else { +- apr_pool_userdata_set(r, ERRFN_USERDATA_KEY, apr_pool_cleanup_null, ptrans); ++ if (errfileno == STDERR_FILENO) { ++ /* Used by cgid_child_errfn without fd-passing. */ ++ apr_pool_userdata_set(r, ERRFN_USERDATA_KEY, apr_pool_cleanup_null, ptrans); ++ } + + argv = (const char * const *)create_argv(r->pool, NULL, NULL, NULL, argv0, r->args); + +@@ -1099,6 +1217,33 @@ + return ret; + } + ++/* Soak up stderr from a script and redirect it to the error log. ++ * TODO: log_scripterror() and this could move to cgi_common.h. */ ++static apr_status_t log_script_err(request_rec *r, apr_file_t *script_err) ++{ ++ char argsbuffer[HUGE_STRING_LEN]; ++ char *newline; ++ apr_status_t rv; ++ cgid_server_conf *conf = ap_get_module_config(r->server->module_config, &cgid_module); ++ ++ while ((rv = apr_file_gets(argsbuffer, HUGE_STRING_LEN, ++ script_err)) == APR_SUCCESS) { ++ ++ newline = strchr(argsbuffer, '\n'); ++ if (newline) { ++ char *prev = newline - 1; ++ if (prev >= argsbuffer && *prev == '\r') { ++ newline = prev; ++ } ++ ++ *newline = '\0'; ++ } ++ log_scripterror(r, conf, r->status, 0, argsbuffer); ++ } ++ ++ return rv; ++} ++ + static int log_script(request_rec *r, cgid_server_conf * conf, int ret, + char *dbuf, const char *sbuf, apr_bucket_brigade *bb, + apr_file_t *script_err) +@@ -1204,6 +1349,13 @@ + return ret; + } + ++/* Pull in CGI bucket implementation. 
*/ ++#define cgi_server_conf cgid_server_conf ++#ifdef HAVE_CGID_FDPASSING ++#define WANT_CGI_BUCKET ++#endif ++#include "cgi_common.h" ++ + static int connect_to_daemon(int *sdptr, request_rec *r, + cgid_server_conf *conf) + { +@@ -1270,27 +1422,6 @@ + return OK; + } + +-static void discard_script_output(apr_bucket_brigade *bb) +-{ +- apr_bucket *e; +- const char *buf; +- apr_size_t len; +- apr_status_t rv; +- +- for (e = APR_BRIGADE_FIRST(bb); +- e != APR_BRIGADE_SENTINEL(bb); +- e = APR_BUCKET_NEXT(e)) +- { +- if (APR_BUCKET_IS_EOS(e)) { +- break; +- } +- rv = apr_bucket_read(e, &buf, &len, APR_BLOCK_READ); +- if (rv != APR_SUCCESS) { +- break; +- } +- } +-} +- + /**************************************************************** + * + * Actual cgid handling... +@@ -1395,6 +1526,7 @@ + + static int cgid_handler(request_rec *r) + { ++ conn_rec *c = r->connection; + int retval, nph, dbpos; + char *argv0, *dbuf; + apr_bucket_brigade *bb; +@@ -1404,10 +1536,11 @@ + int seen_eos, child_stopped_reading; + int sd; + char **env; +- apr_file_t *tempsock; ++ apr_file_t *tempsock, *script_err, *errpipe_out; + struct cleanup_script_info *info; + apr_status_t rv; + cgid_dirconf *dc; ++ apr_interval_time_t timeout; + + if (strcmp(r->handler, CGI_MAGIC_TYPE) && strcmp(r->handler, "cgi-script")) { + return DECLINED; +@@ -1416,7 +1549,7 @@ + conf = ap_get_module_config(r->server->module_config, &cgid_module); + dc = ap_get_module_config(r->per_dir_config, &cgid_module); + +- ++ timeout = dc->timeout > 0 ? 
dc->timeout : r->server->timeout; + is_included = !strcmp(r->protocol, "INCLUDED"); + + if ((argv0 = strrchr(r->filename, '/')) != NULL) { +@@ -1469,6 +1602,17 @@ + } + */ + ++#ifdef HAVE_CGID_FDPASSING ++ rv = apr_file_pipe_create(&script_err, &errpipe_out, r->pool); ++ if (rv) { ++ return log_scripterror(r, conf, HTTP_SERVICE_UNAVAILABLE, rv, APLOGNO(10176) ++ "could not create pipe for stderr"); ++ } ++#else ++ script_err = NULL; ++ errpipe_out = NULL; ++#endif ++ + /* + * httpd core function used to add common environment variables like + * DOCUMENT_ROOT. +@@ -1481,12 +1625,16 @@ + return retval; + } + +- rv = send_req(sd, r, argv0, env, CGI_REQ); ++ rv = send_req(sd, errpipe_out, r, argv0, env, CGI_REQ); + if (rv != APR_SUCCESS) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01268) + "write to cgi daemon process"); + } + ++ /* The write-end of the pipe is only used by the server, so close ++ * it here. */ ++ if (errpipe_out) apr_file_close(errpipe_out); ++ + info = apr_palloc(r->pool, sizeof(struct cleanup_script_info)); + info->conf = conf; + info->r = r; +@@ -1508,12 +1656,7 @@ + */ + + apr_os_pipe_put_ex(&tempsock, &sd, 1, r->pool); +- if (dc->timeout > 0) { +- apr_file_pipe_timeout_set(tempsock, dc->timeout); +- } +- else { +- apr_file_pipe_timeout_set(tempsock, r->server->timeout); +- } ++ apr_file_pipe_timeout_set(tempsock, timeout); + apr_pool_cleanup_kill(r->pool, (void *)((long)sd), close_unix_socket); + + /* Transfer any put/post args, CERN style... +@@ -1605,114 +1748,19 @@ + */ + shutdown(sd, 1); + +- /* Handle script return... 
*/ +- if (!nph) { +- conn_rec *c = r->connection; +- const char *location; +- char sbuf[MAX_STRING_LEN]; +- int ret; +- +- bb = apr_brigade_create(r->pool, c->bucket_alloc); +- b = apr_bucket_pipe_create(tempsock, c->bucket_alloc); +- APR_BRIGADE_INSERT_TAIL(bb, b); +- b = apr_bucket_eos_create(c->bucket_alloc); +- APR_BRIGADE_INSERT_TAIL(bb, b); +- +- if ((ret = ap_scan_script_header_err_brigade_ex(r, bb, sbuf, +- APLOG_MODULE_INDEX))) +- { +- ret = log_script(r, conf, ret, dbuf, sbuf, bb, NULL); +- +- /* +- * ret could be HTTP_NOT_MODIFIED in the case that the CGI script +- * does not set an explicit status and ap_meets_conditions, which +- * is called by ap_scan_script_header_err_brigade, detects that +- * the conditions of the requests are met and the response is +- * not modified. +- * In this case set r->status and return OK in order to prevent +- * running through the error processing stack as this would +- * break with mod_cache, if the conditions had been set by +- * mod_cache itself to validate a stale entity. +- * BTW: We circumvent the error processing stack anyway if the +- * CGI script set an explicit status code (whatever it is) and +- * the only possible values for ret here are: +- * +- * HTTP_NOT_MODIFIED (set by ap_meets_conditions) +- * HTTP_PRECONDITION_FAILED (set by ap_meets_conditions) +- * HTTP_INTERNAL_SERVER_ERROR (if something went wrong during the +- * processing of the response of the CGI script, e.g broken headers +- * or a crashed CGI process). +- */ +- if (ret == HTTP_NOT_MODIFIED) { +- r->status = ret; +- return OK; +- } +- +- return ret; +- } +- +- location = apr_table_get(r->headers_out, "Location"); +- +- if (location && location[0] == '/' && r->status == 200) { +- +- /* Soak up all the script output */ +- discard_script_output(bb); +- apr_brigade_destroy(bb); +- /* This redirect needs to be a GET no matter what the original +- * method was. 
+- */ +- r->method = "GET"; +- r->method_number = M_GET; +- +- /* We already read the message body (if any), so don't allow +- * the redirected request to think it has one. We can ignore +- * Transfer-Encoding, since we used REQUEST_CHUNKED_ERROR. +- */ +- apr_table_unset(r->headers_in, "Content-Length"); +- +- ap_internal_redirect_handler(location, r); +- return OK; +- } +- else if (location && r->status == 200) { +- /* XXX: Note that if a script wants to produce its own Redirect +- * body, it now has to explicitly *say* "Status: 302" +- */ +- discard_script_output(bb); +- apr_brigade_destroy(bb); +- return HTTP_MOVED_TEMPORARILY; +- } +- +- rv = ap_pass_brigade(r->output_filters, bb); +- if (rv != APR_SUCCESS) { +- ap_log_rerror(APLOG_MARK, APLOG_TRACE1, rv, r, +- "Failed to flush CGI output to client"); +- } +- } +- +- if (nph) { +- conn_rec *c = r->connection; +- struct ap_filter_t *cur; +- +- /* get rid of all filters up through protocol... since we +- * haven't parsed off the headers, there is no way they can +- * work +- */ +- +- cur = r->proto_output_filters; +- while (cur && cur->frec->ftype < AP_FTYPE_CONNECTION) { +- cur = cur->next; +- } +- r->output_filters = r->proto_output_filters = cur; +- +- bb = apr_brigade_create(r->pool, c->bucket_alloc); +- b = apr_bucket_pipe_create(tempsock, c->bucket_alloc); +- APR_BRIGADE_INSERT_TAIL(bb, b); +- b = apr_bucket_eos_create(c->bucket_alloc); +- APR_BRIGADE_INSERT_TAIL(bb, b); +- ap_pass_brigade(r->output_filters, bb); +- } ++ bb = apr_brigade_create(r->pool, c->bucket_alloc); ++#ifdef HAVE_CGID_FDPASSING ++ b = cgi_bucket_create(r, dc->timeout, tempsock, script_err, c->bucket_alloc); ++ if (b == NULL) ++ return HTTP_INTERNAL_SERVER_ERROR; /* should call log_scripterror() w/ _UNAVAILABLE? 
*/ ++#else ++ b = apr_bucket_pipe_create(tempsock, c->bucket_alloc); ++#endif ++ APR_BRIGADE_INSERT_TAIL(bb, b); ++ b = apr_bucket_eos_create(c->bucket_alloc); ++ APR_BRIGADE_INSERT_TAIL(bb, b); + +- return OK; /* NOT r->status, even if it has changed. */ ++ return cgi_handle_response(r, nph, bb, timeout, conf, dbuf, script_err); + } + + +@@ -1829,7 +1877,7 @@ + return retval; + } + +- send_req(sd, r, command, env, SSI_REQ); ++ send_req(sd, NULL, r, command, env, SSI_REQ); + + info = apr_palloc(r->pool, sizeof(struct cleanup_script_info)); + info->conf = conf; +--- httpd-2.4.41/modules/generators/cgi_common.h ++++ httpd-2.4.41/modules/generators/cgi_common.h +@@ -0,0 +1,359 @@ ++/* Licensed to the Apache Software Foundation (ASF) under one or more ++ * contributor license agreements. See the NOTICE file distributed with ++ * this work for additional information regarding copyright ownership. ++ * The ASF licenses this file to You under the Apache License, Version 2.0 ++ * (the "License"); you may not use this file except in compliance with ++ * the License. You may obtain a copy of the License at ++ * ++ * http://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, ++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ * See the License for the specific language governing permissions and ++ * limitations under the License. 
++ */ ++ ++#include "apr.h" ++#include "apr_strings.h" ++#include "apr_buckets.h" ++#include "apr_lib.h" ++#include "apr_poll.h" ++ ++#define APR_WANT_STRFUNC ++#define APR_WANT_MEMFUNC ++#include "apr_want.h" ++ ++#include "httpd.h" ++#include "util_filter.h" ++ ++static void discard_script_output(apr_bucket_brigade *bb) ++{ ++ apr_bucket *e; ++ const char *buf; ++ apr_size_t len; ++ ++ for (e = APR_BRIGADE_FIRST(bb); ++ e != APR_BRIGADE_SENTINEL(bb) && !APR_BUCKET_IS_EOS(e); ++ e = APR_BRIGADE_FIRST(bb)) ++ { ++ if (apr_bucket_read(e, &buf, &len, APR_BLOCK_READ)) { ++ break; ++ } ++ apr_bucket_delete(e); ++ } ++} ++ ++#ifdef WANT_CGI_BUCKET ++/* A CGI bucket type is needed to catch any output to stderr from the ++ * script; see PR 22030. */ ++static const apr_bucket_type_t bucket_type_cgi; ++ ++struct cgi_bucket_data { ++ apr_pollset_t *pollset; ++ request_rec *r; ++ apr_interval_time_t timeout; ++}; ++ ++/* Create a CGI bucket using pipes from script stdout 'out' ++ * and stderr 'err', for request 'r'. */ ++static apr_bucket *cgi_bucket_create(request_rec *r, ++ apr_interval_time_t timeout, ++ apr_file_t *out, apr_file_t *err, ++ apr_bucket_alloc_t *list) ++{ ++ apr_bucket *b = apr_bucket_alloc(sizeof(*b), list); ++ apr_status_t rv; ++ apr_pollfd_t fd; ++ struct cgi_bucket_data *data = apr_palloc(r->pool, sizeof *data); ++ ++ /* Disable APR timeout handling since we'll use poll() entirely. 
*/ ++ apr_file_pipe_timeout_set(out, 0); ++ apr_file_pipe_timeout_set(err, 0); ++ ++ APR_BUCKET_INIT(b); ++ b->free = apr_bucket_free; ++ b->list = list; ++ b->type = &bucket_type_cgi; ++ b->length = (apr_size_t)(-1); ++ b->start = -1; ++ ++ /* Create the pollset */ ++ rv = apr_pollset_create(&data->pollset, 2, r->pool, 0); ++ if (rv != APR_SUCCESS) { ++ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01217) ++ "apr_pollset_create(); check system or user limits"); ++ return NULL; ++ } ++ ++ fd.desc_type = APR_POLL_FILE; ++ fd.reqevents = APR_POLLIN; ++ fd.p = r->pool; ++ fd.desc.f = out; /* script's stdout */ ++ fd.client_data = (void *)1; ++ rv = apr_pollset_add(data->pollset, &fd); ++ if (rv != APR_SUCCESS) { ++ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01218) ++ "apr_pollset_add(); check system or user limits"); ++ return NULL; ++ } ++ ++ fd.desc.f = err; /* script's stderr */ ++ fd.client_data = (void *)2; ++ rv = apr_pollset_add(data->pollset, &fd); ++ if (rv != APR_SUCCESS) { ++ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01219) ++ "apr_pollset_add(); check system or user limits"); ++ return NULL; ++ } ++ ++ data->r = r; ++ data->timeout = timeout; ++ b->data = data; ++ return b; ++} ++ ++/* Create a duplicate CGI bucket using given bucket data */ ++static apr_bucket *cgi_bucket_dup(struct cgi_bucket_data *data, ++ apr_bucket_alloc_t *list) ++{ ++ apr_bucket *b = apr_bucket_alloc(sizeof(*b), list); ++ APR_BUCKET_INIT(b); ++ b->free = apr_bucket_free; ++ b->list = list; ++ b->type = &bucket_type_cgi; ++ b->length = (apr_size_t)(-1); ++ b->start = -1; ++ b->data = data; ++ return b; ++} ++ ++/* Handle stdout from CGI child. Duplicate of logic from the _read ++ * method of the real APR pipe bucket implementation. 
*/ ++static apr_status_t cgi_read_stdout(apr_bucket *a, apr_file_t *out, ++ const char **str, apr_size_t *len) ++{ ++ char *buf; ++ apr_status_t rv; ++ ++ *str = NULL; ++ *len = APR_BUCKET_BUFF_SIZE; ++ buf = apr_bucket_alloc(*len, a->list); /* XXX: check for failure? */ ++ ++ rv = apr_file_read(out, buf, len); ++ ++ if (rv != APR_SUCCESS && rv != APR_EOF) { ++ apr_bucket_free(buf); ++ return rv; ++ } ++ ++ if (*len > 0) { ++ struct cgi_bucket_data *data = a->data; ++ apr_bucket_heap *h; ++ ++ /* Change the current bucket to refer to what we read */ ++ a = apr_bucket_heap_make(a, buf, *len, apr_bucket_free); ++ h = a->data; ++ h->alloc_len = APR_BUCKET_BUFF_SIZE; /* note the real buffer size */ ++ *str = buf; ++ APR_BUCKET_INSERT_AFTER(a, cgi_bucket_dup(data, a->list)); ++ } ++ else { ++ apr_bucket_free(buf); ++ a = apr_bucket_immortal_make(a, "", 0); ++ *str = a->data; ++ } ++ return rv; ++} ++ ++/* Read method of CGI bucket: polls on stderr and stdout of the child, ++ * sending any stderr output immediately away to the error log. */ ++static apr_status_t cgi_bucket_read(apr_bucket *b, const char **str, ++ apr_size_t *len, apr_read_type_e block) ++{ ++ struct cgi_bucket_data *data = b->data; ++ apr_interval_time_t timeout = 0; ++ apr_status_t rv; ++ int gotdata = 0; ++ ++ if (block != APR_NONBLOCK_READ) { ++ timeout = data->timeout > 0 ? 
data->timeout : data->r->server->timeout; ++ } ++ ++ do { ++ const apr_pollfd_t *results; ++ apr_int32_t num; ++ ++ rv = apr_pollset_poll(data->pollset, timeout, &num, &results); ++ if (APR_STATUS_IS_TIMEUP(rv)) { ++ if (timeout) { ++ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, data->r, APLOGNO(01220) ++ "Timeout waiting for output from CGI script %s", ++ data->r->filename); ++ return rv; ++ } ++ else { ++ return APR_EAGAIN; ++ } ++ } ++ else if (APR_STATUS_IS_EINTR(rv)) { ++ continue; ++ } ++ else if (rv != APR_SUCCESS) { ++ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, data->r, APLOGNO(01221) ++ "poll failed waiting for CGI child"); ++ return rv; ++ } ++ ++ for (; num; num--, results++) { ++ if (results[0].client_data == (void *)1) { ++ /* stdout */ ++ rv = cgi_read_stdout(b, results[0].desc.f, str, len); ++ if (APR_STATUS_IS_EOF(rv)) { ++ rv = APR_SUCCESS; ++ } ++ gotdata = 1; ++ } else { ++ /* stderr */ ++ apr_status_t rv2 = log_script_err(data->r, results[0].desc.f); ++ if (APR_STATUS_IS_EOF(rv2)) { ++ apr_pollset_remove(data->pollset, &results[0]); ++ } ++ } ++ } ++ ++ } while (!gotdata); ++ ++ return rv; ++} ++ ++static const apr_bucket_type_t bucket_type_cgi = { ++ "CGI", 5, APR_BUCKET_DATA, ++ apr_bucket_destroy_noop, ++ cgi_bucket_read, ++ apr_bucket_setaside_notimpl, ++ apr_bucket_split_notimpl, ++ apr_bucket_copy_notimpl ++}; ++ ++#endif /* WANT_CGI_BUCKET */ ++ ++/* Handle the CGI response output, having set up the brigade with the ++ * CGI or PIPE bucket as appropriate. */ ++static int cgi_handle_response(request_rec *r, int nph, apr_bucket_brigade *bb, ++ apr_interval_time_t timeout, cgi_server_conf *conf, ++ char *logdata, apr_file_t *script_err) ++{ ++ apr_status_t rv; ++ ++ /* Handle script return... 
*/ ++ if (!nph) { ++ const char *location; ++ char sbuf[MAX_STRING_LEN]; ++ int ret; ++ ++ if ((ret = ap_scan_script_header_err_brigade_ex(r, bb, sbuf, ++ APLOG_MODULE_INDEX))) ++ { ++ ret = log_script(r, conf, ret, logdata, sbuf, bb, script_err); ++ ++ /* ++ * ret could be HTTP_NOT_MODIFIED in the case that the CGI script ++ * does not set an explicit status and ap_meets_conditions, which ++ * is called by ap_scan_script_header_err_brigade, detects that ++ * the conditions of the requests are met and the response is ++ * not modified. ++ * In this case set r->status and return OK in order to prevent ++ * running through the error processing stack as this would ++ * break with mod_cache, if the conditions had been set by ++ * mod_cache itself to validate a stale entity. ++ * BTW: We circumvent the error processing stack anyway if the ++ * CGI script set an explicit status code (whatever it is) and ++ * the only possible values for ret here are: ++ * ++ * HTTP_NOT_MODIFIED (set by ap_meets_conditions) ++ * HTTP_PRECONDITION_FAILED (set by ap_meets_conditions) ++ * HTTP_INTERNAL_SERVER_ERROR (if something went wrong during the ++ * processing of the response of the CGI script, e.g broken headers ++ * or a crashed CGI process). ++ */ ++ if (ret == HTTP_NOT_MODIFIED) { ++ r->status = ret; ++ return OK; ++ } ++ ++ return ret; ++ } ++ ++ location = apr_table_get(r->headers_out, "Location"); ++ ++ if (location && r->status == 200) { ++ /* For a redirect whether internal or not, discard any ++ * remaining stdout from the script, and log any remaining ++ * stderr output, as normal. */ ++ discard_script_output(bb); ++ apr_brigade_destroy(bb); ++ ++ if (script_err) { ++ apr_file_pipe_timeout_set(script_err, timeout); ++ log_script_err(r, script_err); ++ } ++ } ++ ++ if (location && location[0] == '/' && r->status == 200) { ++ /* This redirect needs to be a GET no matter what the original ++ * method was. 
++ */ ++ r->method = "GET"; ++ r->method_number = M_GET; ++ ++ /* We already read the message body (if any), so don't allow ++ * the redirected request to think it has one. We can ignore ++ * Transfer-Encoding, since we used REQUEST_CHUNKED_ERROR. ++ */ ++ apr_table_unset(r->headers_in, "Content-Length"); ++ ++ ap_internal_redirect_handler(location, r); ++ return OK; ++ } ++ else if (location && r->status == 200) { ++ /* XXX: Note that if a script wants to produce its own Redirect ++ * body, it now has to explicitly *say* "Status: 302" ++ */ ++ discard_script_output(bb); ++ apr_brigade_destroy(bb); ++ return HTTP_MOVED_TEMPORARILY; ++ } ++ ++ rv = ap_pass_brigade(r->output_filters, bb); ++ } ++ else /* nph */ { ++ struct ap_filter_t *cur; ++ ++ /* get rid of all filters up through protocol... since we ++ * haven't parsed off the headers, there is no way they can ++ * work ++ */ ++ ++ cur = r->proto_output_filters; ++ while (cur && cur->frec->ftype < AP_FTYPE_CONNECTION) { ++ cur = cur->next; ++ } ++ r->output_filters = r->proto_output_filters = cur; ++ ++ rv = ap_pass_brigade(r->output_filters, bb); ++ } ++ ++ /* don't soak up script output if errors occurred writing it ++ * out... otherwise, we prolong the life of the script when the ++ * connection drops or we stopped sending output for some other ++ * reason */ ++ if (script_err && rv == APR_SUCCESS && !r->connection->aborted) { ++ apr_file_pipe_timeout_set(script_err, timeout); ++ log_script_err(r, script_err); ++ } ++ ++ if (script_err) apr_file_close(script_err); ++ ++ return OK; /* NOT r->status, even if it has changed. 
*/ ++} diff --git a/SOURCES/httpd-2.4.37-r1840554.patch b/SOURCES/httpd-2.4.37-r1840554.patch new file mode 100644 index 0000000..7b379e1 --- /dev/null +++ b/SOURCES/httpd-2.4.37-r1840554.patch @@ -0,0 +1,35 @@ +diff --git a/modules/arch/unix/mod_systemd.c b/modules/arch/unix/mod_systemd.c +index 7a82a90..6c244b6 100644 +--- a/modules/arch/unix/mod_systemd.c ++++ b/modules/arch/unix/mod_systemd.c +@@ -100,6 +100,21 @@ static int systemd_post_config(apr_pool_t *pconf, apr_pool_t *plog, + return OK; + } + ++/* Report the service is ready in post_config, which could be during ++ * startup or after a reload. The server could still hit a fatal ++ * startup error after this point during ap_run_mpm(), so this is ++ * perhaps too early, but by post_config listen() has been called on ++ * the TCP ports so new connections will not be rejected. There will ++ * always be a possible async failure event simultaneous to the ++ * service reporting "ready", so this should be good enough. */ ++static int systemd_post_config_last(apr_pool_t *p, apr_pool_t *plog, ++ apr_pool_t *ptemp, server_rec *main_server) ++{ ++ sd_notify(0, "READY=1\n" ++ "STATUS=Configuration loaded.\n"); ++ return OK; ++} ++ + static int systemd_pre_mpm(apr_pool_t *p, ap_scoreboard_e sb_type) + { + int rv; +@@ -187,6 +202,8 @@ static void systemd_register_hooks(apr_pool_t *p) + ap_hook_pre_config(systemd_pre_config, NULL, NULL, APR_HOOK_LAST); + /* Grab the listener config. */ + ap_hook_post_config(systemd_post_config, NULL, NULL, APR_HOOK_LAST); ++ /* Signal service is ready. */ ++ ap_hook_post_config(systemd_post_config_last, NULL, NULL, APR_HOOK_REALLY_LAST); + /* We know the PID in this hook ... 
*/ + ap_hook_pre_mpm(systemd_pre_mpm, NULL, NULL, APR_HOOK_LAST); + /* Used to update httpd's status line using sd_notifyf */ diff --git a/SOURCES/httpd-2.4.37-r1842929+.patch b/SOURCES/httpd-2.4.37-r1842929+.patch new file mode 100644 index 0000000..ab5bba6 --- /dev/null +++ b/SOURCES/httpd-2.4.37-r1842929+.patch @@ -0,0 +1,272 @@ +# ./pullrev.sh 1842929 1842931 1852982 1853631 1857731 +http://svn.apache.org/viewvc?view=revision&revision=1842929 +http://svn.apache.org/viewvc?view=revision&revision=1842931 +http://svn.apache.org/viewvc?view=revision&revision=1852982 +http://svn.apache.org/viewvc?view=revision&revision=1857731 +http://svn.apache.org/viewvc?view=revision&revision=1853631 + +diff --git a/Makefile.in b/Makefile.in +index 06b8c5a..9eeb5c7 100644 +--- a/Makefile.in ++++ b/Makefile.in +@@ -213,6 +213,7 @@ install-cgi: + install-other: + @test -d $(DESTDIR)$(logfiledir) || $(MKINSTALLDIRS) $(DESTDIR)$(logfiledir) + @test -d $(DESTDIR)$(runtimedir) || $(MKINSTALLDIRS) $(DESTDIR)$(runtimedir) ++ @test -d $(DESTDIR)$(statedir) || $(MKINSTALLDIRS) $(DESTDIR)$(statedir) + @for ext in dll x; do \ + file=apachecore.$$ext; \ + if test -f $$file; then \ +diff --git a/acinclude.m4 b/acinclude.m4 +index 0ad0c13..a8c2804 100644 +--- a/acinclude.m4 ++++ b/acinclude.m4 +@@ -45,6 +45,7 @@ AC_DEFUN([APACHE_GEN_CONFIG_VARS],[ + APACHE_SUBST(installbuilddir) + APACHE_SUBST(runtimedir) + APACHE_SUBST(proxycachedir) ++ APACHE_SUBST(statedir) + APACHE_SUBST(other_targets) + APACHE_SUBST(progname) + APACHE_SUBST(prefix) +@@ -663,6 +664,7 @@ AC_DEFUN([APACHE_EXPORT_ARGUMENTS],[ + APACHE_SUBST_EXPANDED_ARG(runtimedir) + APACHE_SUBST_EXPANDED_ARG(logfiledir) + APACHE_SUBST_EXPANDED_ARG(proxycachedir) ++ APACHE_SUBST_EXPANDED_ARG(statedir) + ]) + + dnl +diff --git a/configure.in b/configure.in +index a208b53..de6a8ad 100644 +--- a/configure.in ++++ b/configure.in +@@ -41,7 +41,7 @@ dnl Something seems broken here. 
+ AC_PREFIX_DEFAULT(/usr/local/apache2) + + dnl Get the layout here, so we can pass the required variables to apr +-APR_ENABLE_LAYOUT(Apache, [errordir iconsdir htdocsdir cgidir]) ++APR_ENABLE_LAYOUT(Apache, [errordir iconsdir htdocsdir cgidir statedir]) + + dnl reparse the configure arguments. + APR_PARSE_ARGUMENTS +diff --git a/include/ap_config_layout.h.in b/include/ap_config_layout.h.in +index 2b4a70c..e076f41 100644 +--- a/include/ap_config_layout.h.in ++++ b/include/ap_config_layout.h.in +@@ -60,5 +60,7 @@ + #define DEFAULT_REL_LOGFILEDIR "@rel_logfiledir@" + #define DEFAULT_EXP_PROXYCACHEDIR "@exp_proxycachedir@" + #define DEFAULT_REL_PROXYCACHEDIR "@rel_proxycachedir@" ++#define DEFAULT_EXP_STATEDIR "@exp_statedir@" ++#define DEFAULT_REL_STATEDIR "@rel_statedir@" + + #endif /* AP_CONFIG_LAYOUT_H */ +diff --git a/include/http_config.h b/include/http_config.h +index adc5825..effccc1 100644 +--- a/include/http_config.h ++++ b/include/http_config.h +@@ -757,6 +757,14 @@ AP_DECLARE(char *) ap_server_root_relative(apr_pool_t *p, const char *fname); + */ + AP_DECLARE(char *) ap_runtime_dir_relative(apr_pool_t *p, const char *fname); + ++/** ++ * Compute the name of a persistent state file (e.g. a database or ++ * long-lived cache) relative to the appropriate state directory. ++ * Absolute paths are returned as-is. The state directory is ++ * configured via the DefaultStateDir directive or at build time. ++ */ ++AP_DECLARE(char *) ap_state_dir_relative(apr_pool_t *p, const char *fname); ++ + /* Finally, the hook for dynamically loading modules in... 
*/ + + /** +diff --git a/modules/dav/fs/mod_dav_fs.c b/modules/dav/fs/mod_dav_fs.c +index addfd7e..2389f8f 100644 +--- a/modules/dav/fs/mod_dav_fs.c ++++ b/modules/dav/fs/mod_dav_fs.c +@@ -29,6 +29,10 @@ typedef struct { + + extern module AP_MODULE_DECLARE_DATA dav_fs_module; + ++#ifndef DEFAULT_DAV_LOCKDB ++#define DEFAULT_DAV_LOCKDB "davlockdb" ++#endif ++ + const char *dav_get_lockdb_path(const request_rec *r) + { + dav_fs_server_conf *conf; +@@ -57,6 +61,24 @@ static void *dav_fs_merge_server_config(apr_pool_t *p, + return newconf; + } + ++static apr_status_t dav_fs_post_config(apr_pool_t *p, apr_pool_t *plog, ++ apr_pool_t *ptemp, server_rec *base_server) ++{ ++ server_rec *s; ++ ++ for (s = base_server; s; s = s->next) { ++ dav_fs_server_conf *conf; ++ ++ conf = ap_get_module_config(s->module_config, &dav_fs_module); ++ ++ if (!conf->lockdb_path) { ++ conf->lockdb_path = ap_state_dir_relative(p, DEFAULT_DAV_LOCKDB); ++ } ++ } ++ ++ return OK; ++} ++ + /* + * Command handler for the DAVLockDB directive, which is TAKE1 + */ +@@ -87,6 +109,8 @@ static const command_rec dav_fs_cmds[] = + + static void register_hooks(apr_pool_t *p) + { ++ ap_hook_post_config(dav_fs_post_config, NULL, NULL, APR_HOOK_MIDDLE); ++ + dav_hook_gather_propsets(dav_fs_gather_propsets, NULL, NULL, + APR_HOOK_MIDDLE); + dav_hook_find_liveprop(dav_fs_find_liveprop, NULL, NULL, APR_HOOK_MIDDLE); +diff --git a/modules/md/mod_md_config.c b/modules/md/mod_md_config.c +index 336a21b..4d50e26 100644 +--- a/modules/md/mod_md_config.c ++++ b/modules/md/mod_md_config.c +@@ -54,10 +54,18 @@ + + #define DEF_VAL (-1) + ++#ifndef MD_DEFAULT_BASE_DIR ++#define MD_DEFAULT_BASE_DIR "md" ++#endif ++ + /* Default settings for the global conf */ + static md_mod_conf_t defmc = { + NULL, +- "md", ++#if 1 ++ NULL, /* apply default state-dir-relative */ ++#else ++ MD_DEFAULT_BASE_DIR, ++#endif + NULL, + NULL, + 80, +@@ -864,6 +872,12 @@ apr_status_t md_config_post_config(server_rec *s, apr_pool_t *p) + if 
(mc->hsts_max_age > 0) { + mc->hsts_header = apr_psprintf(p, "max-age=%d", mc->hsts_max_age); + } ++ ++#if 1 ++ if (mc->base_dir == NULL) { ++ mc->base_dir = ap_state_dir_relative(p, MD_DEFAULT_BASE_DIR); ++ } ++#endif + + return APR_SUCCESS; + } +diff --git a/server/core.c b/server/core.c +index bbe52e0..b5ab429 100644 +--- a/server/core.c ++++ b/server/core.c +@@ -133,6 +133,8 @@ AP_DECLARE_DATA int ap_main_state = AP_SQ_MS_INITIAL_STARTUP; + AP_DECLARE_DATA int ap_run_mode = AP_SQ_RM_UNKNOWN; + AP_DECLARE_DATA int ap_config_generation = 0; + ++static const char *core_state_dir; ++ + static void *create_core_dir_config(apr_pool_t *a, char *dir) + { + core_dir_config *conf; +@@ -1411,12 +1413,15 @@ AP_DECLARE(const char *) ap_resolve_env(apr_pool_t *p, const char * word) + return res_buf; + } + +-static int reset_config_defines(void *dummy) ++/* pconf cleanup - clear global variables set from config here. */ ++static apr_status_t reset_config(void *dummy) + { + ap_server_config_defines = saved_server_config_defines; + saved_server_config_defines = NULL; + server_config_defined_vars = NULL; +- return OK; ++ core_state_dir = NULL; ++ ++ return APR_SUCCESS; + } + + /* +@@ -3108,6 +3113,24 @@ static const char *set_runtime_dir(cmd_parms *cmd, void *dummy, const char *arg) + return NULL; + } + ++static const char *set_state_dir(cmd_parms *cmd, void *dummy, const char *arg) ++{ ++ const char *err = ap_check_cmd_context(cmd, GLOBAL_ONLY); ++ ++ if (err != NULL) { ++ return err; ++ } ++ ++ if ((apr_filepath_merge((char**)&core_state_dir, NULL, ++ ap_server_root_relative(cmd->temp_pool, arg), ++ APR_FILEPATH_TRUENAME, cmd->pool) != APR_SUCCESS) ++ || !ap_is_directory(cmd->temp_pool, core_state_dir)) { ++ return "DefaultStateDir must be a valid directory, absolute or relative to ServerRoot"; ++ } ++ ++ return NULL; ++} ++ + static const char *set_timeout(cmd_parms *cmd, void *dummy, const char *arg) + { + const char *err = ap_check_cmd_context(cmd, NOT_IN_DIR_CONTEXT); +@@ 
-4409,6 +4432,8 @@ AP_INIT_TAKE1("ServerRoot", set_server_root, NULL, RSRC_CONF | EXEC_ON_READ, + "Common directory of server-related files (logs, confs, etc.)"), + AP_INIT_TAKE1("DefaultRuntimeDir", set_runtime_dir, NULL, RSRC_CONF | EXEC_ON_READ, + "Common directory for run-time files (shared memory, locks, etc.)"), ++AP_INIT_TAKE1("DefaultStateDir", set_state_dir, NULL, RSRC_CONF | EXEC_ON_READ, ++ "Common directory for persistent state (databases, long-lived caches, etc.)"), + AP_INIT_TAKE1("ErrorLog", set_server_string_slot, + (void *)APR_OFFSETOF(server_rec, error_fname), RSRC_CONF, + "The filename of the error log"), +@@ -4932,8 +4957,7 @@ static int core_pre_config(apr_pool_t *pconf, apr_pool_t *plog, apr_pool_t *ptem + + if (!saved_server_config_defines) + init_config_defines(pconf); +- apr_pool_cleanup_register(pconf, NULL, reset_config_defines, +- apr_pool_cleanup_null); ++ apr_pool_cleanup_register(pconf, NULL, reset_config, apr_pool_cleanup_null); + + ap_regcomp_set_default_cflags(AP_REG_DOLLAR_ENDONLY); + +@@ -5202,6 +5226,27 @@ AP_DECLARE(int) ap_state_query(int query) + } + } + ++AP_DECLARE(char *) ap_state_dir_relative(apr_pool_t *p, const char *file) ++{ ++ char *newpath = NULL; ++ apr_status_t rv; ++ const char *state_dir; ++ ++ state_dir = core_state_dir ++ ? 
core_state_dir ++ : ap_server_root_relative(p, DEFAULT_REL_STATEDIR); ++ ++ rv = apr_filepath_merge(&newpath, state_dir, file, APR_FILEPATH_TRUENAME, p); ++ if (newpath && (rv == APR_SUCCESS || APR_STATUS_IS_EPATHWILD(rv) ++ || APR_STATUS_IS_ENOENT(rv) ++ || APR_STATUS_IS_ENOTDIR(rv))) { ++ return newpath; ++ } ++ else { ++ return NULL; ++ } ++} ++ + static apr_random_t *rng = NULL; + #if APR_HAS_THREADS + static apr_thread_mutex_t *rng_mutex = NULL; diff --git a/SOURCES/httpd-2.4.37-r1851471.patch b/SOURCES/httpd-2.4.37-r1851471.patch new file mode 100644 index 0000000..f56c907 --- /dev/null +++ b/SOURCES/httpd-2.4.37-r1851471.patch @@ -0,0 +1,45 @@ +# ./pullrev.sh 1851471 +http://svn.apache.org/viewvc?view=revision&revision=1851471 + +--- httpd-2.4.37/modules/ssl/ssl_engine_io.c ++++ httpd-2.4.37/modules/ssl/ssl_engine_io.c +@@ -200,6 +200,8 @@ + apr_bucket *e; + int need_flush; + ++ BIO_clear_retry_flags(bio); ++ + /* Abort early if the client has initiated a renegotiation. */ + if (outctx->filter_ctx->config->reneg_state == RENEG_ABORT) { + outctx->rc = APR_ECONNABORTED; +@@ -206,12 +208,6 @@ + return -1; + } + +- /* when handshaking we'll have a small number of bytes. +- * max size SSL will pass us here is about 16k. +- * (16413 bytes to be exact) +- */ +- BIO_clear_retry_flags(bio); +- + /* Use a transient bucket for the output data - any downstream + * filter must setaside if necessary. */ + e = apr_bucket_transient_create(in, inl, outctx->bb->bucket_alloc); +@@ -458,6 +454,8 @@ + if (!in) + return 0; + ++ BIO_clear_retry_flags(bio); ++ + /* Abort early if the client has initiated a renegotiation. 
*/ + if (inctx->filter_ctx->config->reneg_state == RENEG_ABORT) { + inctx->rc = APR_ECONNABORTED; +@@ -464,8 +462,6 @@ + return -1; + } + +- BIO_clear_retry_flags(bio); +- + if (!inctx->bb) { + inctx->rc = APR_EOF; + return -1; diff --git a/SOURCES/httpd-2.4.37-r1861793+.patch b/SOURCES/httpd-2.4.37-r1861793+.patch new file mode 100644 index 0000000..a74ece4 --- /dev/null +++ b/SOURCES/httpd-2.4.37-r1861793+.patch @@ -0,0 +1,209 @@ +diff --git a/configure.in b/configure.in +index de6a8ad..4ca489d 100644 +--- a/configure.in ++++ b/configure.in +@@ -465,6 +465,28 @@ LIBS="" + AC_SEARCH_LIBS(crypt, crypt) + CRYPT_LIBS="$LIBS" + APACHE_SUBST(CRYPT_LIBS) ++ ++if test "$ac_cv_search_crypt" != "no"; then ++ # Test crypt() with the SHA-512 test vector from https://akkadia.org/drepper/SHA-crypt.txt ++ AC_CACHE_CHECK([whether crypt() supports SHA-2], [ap_cv_crypt_sha2], [ ++ AC_RUN_IFELSE([AC_LANG_PROGRAM([[ ++#include ++#include ++#include ++ ++#define PASSWD_0 "Hello world!" ++#define SALT_0 "\$6\$saltstring" ++#define EXPECT_0 "\$6\$saltstring\$svn8UoSVapNtMuq1ukKS4tPQd8iKwSMHWjl/O817G3uBnIFNjnQJu" \ ++ "esI68u4OTLiBFdcbYEdFCoEOfaS35inz1" ++]], [char *result = crypt(PASSWD_0, SALT_0); ++ if (!result) return 1; ++ if (strcmp(result, EXPECT_0)) return 2; ++])], [ap_cv_crypt_sha2=yes], [ap_cv_crypt_sha2=no])]) ++ if test "$ap_cv_crypt_sha2" = yes; then ++ AC_DEFINE([HAVE_CRYPT_SHA2], 1, [Define if crypt() supports SHA-2 hashes]) ++ fi ++fi ++ + LIBS="$saved_LIBS" + + dnl See Comment #Spoon +diff --git a/support/htpasswd.c b/support/htpasswd.c +index 660a27c..136f62a 100644 +--- a/support/htpasswd.c ++++ b/support/htpasswd.c +@@ -98,28 +98,32 @@ static int mkrecord(struct passwd_ctx *ctx, char *user) + static void usage(void) + { + apr_file_printf(errfile, "Usage:" NL +- "\thtpasswd [-cimBdpsDv] [-C cost] passwordfile username" NL +- "\thtpasswd -b[cmBdpsDv] [-C cost] passwordfile username password" NL ++ "\thtpasswd [-cimB25dpsDv] [-C cost] [-r rounds] passwordfile username" 
NL ++ "\thtpasswd -b[cmB25dpsDv] [-C cost] [-r rounds] passwordfile username password" NL + NL +- "\thtpasswd -n[imBdps] [-C cost] username" NL +- "\thtpasswd -nb[mBdps] [-C cost] username password" NL ++ "\thtpasswd -n[imB25dps] [-C cost] [-r rounds] username" NL ++ "\thtpasswd -nb[mB25dps] [-C cost] [-r rounds] username password" NL + " -c Create a new file." NL + " -n Don't update file; display results on stdout." NL + " -b Use the password from the command line rather than prompting " + "for it." NL + " -i Read password from stdin without verification (for script usage)." NL + " -m Force MD5 encryption of the password (default)." NL +- " -B Force bcrypt encryption of the password (very secure)." NL ++ " -2 Force SHA-256 crypt() hash of the password (secure)." NL ++ " -5 Force SHA-512 crypt() hash of the password (secure)." NL ++ " -B Force bcrypt aencryption of the password (very secure)." NL + " -C Set the computing time used for the bcrypt algorithm" NL + " (higher is more secure but slower, default: %d, valid: 4 to 31)." NL ++ " -r Set the number of rounds used for the SHA-256, SHA-512 algorithms" NL ++ " (higher is more secure but slower, default: 5000)." NL + " -d Force CRYPT encryption of the password (8 chars max, insecure)." NL +- " -s Force SHA encryption of the password (insecure)." NL ++ " -s Force SHA-1 encryption of the password (insecure)." NL + " -p Do not encrypt the password (plaintext, insecure)." NL + " -D Delete the specified user." NL + " -v Verify password for the specified user." NL + "On other systems than Windows and NetWare the '-p' flag will " + "probably not work." NL +- "The SHA algorithm does not use a salt and is less secure than the " ++ "The SHA-1 algorithm does not use a salt and is less secure than the " + "MD5 algorithm." 
NL, + BCRYPT_DEFAULT_COST + ); +@@ -178,7 +182,7 @@ static void check_args(int argc, const char *const argv[], + if (rv != APR_SUCCESS) + exit(ERR_SYNTAX); + +- while ((rv = apr_getopt(state, "cnmspdBbDiC:v", &opt, &opt_arg)) == APR_SUCCESS) { ++ while ((rv = apr_getopt(state, "cnmspdBbDi25C:r:v", &opt, &opt_arg)) == APR_SUCCESS) { + switch (opt) { + case 'c': + *mask |= APHTP_NEWFILE; +diff --git a/support/passwd_common.c b/support/passwd_common.c +index 664e509..d45657c 100644 +--- a/support/passwd_common.c ++++ b/support/passwd_common.c +@@ -185,10 +185,15 @@ int mkhash(struct passwd_ctx *ctx) + #if CRYPT_ALGO_SUPPORTED + char *cbuf; + #endif ++#ifdef HAVE_CRYPT_SHA2 ++ const char *setting; ++ char method; ++#endif + +- if (ctx->cost != 0 && ctx->alg != ALG_BCRYPT) { ++ if (ctx->cost != 0 && ctx->alg != ALG_BCRYPT ++ && ctx->alg != ALG_CRYPT_SHA256 && ctx->alg != ALG_CRYPT_SHA512 ) { + apr_file_printf(errfile, +- "Warning: Ignoring -C argument for this algorithm." NL); ++ "Warning: Ignoring -C/-r argument for this algorithm." NL); + } + + if (ctx->passwd == NULL) { +@@ -246,6 +251,34 @@ int mkhash(struct passwd_ctx *ctx) + break; + #endif /* CRYPT_ALGO_SUPPORTED */ + ++#ifdef HAVE_CRYPT_SHA2 ++ case ALG_CRYPT_SHA256: ++ case ALG_CRYPT_SHA512: ++ ret = generate_salt(salt, 16, &ctx->errstr, ctx->pool); ++ if (ret != 0) ++ break; ++ ++ method = ctx->alg == ALG_CRYPT_SHA256 ? 
'5': '6'; ++ ++ if (ctx->cost) ++ setting = apr_psprintf(ctx->pool, "$%c$rounds=%d$%s", ++ method, ctx->cost, salt); ++ else ++ setting = apr_psprintf(ctx->pool, "$%c$%s", ++ method, salt); ++ ++ cbuf = crypt(pw, setting); ++ if (cbuf == NULL) { ++ rv = APR_FROM_OS_ERROR(errno); ++ ctx->errstr = apr_psprintf(ctx->pool, "crypt() failed: %pm", &rv); ++ ret = ERR_PWMISMATCH; ++ break; ++ } ++ ++ apr_cpystrn(ctx->out, cbuf, ctx->out_len - 1); ++ break; ++#endif /* HAVE_CRYPT_SHA2 */ ++ + #if BCRYPT_ALGO_SUPPORTED + case ALG_BCRYPT: + rv = apr_generate_random_bytes((unsigned char*)salt, 16); +@@ -294,6 +327,19 @@ int parse_common_options(struct passwd_ctx *ctx, char opt, + case 's': + ctx->alg = ALG_APSHA; + break; ++#ifdef HAVE_CRYPT_SHA2 ++ case '2': ++ ctx->alg = ALG_CRYPT_SHA256; ++ break; ++ case '5': ++ ctx->alg = ALG_CRYPT_SHA512; ++ break; ++#else ++ case '2': ++ case '5': ++ ctx->errstr = "SHA-2 crypt() algorithms are not supported on this platform."; ++ return ERR_ALG_NOT_SUPP; ++#endif + case 'p': + ctx->alg = ALG_PLAIN; + #if !PLAIN_ALGO_SUPPORTED +@@ -324,11 +370,12 @@ int parse_common_options(struct passwd_ctx *ctx, char opt, + return ERR_ALG_NOT_SUPP; + #endif + break; +- case 'C': { ++ case 'C': ++ case 'r': { + char *endptr; + long num = strtol(opt_arg, &endptr, 10); + if (*endptr != '\0' || num <= 0) { +- ctx->errstr = "argument to -C must be a positive integer"; ++ ctx->errstr = "argument to -C/-r must be a positive integer"; + return ERR_SYNTAX; + } + ctx->cost = num; +diff --git a/support/passwd_common.h b/support/passwd_common.h +index 660081e..f1b3cd7 100644 +--- a/support/passwd_common.h ++++ b/support/passwd_common.h +@@ -28,6 +28,8 @@ + #include "apu_version.h" + #endif + ++#include "ap_config_auto.h" ++ + #define MAX_STRING_LEN 256 + + #define ALG_PLAIN 0 +@@ -35,6 +37,8 @@ + #define ALG_APMD5 2 + #define ALG_APSHA 3 + #define ALG_BCRYPT 4 ++#define ALG_CRYPT_SHA256 5 ++#define ALG_CRYPT_SHA512 6 + + #define BCRYPT_DEFAULT_COST 5 + +@@ -84,7 
+88,7 @@ struct passwd_ctx { + apr_size_t out_len; + char *passwd; + int alg; +- int cost; ++ int cost; /* cost for bcrypt, rounds for SHA-2 */ + enum { + PW_PROMPT = 0, + PW_ARG, diff --git a/SOURCES/httpd-2.4.37-r1870095+.patch b/SOURCES/httpd-2.4.37-r1870095+.patch new file mode 100644 index 0000000..bd43c5c --- /dev/null +++ b/SOURCES/httpd-2.4.37-r1870095+.patch @@ -0,0 +1,117 @@ +# ./pullrev.sh 1870095 1870097 +http://svn.apache.org/viewvc?view=revision&revision=1870095 +http://svn.apache.org/viewvc?view=revision&revision=1870097 + +--- httpd-2.4.37/modules/ssl/ssl_engine_kernel.c ++++ httpd-2.4.37/modules/ssl/ssl_engine_kernel.c +@@ -114,6 +114,45 @@ + return result; + } + ++/* If a renegotiation is required for the location, and the request ++ * includes a message body (and the client has not requested a "100 ++ * Continue" response), then the client will be streaming the request ++ * body over the wire already. In that case, it is not possible to ++ * stop and perform a new SSL handshake immediately; once the SSL ++ * library moves to the "accept" state, it will reject the SSL packets ++ * which the client is sending for the request body. ++ * ++ * To allow authentication to complete in the hook, the solution used ++ * here is to fill a (bounded) buffer with the request body, and then ++ * to reinject that request body later. ++ * ++ * This function is called to fill the renegotiation buffer for the ++ * location as required, or fail. Returns zero on success or HTTP_ ++ * error code on failure. ++ */ ++static int fill_reneg_buffer(request_rec *r, SSLDirConfigRec *dc) ++{ ++ int rv; ++ apr_size_t rsize; ++ ++ /* ### this is HTTP/1.1 specific, special case for protocol? */ ++ if (r->expecting_100 || !ap_request_has_body(r)) { ++ return 0; ++ } ++ ++ rsize = dc->nRenegBufferSize == UNSET ? DEFAULT_RENEG_BUFFER_SIZE : dc->nRenegBufferSize; ++ if (rsize > 0) { ++ /* Fill the I/O buffer with the request body if possible. 
*/ ++ rv = ssl_io_buffer_fill(r, rsize); ++ } ++ else { ++ /* If the reneg buffer size is set to zero, just fail. */ ++ rv = HTTP_REQUEST_ENTITY_TOO_LARGE; ++ } ++ ++ return rv; ++} ++ + #ifdef HAVE_TLSEXT + static int ap_array_same_str_set(apr_array_header_t *s1, apr_array_header_t *s2) + { +@@ -814,41 +853,14 @@ + } + } + +- /* If a renegotiation is now required for this location, and the +- * request includes a message body (and the client has not +- * requested a "100 Continue" response), then the client will be +- * streaming the request body over the wire already. In that +- * case, it is not possible to stop and perform a new SSL +- * handshake immediately; once the SSL library moves to the +- * "accept" state, it will reject the SSL packets which the client +- * is sending for the request body. +- * +- * To allow authentication to complete in this auth hook, the +- * solution used here is to fill a (bounded) buffer with the +- * request body, and then to reinject that request body later. +- */ +- if (renegotiate && !renegotiate_quick +- && !r->expecting_100 +- && ap_request_has_body(r)) { +- int rv; +- apr_size_t rsize; +- +- rsize = dc->nRenegBufferSize == UNSET ? DEFAULT_RENEG_BUFFER_SIZE : +- dc->nRenegBufferSize; +- if (rsize > 0) { +- /* Fill the I/O buffer with the request body if possible. */ +- rv = ssl_io_buffer_fill(r, rsize); +- } +- else { +- /* If the reneg buffer size is set to zero, just fail. */ +- rv = HTTP_REQUEST_ENTITY_TOO_LARGE; +- } +- +- if (rv) { ++ /* Fill reneg buffer if required. */ ++ if (renegotiate && !renegotiate_quick) { ++ rc = fill_reneg_buffer(r, dc); ++ if (rc) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02257) + "could not buffer message body to allow " + "SSL renegotiation to proceed"); +- return rv; ++ return rc; + } + } + +@@ -1132,6 +1144,17 @@ + } + } + ++ /* Fill reneg buffer if required. 
*/ ++ if (change_vmode) { ++ rc = fill_reneg_buffer(r, dc); ++ if (rc) { ++ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10228) ++ "could not buffer message body to allow " ++ "TLS Post-Handshake Authentication to proceed"); ++ return rc; ++ } ++ } ++ + if (change_vmode) { + char peekbuf[1]; + diff --git a/SOURCES/httpd-2.4.37-session-expiry-updt-int.patch b/SOURCES/httpd-2.4.37-session-expiry-updt-int.patch new file mode 100644 index 0000000..8c5b852 --- /dev/null +++ b/SOURCES/httpd-2.4.37-session-expiry-updt-int.patch @@ -0,0 +1,194 @@ +diff --git a/docs/manual/mod/mod_session.html.en b/docs/manual/mod/mod_session.html.en +index 6834f8e..9f8301f 100644 +--- a/docs/manual/mod/mod_session.html.en ++++ b/docs/manual/mod/mod_session.html.en +@@ -82,6 +82,7 @@ +
  • SessionHeader
  • +
  • SessionInclude
  • +
  • SessionMaxAge
  • ++
  • SessionExpiryUpdateInterval
  • + +

    Bugfix checklist

    See also

    +
    Description:Determines whether trailers are merged into headers
    ++ ++ ++ ++ ++ ++
    Description:Define the number of seconds a session's expiry may change without the session being updated
    Syntax:SessionExpiryUpdateInterval interval
    Default:SessionExpiryUpdateInterval 0 (always update)
    Context:server config, virtual host, directory, .htaccess
    Module:mod_session
    ++

    The SessionExpiryUpdateInterval directive allows ++ sessions to avoid the cost associated with writing the session each request ++ when only the expiry time has changed. This can be used to make a website ++ more efficient or reduce load on a database when using ++ mod_session_dbd. The session is always written if the data ++ stored in the session has changed or the expiry has changed by more than the ++ configured interval.

    ++ ++

    Setting the interval to zero disables this directive, and the session ++ expiry is refreshed for each request.

    ++ ++

    This directive only has an effect when combined with SessionMaxAge to enable session ++ expiry. Sessions without an expiry are only written when the data stored in ++ the session has changed.

    ++ ++

    Warning

    ++

    Because the session expiry may not be refreshed with each request, it's ++ possible for sessions to expire up to interval seconds early. ++ Using a small interval usually provides sufficient savings while having a ++ minimal effect on expiry resolution.

    ++ +
    + +
    +diff --git a/modules/session/mod_session.c b/modules/session/mod_session.c +index d517020..10e6396 100644 +--- a/modules/session/mod_session.c ++++ b/modules/session/mod_session.c +@@ -177,6 +177,7 @@ static apr_status_t ap_session_save(request_rec * r, session_rec * z) + { + if (z) { + apr_time_t now = apr_time_now(); ++ apr_time_t initialExpiry = z->expiry; + int rv = 0; + + session_dir_conf *dconf = ap_get_module_config(r->per_dir_config, +@@ -207,6 +208,17 @@ static apr_status_t ap_session_save(request_rec * r, session_rec * z) + z->expiry = now + z->maxage * APR_USEC_PER_SEC; + } + ++ /* don't save if the only change is the expiry by a small amount */ ++ if (!z->dirty && dconf->expiry_update_time ++ && (z->expiry - initialExpiry < dconf->expiry_update_time)) { ++ return APR_SUCCESS; ++ } ++ ++ /* also don't save sessions that didn't change at all */ ++ if (!z->dirty && !z->maxage) { ++ return APR_SUCCESS; ++ } ++ + /* encode the session */ + rv = ap_run_session_encode(r, z); + if (OK != rv) { +@@ -553,6 +565,10 @@ static void *merge_session_dir_config(apr_pool_t * p, void *basev, void *addv) + new->env_set = add->env_set || base->env_set; + new->includes = apr_array_append(p, base->includes, add->includes); + new->excludes = apr_array_append(p, base->excludes, add->excludes); ++ new->expiry_update_time = (add->expiry_update_set == 0) ++ ? 
base->expiry_update_time ++ : add->expiry_update_time; ++ new->expiry_update_set = add->expiry_update_set || base->expiry_update_set; + + return new; + } +@@ -622,6 +638,21 @@ static const char *add_session_exclude(cmd_parms * cmd, void *dconf, const char + return NULL; + } + ++static const char * ++ set_session_expiry_update(cmd_parms * parms, void *dconf, const char *arg) ++{ ++ session_dir_conf *conf = dconf; ++ ++ conf->expiry_update_time = atoi(arg); ++ if (conf->expiry_update_time < 0) { ++ return "SessionExpiryUpdateInterval must be positive or nul"; ++ } ++ conf->expiry_update_time = apr_time_from_sec(conf->expiry_update_time); ++ conf->expiry_update_set = 1; ++ ++ return NULL; ++} ++ + + static const command_rec session_cmds[] = + { +@@ -637,6 +668,9 @@ static const command_rec session_cmds[] = + "URL prefixes to include in the session. Defaults to all URLs"), + AP_INIT_TAKE1("SessionExclude", add_session_exclude, NULL, RSRC_CONF|OR_AUTHCFG, + "URL prefixes to exclude from the session. Defaults to no URLs"), ++ AP_INIT_TAKE1("SessionExpiryUpdateInterval", set_session_expiry_update, NULL, RSRC_CONF|OR_AUTHCFG, ++ "time interval for which a session's expiry time may change " ++ "without having to be rewritten. Zero to disable"), + {NULL} + }; + +diff --git a/modules/session/mod_session.h b/modules/session/mod_session.h +index a6dd5e9..bdeb532 100644 +--- a/modules/session/mod_session.h ++++ b/modules/session/mod_session.h +@@ -115,6 +115,9 @@ typedef struct { + * URLs included if empty */ + apr_array_header_t *excludes; /* URL prefixes to be excluded. 
No + * URLs excluded if empty */ ++ apr_time_t expiry_update_time; /* seconds the session expiry may change and ++ * not have to be rewritten */ ++ int expiry_update_set; + } session_dir_conf; + + /** +diff --git a/modules/session/mod_session_cookie.c b/modules/session/mod_session_cookie.c +index 6a02322..4aa75e4 100644 +--- a/modules/session/mod_session_cookie.c ++++ b/modules/session/mod_session_cookie.c +@@ -60,9 +60,6 @@ static apr_status_t session_cookie_save(request_rec * r, session_rec * z) + session_cookie_dir_conf *conf = ap_get_module_config(r->per_dir_config, + &session_cookie_module); + +- /* don't cache auth protected pages */ +- apr_table_addn(r->headers_out, "Cache-Control", "no-cache"); +- + /* create RFC2109 compliant cookie */ + if (conf->name_set) { + if (z->encoded && z->encoded[0]) { +@@ -162,6 +159,9 @@ static apr_status_t session_cookie_load(request_rec * r, session_rec ** z) + /* put the session in the notes so we don't have to parse it again */ + apr_table_setn(m->notes, note, (char *)zz); + ++ /* don't cache auth protected pages */ ++ apr_table_addn(r->headers_out, "Cache-Control", "no-cache, private"); ++ + return OK; + + } +diff --git a/modules/session/mod_session_dbd.c b/modules/session/mod_session_dbd.c +index 0be7306..f683da2 100644 +--- a/modules/session/mod_session_dbd.c ++++ b/modules/session/mod_session_dbd.c +@@ -245,6 +245,9 @@ static apr_status_t session_dbd_load(request_rec * r, session_rec ** z) + /* put the session in the notes so we don't have to parse it again */ + apr_table_setn(m->notes, note, (char *)zz); + ++ /* don't cache pages with a session */ ++ apr_table_addn(r->headers_out, "Cache-Control", "no-cache, private"); ++ + return OK; + + } +@@ -409,9 +412,6 @@ static apr_status_t session_dbd_save(request_rec * r, session_rec * z) + if (conf->name_set || conf->name2_set) { + char *oldkey = NULL, *newkey = NULL; + +- /* don't cache pages with a session */ +- apr_table_addn(r->headers_out, "Cache-Control", "no-cache"); 
+- + /* if the session is new or changed, make a new session ID */ + if (z->uuid) { + oldkey = apr_pcalloc(r->pool, APR_UUID_FORMATTED_LENGTH + 1); +@@ -458,7 +458,7 @@ static apr_status_t session_dbd_save(request_rec * r, session_rec * z) + else if (conf->peruser) { + + /* don't cache pages with a session */ +- apr_table_addn(r->headers_out, "Cache-Control", "no-cache"); ++ apr_table_addn(r->headers_out, "Cache-Control", "no-cache, private"); + + if (r->user) { + ret = dbd_save(r, r->user, r->user, z->encoded, z->expiry); diff --git a/SOURCES/httpd-2.4.37-sslkeylogfile-support.patch b/SOURCES/httpd-2.4.37-sslkeylogfile-support.patch new file mode 100644 index 0000000..9d4cc19 --- /dev/null +++ b/SOURCES/httpd-2.4.37-sslkeylogfile-support.patch @@ -0,0 +1,123 @@ +diff --git a/modules/ssl/ssl_engine_config.c b/modules/ssl/ssl_engine_config.c +index 1d201d9..0c4bf1f 100644 +--- a/modules/ssl/ssl_engine_config.c ++++ b/modules/ssl/ssl_engine_config.c +@@ -75,6 +75,10 @@ SSLModConfigRec *ssl_config_global_create(server_rec *s) + mc->stapling_refresh_mutex = NULL; + #endif + ++#ifdef HAVE_OPENSSL_KEYLOG ++ mc->keylog_file = NULL; ++#endif ++ + apr_pool_userdata_set(mc, SSL_MOD_CONFIG_KEY, + apr_pool_cleanup_null, + pool); +diff --git a/modules/ssl/ssl_engine_init.c b/modules/ssl/ssl_engine_init.c +index ef631c1..b286053 100644 +--- a/modules/ssl/ssl_engine_init.c ++++ b/modules/ssl/ssl_engine_init.c +@@ -437,6 +437,28 @@ apr_status_t ssl_init_Module(apr_pool_t *p, apr_pool_t *plog, + init_bio_methods(); + #endif + ++#ifdef HAVE_OPENSSL_KEYLOG ++ { ++ const char *logfn = getenv("SSLKEYLOGFILE"); ++ ++ if (logfn) { ++ rv = apr_file_open(&mc->keylog_file, logfn, ++ APR_FOPEN_CREATE|APR_FOPEN_WRITE|APR_FOPEN_APPEND|APR_FOPEN_LARGEFILE, ++ APR_FPROT_UREAD|APR_FPROT_UWRITE, ++ mc->pPool); ++ if (rv) { ++ ap_log_error(APLOG_MARK, APLOG_NOTICE, rv, s, APLOGNO(10226) ++ "Could not open log file '%s' configured via SSLKEYLOGFILE", ++ logfn); ++ return rv; ++ } ++ ++ 
ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, s, APLOGNO(10227) ++ "Init: Logging SSL private key material to %s", logfn); ++ } ++ } ++#endif ++ + return OK; + } + +@@ -796,6 +818,12 @@ static apr_status_t ssl_init_ctx_protocol(server_rec *s, + * https://github.com/openssl/openssl/issues/7178 */ + SSL_CTX_clear_mode(ctx, SSL_MODE_AUTO_RETRY); + #endif ++ ++#ifdef HAVE_OPENSSL_KEYLOG ++ if (mctx->sc->mc->keylog_file) { ++ SSL_CTX_set_keylog_callback(ctx, modssl_callback_keylog); ++ } ++#endif + + return APR_SUCCESS; + } +diff --git a/modules/ssl/ssl_engine_kernel.c b/modules/ssl/ssl_engine_kernel.c +index 6611610..7058865 100644 +--- a/modules/ssl/ssl_engine_kernel.c ++++ b/modules/ssl/ssl_engine_kernel.c +@@ -2719,3 +2719,17 @@ int ssl_callback_SRPServerParams(SSL *ssl, int *ad, void *arg) + } + + #endif /* HAVE_SRP */ ++ ++ ++#ifdef HAVE_OPENSSL_KEYLOG ++/* Callback used with SSL_CTX_set_keylog_callback. */ ++void modssl_callback_keylog(const SSL *ssl, const char *line) ++{ ++ conn_rec *conn = SSL_get_app_data(ssl); ++ SSLSrvConfigRec *sc = mySrvConfig(conn->base_server); ++ ++ if (sc && sc->mc->keylog_file) { ++ apr_file_printf(sc->mc->keylog_file, "%s\n", line); ++ } ++} ++#endif +diff --git a/modules/ssl/ssl_private.h b/modules/ssl/ssl_private.h +index 0fac5d1..2514407 100644 +--- a/modules/ssl/ssl_private.h ++++ b/modules/ssl/ssl_private.h +@@ -250,6 +250,10 @@ void free_bio_methods(void); + #endif + #endif + ++#if OPENSSL_VERSION_NUMBER >= 0x10101000L && !defined(LIBRESSL_VERSION_NUMBER) ++#define HAVE_OPENSSL_KEYLOG ++#endif ++ + /* mod_ssl headers */ + #include "ssl_util_ssl.h" + +@@ -617,6 +621,12 @@ typedef struct { + apr_global_mutex_t *stapling_cache_mutex; + apr_global_mutex_t *stapling_refresh_mutex; + #endif ++ ++#ifdef HAVE_OPENSSL_KEYLOG ++ /* Used for logging if SSLKEYLOGFILE is set at startup. 
*/ ++ apr_file_t *keylog_file; ++#endif ++ + } SSLModConfigRec; + + /** Structure representing configured filenames for certs and keys for +@@ -970,6 +980,11 @@ int ssl_stapling_init_cert(server_rec *, apr_pool_t *, apr_pool_t *, + int ssl_callback_SRPServerParams(SSL *, int *, void *); + #endif + ++#ifdef HAVE_OPENSSL_KEYLOG ++/* Callback used with SSL_CTX_set_keylog_callback. */ ++void modssl_callback_keylog(const SSL *ssl, const char *line); ++#endif ++ + /** I/O */ + void ssl_io_filter_init(conn_rec *, request_rec *r, SSL *); + void ssl_io_filter_register(apr_pool_t *); diff --git a/SOURCES/httpd-2.4.37-sslprotdefault.patch b/SOURCES/httpd-2.4.37-sslprotdefault.patch new file mode 100644 index 0000000..546fa1f --- /dev/null +++ b/SOURCES/httpd-2.4.37-sslprotdefault.patch @@ -0,0 +1,98 @@ +diff --git a/modules/ssl/ssl_engine_config.c b/modules/ssl/ssl_engine_config.c +index 55c237e..5467d23 100644 +--- a/modules/ssl/ssl_engine_config.c ++++ b/modules/ssl/ssl_engine_config.c +@@ -119,7 +119,7 @@ static void modssl_ctx_init(modssl_ctx_t *mctx, apr_pool_t *p) + mctx->ticket_key = NULL; + #endif + +- mctx->protocol = SSL_PROTOCOL_DEFAULT; ++ mctx->protocol = SSL_PROTOCOL_NONE; + mctx->protocol_set = 0; + + mctx->pphrase_dialog_type = SSL_PPTYPE_UNSET; +@@ -262,6 +262,7 @@ static void modssl_ctx_cfg_merge(apr_pool_t *p, + { + if (add->protocol_set) { + mrg->protocol = add->protocol; ++ mrg->protocol_set = 1; + } + else { + mrg->protocol = base->protocol; +diff --git a/modules/ssl/ssl_engine_init.c b/modules/ssl/ssl_engine_init.c +index e3f62fe..31fc0e6 100644 +--- a/modules/ssl/ssl_engine_init.c ++++ b/modules/ssl/ssl_engine_init.c +@@ -568,6 +568,7 @@ static apr_status_t ssl_init_ctx_protocol(server_rec *s, + MODSSL_SSL_METHOD_CONST SSL_METHOD *method = NULL; + char *cp; + int protocol = mctx->protocol; ++ int protocol_set = mctx->protocol_set; + SSLSrvConfigRec *sc = mySrvConfig(s); + #if OPENSSL_VERSION_NUMBER >= 0x10100000L + int prot; +@@ -577,12 +578,18 @@ 
static apr_status_t ssl_init_ctx_protocol(server_rec *s, + * Create the new per-server SSL context + */ + if (protocol == SSL_PROTOCOL_NONE) { +- ap_log_error(APLOG_MARK, APLOG_EMERG, 0, s, APLOGNO(02231) +- "No SSL protocols available [hint: SSLProtocol]"); +- return ssl_die(s); +- } ++ if (protocol_set) { ++ ap_log_error(APLOG_MARK, APLOG_EMERG, 0, s, APLOGNO(02231) ++ "No SSL protocols available [hint: SSLProtocol]"); ++ return ssl_die(s); ++ } + +- cp = apr_pstrcat(p, ++ ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, s, ++ "Using OpenSSL/system default SSL/TLS protocols"); ++ cp = "default"; ++ } ++ else { ++ cp = apr_pstrcat(p, + #ifndef OPENSSL_NO_SSL3 + (protocol & SSL_PROTOCOL_SSLV3 ? "SSLv3, " : ""), + #endif +@@ -595,7 +602,8 @@ static apr_status_t ssl_init_ctx_protocol(server_rec *s, + #endif + #endif + NULL); +- cp[strlen(cp)-2] = NUL; ++ cp[strlen(cp)-2] = NUL; ++ } + + ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, s, + "Creating new SSL context (protocols: %s)", cp); +@@ -696,13 +704,15 @@ static apr_status_t ssl_init_ctx_protocol(server_rec *s, + prot = SSL3_VERSION; + #endif + } else { +- SSL_CTX_free(ctx); +- mctx->ssl_ctx = NULL; +- ap_log_error(APLOG_MARK, APLOG_EMERG, 0, s, APLOGNO(03378) +- "No SSL protocols available [hint: SSLProtocol]"); +- return ssl_die(s); ++ if (protocol_set) { ++ SSL_CTX_free(ctx); ++ mctx->ssl_ctx = NULL; ++ ap_log_error(APLOG_MARK, APLOG_EMERG, 0, s, APLOGNO(03378) ++ "No SSL protocols available [hint: SSLProtocol]"); ++ return ssl_die(s); ++ } + } +- SSL_CTX_set_max_proto_version(ctx, prot); ++ if (protocol != SSL_PROTOCOL_NONE) SSL_CTX_set_max_proto_version(ctx, prot); + + /* Next we scan for the minimal protocol version we should provide, + * but we do not allow holes between max and min */ +@@ -726,7 +736,7 @@ static apr_status_t ssl_init_ctx_protocol(server_rec *s, + prot = SSL3_VERSION; + } + #endif +- SSL_CTX_set_min_proto_version(ctx, prot); ++ if (protocol != SSL_PROTOCOL_NONE) SSL_CTX_set_min_proto_version(ctx, 
prot); + #endif /* if OPENSSL_VERSION_NUMBER < 0x10100000L */ + + #ifdef SSL_OP_CIPHER_SERVER_PREFERENCE diff --git a/SOURCES/httpd-init.service b/SOURCES/httpd-init.service new file mode 100644 index 0000000..3074778 --- /dev/null +++ b/SOURCES/httpd-init.service @@ -0,0 +1,12 @@ +[Unit] +Description=One-time temporary TLS key generation for httpd.service +Documentation=man:httpd-init.service(8) + +ConditionPathExists=|!/etc/pki/tls/certs/localhost.crt +ConditionPathExists=|!/etc/pki/tls/private/localhost.key + +[Service] +Type=oneshot +RemainAfterExit=no + +ExecStart=/usr/libexec/httpd-ssl-gencerts diff --git a/SOURCES/httpd-ssl-gencerts b/SOURCES/httpd-ssl-gencerts new file mode 100755 index 0000000..350f5b5 --- /dev/null +++ b/SOURCES/httpd-ssl-gencerts @@ -0,0 +1,39 @@ +#!/usr/bin/bash + +set -e + +FQDN=`hostname` +ssldotconf=/etc/httpd/conf.d/ssl.conf + +if test -f /etc/pki/tls/certs/localhost.crt -a \ + -f /etc/pki/tls/private/localhost.key; then + exit 0 +fi + +if test -f /etc/pki/tls/certs/localhost.crt -a \ + ! -f /etc/pki/tls/private/localhost.key; then + echo "Missing certificate key!" + exit 1 +fi + +if test ! -f /etc/pki/tls/certs/localhost.crt -a \ + -f /etc/pki/tls/private/localhost.key; then + echo "Missing certificate, but key is present!" + exit 1 +fi + +if ! test -f ${ssldotconf} || \ + ! grep -q '^SSLCertificateFile /etc/pki/tls/certs/localhost.crt' ${ssldotconf} || \ + ! grep -q '^SSLCertificateKeyFile /etc/pki/tls/private/localhost.key' ${ssldotconf}; then + # Non-default configuration, do nothing. 
+ exit 0 +fi + +sscg -q \ + --cert-file /etc/pki/tls/certs/localhost.crt \ + --cert-key-file /etc/pki/tls/private/localhost.key \ + --ca-file /etc/pki/tls/certs/localhost.crt \ + --lifetime 365 \ + --hostname $FQDN \ + --email root@$FQDN + diff --git a/SOURCES/httpd-ssl-pass-dialog b/SOURCES/httpd-ssl-pass-dialog new file mode 100755 index 0000000..79318a6 --- /dev/null +++ b/SOURCES/httpd-ssl-pass-dialog @@ -0,0 +1,3 @@ +#!/bin/sh + +exec /bin/systemd-ask-password "Enter TLS private key passphrase for $1 ($2) : " diff --git a/SOURCES/httpd.conf b/SOURCES/httpd.conf new file mode 100644 index 0000000..6ab68cb --- /dev/null +++ b/SOURCES/httpd.conf @@ -0,0 +1,356 @@ +# +# This is the main Apache HTTP server configuration file. It contains the +# configuration directives that give the server its instructions. +# See for detailed information. +# In particular, see +# +# for a discussion of each configuration directive. +# +# See the httpd.conf(5) man page for more information on this configuration, +# and httpd.service(8) on using and configuring the httpd service. +# +# Do NOT simply read the instructions in here without understanding +# what they do. They're here only as hints or reminders. If you are unsure +# consult the online docs. You have been warned. +# +# Configuration and logfile names: If the filenames you specify for many +# of the server's control files begin with "/" (or "drive:/" for Win32), the +# server will use that explicit path. If the filenames do *not* begin +# with "/", the value of ServerRoot is prepended -- so 'log/access_log' +# with ServerRoot set to '/www' will be interpreted by the +# server as '/www/log/access_log', where as '/log/access_log' will be +# interpreted as '/log/access_log'. + +# +# ServerRoot: The top of the directory tree under which the server's +# configuration, error, and log files are kept. +# +# Do not add a slash at the end of the directory path. 
If you point +# ServerRoot at a non-local disk, be sure to specify a local disk on the +# Mutex directive, if file-based mutexes are used. If you wish to share the +# same ServerRoot for multiple httpd daemons, you will need to change at +# least PidFile. +# +ServerRoot "/etc/httpd" + +# +# Listen: Allows you to bind Apache to specific IP addresses and/or +# ports, instead of the default. See also the +# directive. +# +# Change this to Listen on specific IP addresses as shown below to +# prevent Apache from glomming onto all bound IP addresses. +# +#Listen 12.34.56.78:80 +Listen 80 + +# +# Dynamic Shared Object (DSO) Support +# +# To be able to use the functionality of a module which was built as a DSO you +# have to place corresponding `LoadModule' lines at this location so the +# directives contained in it are actually available _before_ they are used. +# Statically compiled modules (those listed by `httpd -l') do not need +# to be loaded here. +# +# Example: +# LoadModule foo_module modules/mod_foo.so +# +Include conf.modules.d/*.conf + +# +# If you wish httpd to run as a different user or group, you must run +# httpd as root initially and it will switch. +# +# User/Group: The name (or #number) of the user/group to run httpd as. +# It is usually good practice to create a dedicated user and group for +# running httpd, as with most system services. +# +User apache +Group apache + +# 'Main' server configuration +# +# The directives in this section set up the values used by the 'main' +# server, which responds to any requests that aren't handled by a +# definition. These values also provide defaults for +# any containers you may define later in the file. +# +# All of these directives may appear inside containers, +# in which case these default settings will be overridden for the +# virtual host being defined. +# + +# +# ServerAdmin: Your address, where problems with the server should be +# e-mailed. 
This address appears on some server-generated pages, such +# as error documents. e.g. admin@your-domain.com +# +ServerAdmin root@localhost + +# +# ServerName gives the name and port that the server uses to identify itself. +# This can often be determined automatically, but we recommend you specify +# it explicitly to prevent problems during startup. +# +# If your host doesn't have a registered DNS name, enter its IP address here. +# +#ServerName www.example.com:80 + +# +# Deny access to the entirety of your server's filesystem. You must +# explicitly permit access to web content directories in other +# blocks below. +# + + AllowOverride none + Require all denied + + +# +# Note that from this point forward you must specifically allow +# particular features to be enabled - so if something's not working as +# you might expect, make sure that you have specifically enabled it +# below. +# + +# +# DocumentRoot: The directory out of which you will serve your +# documents. By default, all requests are taken from this directory, but +# symbolic links and aliases may be used to point to other locations. +# +DocumentRoot "/var/www/html" + +# +# Relax access to content within /var/www. +# + + AllowOverride None + # Allow open access: + Require all granted + + +# Further relax access to the default document root: + + # + # Possible values for the Options directive are "None", "All", + # or any combination of: + # Indexes Includes FollowSymLinks SymLinksifOwnerMatch ExecCGI MultiViews + # + # Note that "MultiViews" must be named *explicitly* --- "Options All" + # doesn't give it to you. + # + # The Options directive is both complicated and important. Please see + # http://httpd.apache.org/docs/2.4/mod/core.html#options + # for more information. + # + Options Indexes FollowSymLinks + + # + # AllowOverride controls what directives may be placed in .htaccess files. 
+ # It can be "All", "None", or any combination of the keywords: + # Options FileInfo AuthConfig Limit + # + AllowOverride None + + # + # Controls who can get stuff from this server. + # + Require all granted + + +# +# DirectoryIndex: sets the file that Apache will serve if a directory +# is requested. +# + + DirectoryIndex index.html + + +# +# The following lines prevent .htaccess and .htpasswd files from being +# viewed by Web clients. +# + + Require all denied + + +# +# ErrorLog: The location of the error log file. +# If you do not specify an ErrorLog directive within a +# container, error messages relating to that virtual host will be +# logged here. If you *do* define an error logfile for a +# container, that host's errors will be logged there and not here. +# +ErrorLog "logs/error_log" + +# +# LogLevel: Control the number of messages logged to the error_log. +# Possible values include: debug, info, notice, warn, error, crit, +# alert, emerg. +# +LogLevel warn + + + # + # The following directives define some format nicknames for use with + # a CustomLog directive (see below). + # + LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"" combined + LogFormat "%h %l %u %t \"%r\" %>s %b" common + + + # You need to enable mod_logio.c to use %I and %O + LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\" %I %O" combinedio + + + # + # The location and format of the access logfile (Common Logfile Format). + # If you do not define any access logfiles within a + # container, they will be logged here. Contrariwise, if you *do* + # define per- access logfiles, transactions will be + # logged therein and *not* in this file. + # + #CustomLog "logs/access_log" common + + # + # If you prefer a logfile with access, agent, and referer information + # (Combined Logfile Format) you can use the following directive. 
+ # + CustomLog "logs/access_log" combined + + + + # + # Redirect: Allows you to tell clients about documents that used to + # exist in your server's namespace, but do not anymore. The client + # will make a new request for the document at its new location. + # Example: + # Redirect permanent /foo http://www.example.com/bar + + # + # Alias: Maps web paths into filesystem paths and is used to + # access content that does not live under the DocumentRoot. + # Example: + # Alias /webpath /full/filesystem/path + # + # If you include a trailing / on /webpath then the server will + # require it to be present in the URL. You will also likely + # need to provide a section to allow access to + # the filesystem path. + + # + # ScriptAlias: This controls which directories contain server scripts. + # ScriptAliases are essentially the same as Aliases, except that + # documents in the target directory are treated as applications and + # run by the server when requested rather than as documents sent to the + # client. The same rules about trailing "/" apply to ScriptAlias + # directives as to Alias. + # + ScriptAlias /cgi-bin/ "/var/www/cgi-bin/" + + + +# +# "/var/www/cgi-bin" should be changed to whatever your ScriptAliased +# CGI directory exists, if you have that configured. +# + + AllowOverride None + Options None + Require all granted + + + + # + # TypesConfig points to the file containing the list of mappings from + # filename extension to MIME-type. + # + TypesConfig /etc/mime.types + + # + # AddType allows you to add to or override the MIME configuration + # file specified in TypesConfig for specific file types. + # + #AddType application/x-gzip .tgz + # + # AddEncoding allows you to have certain browsers uncompress + # information on the fly. Note: Not all browsers support this. 
+ # + #AddEncoding x-compress .Z + #AddEncoding x-gzip .gz .tgz + # + # If the AddEncoding directives above are commented-out, then you + # probably should define those extensions to indicate media types: + # + AddType application/x-compress .Z + AddType application/x-gzip .gz .tgz + + # + # AddHandler allows you to map certain file extensions to "handlers": + # actions unrelated to filetype. These can be either built into the server + # or added with the Action directive (see below) + # + # To use CGI scripts outside of ScriptAliased directories: + # (You will also need to add "ExecCGI" to the "Options" directive.) + # + #AddHandler cgi-script .cgi + + # For type maps (negotiated resources): + #AddHandler type-map var + + # + # Filters allow you to process content before it is sent to the client. + # + # To parse .shtml files for server-side includes (SSI): + # (You will also need to add "Includes" to the "Options" directive.) + # + AddType text/html .shtml + AddOutputFilter INCLUDES .shtml + + +# +# Specify a default charset for all content served; this enables +# interpretation of all content as UTF-8 by default. To use the +# default browser choice (ISO-8859-1), or to allow the META tags +# in HTML content to override this choice, comment out this +# directive: +# +AddDefaultCharset UTF-8 + + + # + # The mod_mime_magic module allows the server to use various hints from the + # contents of the file itself to determine its type. The MIMEMagicFile + # directive tells the module where the hint definitions are located. + # + MIMEMagicFile conf/magic + + +# +# Customizable error responses come in three flavors: +# 1) plain text 2) local redirects 3) external redirects +# +# Some examples: +#ErrorDocument 500 "The server made a boo boo." 
+#ErrorDocument 404 /missing.html +#ErrorDocument 404 "/cgi-bin/missing_handler.pl" +#ErrorDocument 402 http://www.example.com/subscription_info.html +# + +# +# EnableMMAP and EnableSendfile: On systems that support it, +# memory-mapping or the sendfile syscall may be used to deliver +# files. This usually improves server performance, but must +# be turned off when serving from networked-mounted +# filesystems or if support for these functions is otherwise +# broken on your system. +# Defaults if commented: EnableMMAP On, EnableSendfile Off +# +#EnableMMAP off +EnableSendfile on + +# Supplemental configuration +# +# Load config files in the "/etc/httpd/conf.d" directory, if any. +IncludeOptional conf.d/*.conf diff --git a/SOURCES/httpd.conf.xml b/SOURCES/httpd.conf.xml new file mode 100644 index 0000000..705e527 --- /dev/null +++ b/SOURCES/httpd.conf.xml @@ -0,0 +1,259 @@ + + + + + + + httpd.conf + httpd + AuthorOrtonJoejorton@redhat.com + + + + httpd.conf + 5 + + + + httpd.conf + Configuration files for httpd + + + + + /etc/httpd/conf/httpd.conf, + /etc/httpd/conf.modules.d, + /etc/httpd/conf.d + + + + + Description + + The main configuration file for the httpd daemon is + /etc/httpd/conf/httpd.conf. The syntax of + this file is described at , and + the full set of available directives is listed at . + + + + Configuration structure + + The main configuration file + (httpd.conf) sets up various defaults and + includes configuration files from two directories - + /etc/httpd/conf.modules.d and + /etc/httpd/conf.d. Packages containing + loadable modules (like ) place files + in the conf.modules.d directory with the + appropriate directive so that module + is loaded by default. + + Some notable configured defaults are:. + + + + + The default document root from which content + is served. + + + + The daemon lists on TCP port 80. + + + + Error messages are logged to + @LOGDIR@/error_log. + + + + CGI scripts are served via the URL-path . 
+ + + + + To remove any of the default configuration provided in + separate files covered below, replace that file with an empty + file rather than removing it from the filesystem, otherwise it + may be restored to the original when the package which provides + it is upgraded. + + + + + MPM configuration + + The configuration file at + /etc/httpd/conf.modules.d/00-mpm.conf is + used to select the multi-processing module (MPM), which governs + how httpd divides work between processes + and/or threads at run-time. Exactly one + directive must be uncommented in + this file; by default the MPM is enabled. + For more information on MPMs, see . + + If using the prefork MPM, the + "httpd_graceful_shutdown" SELinux boolean should also be + enabled, since with this MPM, httpd needs to establish TCP + connections to local ports to successfully complete a graceful + restart or shutdown. This boolean can be enabled by running the + command: semanage boolean -m --on + httpd_graceful_shutdown + + + + Module configuration files + + Module configuration files are provided in the + /etc/httpd/conf.modules.d/ directory. Filenames + in this directory are by convention prefixed with two digit numeric + prefix to ensure they are processed in the desired order. Core + modules provide with the httpd package are + loaded by files with a prefix to ensure + these are loaded first. Only filenames with a + suffix in this directory will be + processed. + + Other provided configuration files are listed below. + + + + /etc/httpd/conf.modules.d/00-base.conf + The set of core modules included with + httpd which are all loaded by + default. + + + + /etc/httpd/conf.modules.d/00-optional.conf + The set of non-core modules included with + httpd which are not + loaded by default. + + + + + /etc/httpd/conf.modules.d/00-systemd.conf + This file loads + which is necessary for the correct operation of the + httpd.service service, and should not be + removed or disabled. 
+ + + + + + + Other configuration files + + Default module configuration files and site-specific + configuration files are loaded from the + /etc/httpd/conf.d/ directory. Only files + with a suffix will be loaded. The + following files are provided: + + + + /etc/httpd/conf.d/userdir.conf + This file gives an example configuration for + to map URLs such as + to + /home/jim/public_html/. Userdir mapping + is disabled by default. + + + + /etc/httpd/conf.d/autoindex.conf + This file provides the default configuration + for which generates HTML + directory listings when enabled. It also makes file icon + image files available at the + URL-path. + + + + /etc/httpd/conf.d/welcome.conf + This file enables a "welcome page" at + if no content is present + in the default documentation root + /var/www/html. + + + + /etc/httpd/conf.d/ssl.conf (present only if is installed) + This file configures a TLS + listening on port + . If the default configuration is used, + the referenced test certificate and private key are + generated the first time httpd.service is + started; see + httpd-init.service8 + for more information. + + + + + + + Instantiated services + + As an alternative to (or in addition to) the + httpd.service unit, the instantiated template + service httpd@.service unit file can be used, + which starts httpd using a different + configuration file to the default. For example, + systemctl start httpd@foobar.service will + start httpd using the configuration file + /etc/httpd/conf/foobar.conf. See httpd@.service8 for more information. 
+ + + + + Files + + + /etc/httpd/conf/httpd.conf, + /etc/httpd/conf.d, + /etc/httpd/conf.modules.d + + + + + See also + + + httpd8, + httpd.service8, + , + + + + + + + diff --git a/SOURCES/httpd.logrotate b/SOURCES/httpd.logrotate new file mode 100644 index 0000000..28c9730 --- /dev/null +++ b/SOURCES/httpd.logrotate @@ -0,0 +1,9 @@ +/var/log/httpd/*log { + missingok + notifempty + sharedscripts + delaycompress + postrotate + /bin/systemctl reload httpd.service > /dev/null 2>/dev/null || true + endscript +} diff --git a/SOURCES/httpd.service b/SOURCES/httpd.service new file mode 100644 index 0000000..6ff4e8b --- /dev/null +++ b/SOURCES/httpd.service @@ -0,0 +1,32 @@ +# See httpd.service(8) for more information on using the httpd service. + +# Modifying this file in-place is not recommended, because changes +# will be overwritten during package upgrades. To customize the +# behaviour, run "systemctl edit httpd" to create an override unit. + +# For example, to pass additional options (such as -D definitions) to +# the httpd binary at startup, create an override unit (as is done by +# systemctl edit) and enter the following: + +# [Service] +# Environment=OPTIONS=-DMY_DEFINE + +[Unit] +Description=The Apache HTTP Server +Wants=httpd-init.service +After=network.target remote-fs.target nss-lookup.target httpd-init.service +Documentation=man:httpd.service(8) + +[Service] +Type=notify +Environment=LANG=C + +ExecStart=/usr/sbin/httpd $OPTIONS -DFOREGROUND +ExecReload=/usr/sbin/httpd $OPTIONS -k graceful +# Send SIGWINCH for graceful stop +KillSignal=SIGWINCH +KillMode=mixed +PrivateTmp=true + +[Install] +WantedBy=multi-user.target diff --git a/SOURCES/httpd.service.xml b/SOURCES/httpd.service.xml new file mode 100644 index 0000000..b2c72dd --- /dev/null +++ b/SOURCES/httpd.service.xml @@ -0,0 +1,332 @@ + + + + + + + httpd systemd units + httpd + AuthorOrtonJoejorton@redhat.com + + + + httpd.service + 8 + + + + httpd.service + httpd@.service + httpd.socket + 
httpd-init.service + httpd unit files for systemd + + + + + /usr/lib/systemd/system/httpd.service, + /usr/lib/systemd/system/httpd@.service, + /usr/lib/systemd/system/httpd-init.service, + /usr/lib/systemd/system/httpd.socket + + + + + Description + + This manual page describes the systemd + unit files used to integrate the httpd daemon + with systemd. Two main unit files are + available: httpd.service allows the + httpd daemon to be run as a system service, and + httpd.socket allows httpd to be started via + socket-based activation. Most systems will use + httpd.service. + + The apachectl command has been modified + to invoke systemctl for most uses, so for + example, running apachectl start is equivalent + to running systemctl start httpd.service. This + ensures that the running httpd daemon is tracked and managed by + systemd. In contrast, running + httpd directly from a root shell will start the + service outside of systemd; in this case, + default security restrictions described below (including, but not + limited to, SELinux) will not be enforced. + + + Changing default behaviour + + To change the default behaviour of the httpd service, an + over-ride file should be created, rather + than changing + /usr/lib/systemd/system/httpd.service + directly, since such changes would be lost over package + upgrades. Running systemctl edit + httpd.service or systemctl edit + httpd.socket as root will create a drop-in file (in + the former case, in + /etc/systemd/system/httpd.service.d) which + over-rides the system defaults. + + For example, to set the + environment variable for the daemon, run systemctl edit + httpd.service and enter: + + [Service] +Environment=LD_LIBRARY_PATH=/opt/vendor/lib + + + + Starting the service at boot time + + The httpd.service and httpd.socket units are + disabled by default. To start the httpd + service at boot time, run: systemctl enable + httpd.service. 
In the default configuration, the + httpd daemon will accept connections on port 80 (and, if mod_ssl + is installed, TLS connections on port 443) for any configured + IPv4 or IPv6 address. + + If httpd is configured to depend on any specific IP + address (for example, with a "Listen" directive) which may only + become available during start-up, or if httpd depends on other + services (such as a database daemon), the service + must be configured to ensure correct + start-up ordering. + + For example, to ensure httpd is only running after all + configured network interfaces are configured, create a drop-in + file (as described above) with the following section: + + [Unit] +After=network-online.target +Wants=network-online.target + + See + for more information on start-up ordering with systemd. + + + + + SSL/TLS certificate generation + + The httpd-init.service unit is provided + with the mod_ssl package. This oneshot unit automatically + creates a TLS server certificate and key (using a generated + self-signed CA certificate and key) for testing purposes before + httpd is started. To inhibit certificate generation, use + systemctl mask httpd-init.service after + installing mod_ssl, and adjust the mod_ssl configuration to use + an appropriate certificate and key. + + + + + Reloading and stopping the service + + When running systemctl reload + httpd.service, a graceful + restart is used, which sends a signal to the httpd parent + process to reload the configuration and re-open log files. Any + children with open connections at the time of reload will + terminate only once they have completed serving requests. This + prevents users of the server seeing errors (or potentially + losing data) due to the reload, but means some there is some + delay before any configuration changes take effect for all + users. + + Similarly, a graceful stop is used + when systemctl stop httpd.service is run, + which terminates the server only once active connections have + been processed. 
+ + To "ungracefully" stop the server without waiting for + requests to complete, use systemctl kill + --kill-who=main httpd; similarly to "ungracefully" + reload the configuration, use systemctl kill + --kill-who=main --signal=HUP httpd. + + + + Automated service restarts + + System packages (including the httpd package itself) may + restart the httpd service automatically after packages are + upgraded, installed, or removed. This is done using the + systemctl reload httpd.service, which + produces a graceful restart by default as + described above. + + To suppress automatic reloads entirely, create the file + /etc/sysconfig/httpd-disable-posttrans. + + + + Changing the default MPM (Multi-Processing Module) + + httpd offers a choice of multi-processing modules (MPMs), + which can be configured in + /etc/httpd/conf.modules.d/00-mpm.conf. + See + httpd.conf5 + for more information on changing the MPM. + + + + systemd integration and mod_systemd + + The httpd service uses the systemd + service type. The mod_systemd module must be + loaded (as in the default configuration) for this to work + correctly - the service will fail if this module is not + loaded. mod_systemd also makes worker and + request statistics available when running systemctl status + httpd. See + systemd.exec5 + for more information on systemd service types. + + + + Security and SELinux + + The default SELinux policy restricts the httpd service in + various ways. For example, the default policy limits the ports + to which httpd can bind (using the Listen + directive), which parts of the filesystem can be accessed, and + whether outgoing TCP connections are possible. Many of these + restrictions can be relaxed or adjusted by using + semanage to change booleans or other + types. See + httpd_selinux8 + for more information. + + The httpd service enables PrivateTmp + by default. 
The /tmp and + /var/tmp directories available within the + httpd process (and CGI scripts, etc) are not shared by other + processes. See + systemd.exec5 + for more information. + + + + + Socket activation + + Socket activation (see + systemd.socket5 + for more information) can be used with httpd + by enabling the httpd.socket unit. The + httpd listener configuration must exactly + match the ListenStream options configured for + the httpd.socket unit. The default + httpd.socket has a + ListenStream=80 and, if mod_ssl is installed, + ListenStream=443 added by a drop-in file. If + additional Listen directives are added to the + httpd configuration, corresponding + ListenStream options should be added via + drop-in files, for example via systemctl edit + httpd.socket. + + If using socket activation with httpd, only one listener + on any given TCP port is supported; a configuration with both + "Listen 127.0.0.1:80" and "Listen + 192.168.1.2:80" will not work. + + + + Instantiated services + + The httpd@.service unit is an + instantiated template service. An instance of this unit will be + started using the configuration file + /etc/httpd/conf/INSTANCE.conf, where + INSTANCE is replaced with the instance + name. For example, systemctl start + httpd@foobar.service will start httpd using the + configuration file + /etc/httpd/conf/foobar.conf. The + HTTPD_INSTANCE environment variable is set to + the instance name by the unit and is available for use within + the configuration file. + + To allow multiple instances of httpd to run + simultaneously, a number of configuration directives must be + changed, such as PidFile and + DefaultRuntimeDir to pick non-conflicting + paths, and Listen to choose different ports. + The example configuration file + /usr/share/doc/httpd/instance.conf + demonstrates how to make such changes using + the HTTPD_INSTANCE variable. 
+ + It can be useful to configure instances of + httpd@.service to reload when + httpd.service is reloaded; for example, + logrotate will reload only + httpd.service when logs are rotated. If this + behaviour is required, create a drop-in file for the instance as + follows: + + [Unit] +ReloadPropagatedFrom=httpd.service + + As with normal units, drop-in files for instances can be created + using systemctl edit, e.g. systemctl edit + httpd@foobar.service. + + + + + + Files + + /usr/lib/systemd/system/httpd.service, + /usr/lib/systemd/system/httpd.socket, + /usr/lib/systemd/system/httpd@.service, + /etc/systemd/system/httpd.service.d + + + + See also + + + httpd8, + httpd.conf5, + systemd1, + systemctl1, + systemd.service5, + systemd.exec5, + systemd.socket5, + httpd_selinux8, + semanage8 + + + + + + diff --git a/SOURCES/httpd.socket b/SOURCES/httpd.socket new file mode 100644 index 0000000..074695e --- /dev/null +++ b/SOURCES/httpd.socket @@ -0,0 +1,13 @@ +# See httpd.socket(8) for more information on using the httpd service. + +[Unit] +Description=Apache httpd Server Socket +Documentation=man:httpd.socket(8) + +[Socket] +ListenStream=80 +NoDelay=true +DeferAcceptSec=30 + +[Install] +WantedBy=sockets.target diff --git a/SOURCES/httpd.tmpfiles b/SOURCES/httpd.tmpfiles new file mode 100644 index 0000000..f148886 --- /dev/null +++ b/SOURCES/httpd.tmpfiles @@ -0,0 +1,2 @@ +d /run/httpd 710 root apache +d /run/httpd/htcacheclean 700 apache apache diff --git a/SOURCES/httpd@.service b/SOURCES/httpd@.service new file mode 100644 index 0000000..c58ae88 --- /dev/null +++ b/SOURCES/httpd@.service @@ -0,0 +1,23 @@ +# This is a template for httpd instances. +# See httpd@.service(8) for more information. 
+ +[Unit] +Description=The Apache HTTP Server +After=network.target remote-fs.target nss-lookup.target +Documentation=man:httpd@.service(8) + +[Service] +Type=notify +Environment=LANG=C +Environment=HTTPD_INSTANCE=%i +ExecStartPre=/bin/mkdir -m 710 -p /run/httpd/instance-%i +ExecStartPre=/bin/chown root.apache /run/httpd/instance-%i +ExecStart=/usr/sbin/httpd $OPTIONS -DFOREGROUND -f conf/%i.conf +ExecReload=/usr/sbin/httpd $OPTIONS -k graceful -f conf/%i.conf +# Send SIGWINCH for graceful stop +KillSignal=SIGWINCH +KillMode=mixed +PrivateTmp=true + +[Install] +WantedBy=multi-user.target diff --git a/SOURCES/index.html b/SOURCES/index.html new file mode 100644 index 0000000..06ad3fc --- /dev/null +++ b/SOURCES/index.html @@ -0,0 +1,123 @@ + + + + + Test Page for the Apache HTTP Server on Red Hat Enterprise Linux + + + + + +

    Red Hat Enterprise Linux Test Page

    + +
    +
    +

    This page is used to test the proper operation of the Apache HTTP server after it has been installed. If you can read this page, it means that the Apache HTTP server installed at this site is working properly.

    +
    +
    + +
    +
    +

    If you are a member of the general public:

    + +

    The fact that you are seeing this page indicates that the website you just visited is either experiencing problems, or is undergoing routine maintenance.

    + +

    If you would like to let the administrators of this website know that you've seen this page instead of the page you expected, you should send them e-mail. In general, mail sent to the name "webmaster" and directed to the website's domain should reach the appropriate person.

    + +

    For example, if you experienced problems while visiting www.example.com, you should send e-mail to "webmaster@example.com".

    + +

    For information on Red Hat Enterprise Linux, please visit the Red Hat, Inc. website. The documentation for Red Hat Enterprise Linux is available on the Red Hat, Inc. website.

    +
    +
    + +
    +

    If you are the website administrator:

    + +

    You may now add content to the directory /var/www/html/. Note that until you do so, people visiting your website will see this page, and not your content. To prevent this page from ever being used, follow the instructions in the file /etc/httpd/conf.d/welcome.conf.

    + +

    You are free to use the image below on web sites powered by the Apache HTTP Server:

    + +

    [ Powered by Apache ]

    + +
    +
    +
    + + diff --git a/SOURCES/instance.conf b/SOURCES/instance.conf new file mode 100644 index 0000000..f2b03f7 --- /dev/null +++ b/SOURCES/instance.conf @@ -0,0 +1,23 @@ +# +# This is an example instance-specific configuration file. See the +# httpd.service(8) man page for detailed information on using the +# the httpd@.service with instances. +# +# To use this example, copy instance.conf to /etc/httpd/conf/foobar.conf +# This config will then used as the default configuration when +# running: +# +# # systemctl start httpd@foobar.service +# +# The changes compared to the default are: +# - DefaultRuntime and Pidfile renamed to be instance-specific +# - default logfile names are prefixed with the instance name +# - /etc/httpd/conf.d is NOT included by default (conf.modules.d still is) +# +# Further customisations will be required for an instance to run +# simultaneously to httpd.service under the default configuration, +# e.g. changing the port used with Listen. +# + +DefaultRuntimeDir /run/httpd/instance-${HTTPD_INSTANCE} +PidFile /run/httpd/instance-${HTTPD_INSTANCE}.pid diff --git a/SOURCES/manual.conf b/SOURCES/manual.conf new file mode 100644 index 0000000..133652b --- /dev/null +++ b/SOURCES/manual.conf @@ -0,0 +1,13 @@ +# +# This configuration file allows the manual to be accessed at +# http://localhost/manual/ +# +Alias /manual /usr/share/httpd/manual + + + Options Indexes + AllowOverride None + Require all granted + + RedirectMatch 301 ^/manual/(?:da|de|en|es|fr|ja|ko|pt-br|ru|tr|zh-cn)(/.*)$ "/manual$1" + diff --git a/SOURCES/ssl.conf b/SOURCES/ssl.conf new file mode 100644 index 0000000..d28adf3 --- /dev/null +++ b/SOURCES/ssl.conf @@ -0,0 +1,203 @@ +# +# When we also provide SSL we have to listen to the +# standard HTTPS port in addition. +# +Listen 443 https + +## +## SSL Global Context +## +## All SSL configuration in this context applies both to +## the main server and all SSL-enabled virtual hosts. 
+## + +# Pass Phrase Dialog: +# Configure the pass phrase gathering process. +# The filtering dialog program (`builtin' is a internal +# terminal dialog) has to provide the pass phrase on stdout. +SSLPassPhraseDialog exec:/usr/libexec/httpd-ssl-pass-dialog + +# Inter-Process Session Cache: +# Configure the SSL Session Cache: First the mechanism +# to use and second the expiring timeout (in seconds). +SSLSessionCache shmcb:/run/httpd/sslcache(512000) +SSLSessionCacheTimeout 300 + +# +# Use "SSLCryptoDevice" to enable any supported hardware +# accelerators. Use "openssl engine -v" to list supported +# engine names. NOTE: If you enable an accelerator and the +# server does not start, consult the error logs and ensure +# your accelerator is functioning properly. +# +SSLCryptoDevice builtin +#SSLCryptoDevice ubsec + +## +## SSL Virtual Host Context +## + + + +# General setup for the virtual host, inherited from global configuration +#DocumentRoot "/var/www/html" +#ServerName www.example.com:443 + +# Use separate log files for the SSL virtual host; note that LogLevel +# is not inherited from httpd.conf. +ErrorLog logs/ssl_error_log +TransferLog logs/ssl_access_log +LogLevel warn + +# SSL Engine Switch: +# Enable/Disable SSL for this virtual host. +SSLEngine on + +# List the protocol versions which clients are allowed to connect with. +# The OpenSSL system profile is used by default. See +# update-crypto-policies(8) for more details. +#SSLProtocol all -SSLv3 +#SSLProxyProtocol all -SSLv3 + +# User agents such as web browsers are not configured for the user's +# own preference of either security or performance, therefore this +# must be the prerogative of the web server administrator who manages +# cpu load versus confidentiality, so enforce the server's cipher order. +SSLHonorCipherOrder on + +# SSL Cipher Suite: +# List the ciphers that the client is permitted to negotiate. +# See the mod_ssl documentation for a complete list. 
+# The OpenSSL system profile is configured by default. See +# update-crypto-policies(8) for more details. +SSLCipherSuite PROFILE=SYSTEM +SSLProxyCipherSuite PROFILE=SYSTEM + +# Point SSLCertificateFile at a PEM encoded certificate. If +# the certificate is encrypted, then you will be prompted for a +# pass phrase. Note that restarting httpd will prompt again. Keep +# in mind that if you have both an RSA and a DSA certificate you +# can configure both in parallel (to also allow the use of DSA +# ciphers, etc.) +# Some ECC cipher suites (http://www.ietf.org/rfc/rfc4492.txt) +# require an ECC certificate which can also be configured in +# parallel. +SSLCertificateFile /etc/pki/tls/certs/localhost.crt + +# Server Private Key: +# If the key is not combined with the certificate, use this +# directive to point at the key file. Keep in mind that if +# you've both a RSA and a DSA private key you can configure +# both in parallel (to also allow the use of DSA ciphers, etc.) +# ECC keys, when in use, can also be configured in parallel +SSLCertificateKeyFile /etc/pki/tls/private/localhost.key + +# Server Certificate Chain: +# Point SSLCertificateChainFile at a file containing the +# concatenation of PEM encoded CA certificates which form the +# certificate chain for the server certificate. Alternatively +# the referenced file can be the same as SSLCertificateFile +# when the CA certificates are directly appended to the server +# certificate for convenience. +#SSLCertificateChainFile /etc/pki/tls/certs/server-chain.crt + +# Certificate Authority (CA): +# Set the CA certificate verification path where to find CA +# certificates for client authentication or alternatively one +# huge file containing all of them (file must be PEM encoded) +#SSLCACertificateFile /etc/pki/tls/certs/ca-bundle.crt + +# Client Authentication (Type): +# Client certificate verification type and depth. Types are +# none, optional, require and optional_no_ca. 
Depth is a +# number which specifies how deeply to verify the certificate +# issuer chain before deciding the certificate is not valid. +#SSLVerifyClient require +#SSLVerifyDepth 10 + +# Access Control: +# With SSLRequire you can do per-directory access control based +# on arbitrary complex boolean expressions containing server +# variable checks and other lookup directives. The syntax is a +# mixture between C and Perl. See the mod_ssl documentation +# for more details. +# +#SSLRequire ( %{SSL_CIPHER} !~ m/^(EXP|NULL)/ \ +# and %{SSL_CLIENT_S_DN_O} eq "Snake Oil, Ltd." \ +# and %{SSL_CLIENT_S_DN_OU} in {"Staff", "CA", "Dev"} \ +# and %{TIME_WDAY} >= 1 and %{TIME_WDAY} <= 5 \ +# and %{TIME_HOUR} >= 8 and %{TIME_HOUR} <= 20 ) \ +# or %{REMOTE_ADDR} =~ m/^192\.76\.162\.[0-9]+$/ +# + +# SSL Engine Options: +# Set various options for the SSL engine. +# o FakeBasicAuth: +# Translate the client X.509 into a Basic Authorisation. This means that +# the standard Auth/DBMAuth methods can be used for access control. The +# user name is the `one line' version of the client's X.509 certificate. +# Note that no password is obtained from the user. Every entry in the user +# file needs this password: `xxj31ZMTZzkVA'. +# o ExportCertData: +# This exports two additional environment variables: SSL_CLIENT_CERT and +# SSL_SERVER_CERT. These contain the PEM-encoded certificates of the +# server (always existing) and the client (only existing when client +# authentication is used). This can be used to import the certificates +# into CGI scripts. +# o StdEnvVars: +# This exports the standard SSL/TLS related `SSL_*' environment variables. +# Per default this exportation is switched off for performance reasons, +# because the extraction step is an expensive operation and is usually +# useless for serving static content. So one usually enables the +# exportation for CGI and SSI requests only. 
+# o StrictRequire: +# This denies access when "SSLRequireSSL" or "SSLRequire" applied even +# under a "Satisfy any" situation, i.e. when it applies access is denied +# and no other module can change it. +# o OptRenegotiate: +# This enables optimized SSL connection renegotiation handling when SSL +# directives are used in per-directory context. +#SSLOptions +FakeBasicAuth +ExportCertData +StrictRequire + + SSLOptions +StdEnvVars + + + SSLOptions +StdEnvVars + + +# SSL Protocol Adjustments: +# The safe and default but still SSL/TLS standard compliant shutdown +# approach is that mod_ssl sends the close notify alert but doesn't wait for +# the close notify alert from client. When you need a different shutdown +# approach you can use one of the following variables: +# o ssl-unclean-shutdown: +# This forces an unclean shutdown when the connection is closed, i.e. no +# SSL close notify alert is sent or allowed to be received. This violates +# the SSL/TLS standard but is needed for some brain-dead browsers. Use +# this when you receive I/O errors because of the standard approach where +# mod_ssl sends the close notify alert. +# o ssl-accurate-shutdown: +# This forces an accurate shutdown when the connection is closed, i.e. a +# SSL close notify alert is sent and mod_ssl waits for the close notify +# alert of the client. This is 100% SSL/TLS standard compliant, but in +# practice often causes hanging connections with brain-dead browsers. Use +# this only for browsers where you know that their SSL implementation +# works correctly. +# Notice: Most problems of broken clients are also related to the HTTP +# keep-alive facility, so you usually additionally want to disable +# keep-alive for those clients, too. Use variable "nokeepalive" for this. +# Similarly, one has to force some clients to use HTTP/1.0 to workaround +# their broken HTTP/1.1 implementation. Use variables "downgrade-1.0" and +# "force-response-1.0" for this. 
+BrowserMatch "MSIE [2-5]" \ + nokeepalive ssl-unclean-shutdown \ + downgrade-1.0 force-response-1.0 + +# Per-Server Logging: +# The home of a custom SSL log file. Use this when you want a +# compact non-error SSL logfile on a virtual host basis. +CustomLog logs/ssl_request_log \ + "%t %h %{SSL_PROTOCOL}x %{SSL_CIPHER}x \"%r\" %b" + + + diff --git a/SOURCES/userdir.conf b/SOURCES/userdir.conf new file mode 100644 index 0000000..b5d7a49 --- /dev/null +++ b/SOURCES/userdir.conf @@ -0,0 +1,36 @@ +# +# UserDir: The name of the directory that is appended onto a user's home +# directory if a ~user request is received. +# +# The path to the end user account 'public_html' directory must be +# accessible to the webserver userid. This usually means that ~userid +# must have permissions of 711, ~userid/public_html must have permissions +# of 755, and documents contained therein must be world-readable. +# Otherwise, the client will only receive a "403 Forbidden" message. +# + + # + # UserDir is disabled by default since it can confirm the presence + # of a username on the system (depending on home directory + # permissions). + # + UserDir disabled + + # + # To enable requests to /~user/ to serve the user's public_html + # directory, remove the "UserDir disabled" line above, and uncomment + # the following line instead: + # + #UserDir public_html + + +# +# Control access to UserDir directories. The following is an example +# for a site where these directories are restricted to read-only. +# + + AllowOverride FileInfo AuthConfig Limit Indexes + Options MultiViews Indexes SymLinksIfOwnerMatch IncludesNoExec + Require method GET POST OPTIONS + + diff --git a/SOURCES/welcome.conf b/SOURCES/welcome.conf new file mode 100644 index 0000000..5d1e452 --- /dev/null +++ b/SOURCES/welcome.conf @@ -0,0 +1,18 @@ +# +# This configuration file enables the default "Welcome" page if there +# is no default index page present for the root URL. 
To disable the +# Welcome page, comment out all the lines below. +# +# NOTE: if this file is removed, it will be restored on upgrades. +# + + Options -Indexes + ErrorDocument 403 /.noindex.html + + + + AllowOverride None + Require all granted + + +Alias /.noindex.html /usr/share/httpd/noindex/index.html diff --git a/SPECS/httpd.spec b/SPECS/httpd.spec new file mode 100644 index 0000000..d07a4b1 --- /dev/null +++ b/SPECS/httpd.spec @@ -0,0 +1,1628 @@ +%define contentdir %{_datadir}/httpd +%define docroot /var/www +%define suexec_caller apache +%define mmn 20120211 +%define mmnisa %{mmn}%{__isa_name}%{__isa_bits} +%define vstring %(source /etc/os-release; echo ${REDHAT_SUPPORT_PRODUCT}) +%if 0%{?fedora} > 26 || 0%{?rhel} > 7 +%global mpm event +%else +%global mpm prefork +%endif + +Summary: Apache HTTP Server +Name: httpd +Version: 2.4.37 +Release: 30%{?dist} +URL: https://httpd.apache.org/ +Source0: https://www.apache.org/dist/httpd/httpd-%{version}.tar.bz2 +Source1: index.html +Source2: httpd.logrotate +Source3: instance.conf +Source4: httpd-ssl-pass-dialog +Source5: httpd.tmpfiles +Source6: httpd.service +Source7: action-graceful.sh +Source8: action-configtest.sh +Source10: httpd.conf +Source11: 00-base.conf +Source12: 00-mpm.conf +Source13: 00-lua.conf +Source14: 01-cgi.conf +Source15: 00-dav.conf +Source16: 00-proxy.conf +Source17: 00-ssl.conf +Source18: 01-ldap.conf +Source19: 00-proxyhtml.conf +Source20: userdir.conf +Source21: ssl.conf +Source22: welcome.conf +Source23: manual.conf +Source24: 00-systemd.conf +Source25: 01-session.conf +Source26: 10-listen443.conf +Source27: httpd.socket +Source28: 00-optional.conf +# Documentation +Source30: README.confd +Source31: README.confmod +Source32: httpd.service.xml +Source33: htcacheclean.service.xml +Source34: httpd.conf.xml +Source40: htcacheclean.service +Source41: htcacheclean.sysconf +Source42: httpd-init.service +Source43: httpd-ssl-gencerts +Source44: httpd@.service +Source45: config.layout + +# build/scripts 
patches +# http://bugzilla.redhat.com/show_bug.cgi?id=1231924 +# http://bugzilla.redhat.com/show_bug.cgi?id=842736 +# http://bugzilla.redhat.com/show_bug.cgi?id=1214401 +Patch1: httpd-2.4.35-apachectl.patch +Patch2: httpd-2.4.28-apxs.patch +Patch3: httpd-2.4.35-deplibs.patch + +# Needed for socket activation and mod_systemd patch +Patch19: httpd-2.4.35-detect-systemd.patch + +# Features/functional changes +Patch20: httpd-2.4.32-export.patch +Patch21: httpd-2.4.35-corelimit.patch +Patch22: httpd-2.4.35-selinux.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1170215 +Patch23: httpd-2.4.28-icons.patch +Patch24: httpd-2.4.35-systemd.patch +Patch25: httpd-2.4.35-cachehardmax.patch +Patch26: httpd-2.4.28-socket-activation.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1109119 +Patch27: httpd-2.4.35-sslciphdefault.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1332242 +Patch28: httpd-2.4.28-statements-comment.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=811714 +Patch29: httpd-2.4.35-full-release.patch +Patch30: httpd-2.4.35-freebind.patch +Patch31: httpd-2.4.35-r1830819+.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1638738 +Patch32: httpd-2.4.37-sslprotdefault.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1747898 +Patch33: httpd-2.4.37-mod-md-mod-ssl-hooks.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1725031 +Patch34: httpd-2.4.37-r1861793+.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1704317 +Patch35: httpd-2.4.37-sslkeylogfile-support.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1794728 +Patch36: httpd-2.4.37-session-expiry-updt-int.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1209162 +Patch37: httpd-2.4.37-logjournal.patch +# Bug fixes +# https://bugzilla.redhat.com/show_bug.cgi?id=1397243 +Patch61: httpd-2.4.35-r1738878.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1170206 +Patch62: httpd-2.4.35-r1633085.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1448892 +Patch63: 
httpd-2.4.28-r1811831.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1602548 +Patch65: httpd-2.4.35-r1842888.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1653009 +# https://bugzilla.redhat.com/show_bug.cgi?id=1672977 +# https://bugzilla.redhat.com/show_bug.cgi?id=1673022 +Patch66: httpd-2.4.37-r1842929+.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1630432 +Patch67: httpd-2.4.35-r1825120.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1670716 +Patch68: httpd-2.4.37-fips-segfault.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1669221 +Patch70: httpd-2.4.37-r1840554.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1673022 +Patch71: httpd-2.4.37-mod-md-perms.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1724549 +Patch72: httpd-2.4.37-mod-mime-magic-strdup.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1724034 +Patch73: httpd-2.4.35-ocsp-wrong-ctx.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1633224 +Patch74: httpd-2.4.37-r1828172+.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1775158 +Patch75: httpd-2.4.37-r1870095+.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1771847 +Patch76: httpd-2.4.37-proxy-continue.patch +Patch77: httpd-2.4.37-balancer-failover.patch + + +# Security fixes +Patch200: httpd-2.4.37-r1851471.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1694980 +Patch201: httpd-2.4.37-CVE-2019-0211.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1695025 +Patch202: httpd-2.4.37-CVE-2019-0215.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1696141 +Patch203: httpd-2.4.37-CVE-2019-0217.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1696097 +Patch204: httpd-2.4.37-CVE-2019-0220.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1741860 +# https://bugzilla.redhat.com/show_bug.cgi?id=1741864 +# https://bugzilla.redhat.com/show_bug.cgi?id=1741868 +Patch205: httpd-2.4.34-CVE-2019-9511-and-9516-and-9517.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1823259 +# 
https://bugzilla.redhat.com/show_bug.cgi?id=1747284 +# fixes both CVE-2020-1927 and CVE-2019-10098 +Patch206: httpd-2.4.37-CVE-2019-10098.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1747281 +Patch207: httpd-2.4.37-CVE-2019-10092.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1747291 +Patch208: httpd-2.4.37-CVE-2019-10097.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1820772 +Patch209: httpd-2.4.37-CVE-2020-1934.patch + +License: ASL 2.0 +Group: System Environment/Daemons +BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root +BuildRequires: autoconf, perl-interpreter, perl-generators, pkgconfig, findutils, xmlto +BuildRequires: zlib-devel, libselinux-devel, lua-devel, brotli-devel +BuildRequires: apr-devel >= 1.5.0, apr-util-devel >= 1.5.0, pcre-devel >= 5.0 +BuildRequires: systemd-devel +Requires: /etc/mime.types, system-logos-httpd +Obsoletes: httpd-suexec +Provides: webserver +Provides: mod_dav = %{version}-%{release}, httpd-suexec = %{version}-%{release} +Provides: httpd-mmn = %{mmn}, httpd-mmn = %{mmnisa} +Requires: httpd-tools = %{version}-%{release} +Requires: httpd-filesystem = %{version}-%{release} +Requires: mod_http2 +Requires(pre): httpd-filesystem +Requires(preun): systemd-units +Requires(postun): systemd-units +Requires(post): systemd-units +Conflicts: apr < 1.5.0-1 + +%description +The Apache HTTP Server is a powerful, efficient, and extensible +web server. + +%package devel +Group: Development/Libraries +Summary: Development interfaces for the Apache HTTP server +Requires: apr-devel, apr-util-devel, pkgconfig +Requires: httpd = %{version}-%{release} + +%description devel +The httpd-devel package contains the APXS binary and other files +that you need to build Dynamic Shared Objects (DSOs) for the +Apache HTTP Server. + +If you are installing the Apache HTTP server and you want to be +able to compile or develop additional modules for Apache, you need +to install this package. 
+ +%package manual +Group: Documentation +Summary: Documentation for the Apache HTTP server +Requires: httpd = %{version}-%{release} +Obsoletes: secureweb-manual, apache-manual +BuildArch: noarch + +%description manual +The httpd-manual package contains the complete manual and +reference guide for the Apache HTTP server. The information can +also be found at http://httpd.apache.org/docs/2.4/. + +%package filesystem +Group: System Environment/Daemons +Summary: The basic directory layout for the Apache HTTP server +BuildArch: noarch +Requires(pre): /usr/sbin/useradd + +%description filesystem +The httpd-filesystem package contains the basic directory layout +for the Apache HTTP server including the correct permissions +for the directories. + +%package tools +Group: System Environment/Daemons +Summary: Tools for use with the Apache HTTP Server + +%description tools +The httpd-tools package contains tools which can be used with +the Apache HTTP Server. + +%package -n mod_ssl +Group: System Environment/Daemons +Summary: SSL/TLS module for the Apache HTTP Server +Epoch: 1 +BuildRequires: openssl-devel +Requires(pre): httpd-filesystem +Requires: httpd = 0:%{version}-%{release}, httpd-mmn = %{mmnisa} +Requires: sscg >= 2.2.0 +Obsoletes: stronghold-mod_ssl +# Require an OpenSSL which supports PROFILE=SYSTEM +Conflicts: openssl-libs < 1:1.0.1h-4 + +%description -n mod_ssl +The mod_ssl module provides strong cryptography for the Apache Web +server via the Secure Sockets Layer (SSL) and Transport Layer +Security (TLS) protocols. + +%package -n mod_proxy_html +Group: System Environment/Daemons +Summary: HTML and XML content filters for the Apache HTTP Server +Requires: httpd = 0:%{version}-%{release}, httpd-mmn = %{mmnisa} +BuildRequires: libxml2-devel +Epoch: 1 +Obsoletes: mod_proxy_html < 1:2.4.1-2 + +%description -n mod_proxy_html +The mod_proxy_html and mod_xml2enc modules provide filters which can +transform and modify HTML and XML content. 
+ +%package -n mod_ldap +Group: System Environment/Daemons +Summary: LDAP authentication modules for the Apache HTTP Server +Requires: httpd = 0:%{version}-%{release}, httpd-mmn = %{mmnisa} +Requires: apr-util-ldap + +%description -n mod_ldap +The mod_ldap and mod_authnz_ldap modules add support for LDAP +authentication to the Apache HTTP Server. + +%package -n mod_session +Group: System Environment/Daemons +Summary: Session interface for the Apache HTTP Server +Requires: httpd = 0:%{version}-%{release}, httpd-mmn = %{mmnisa} + +%description -n mod_session +The mod_session module and associated backends provide an abstract +interface for storing and accessing per-user session data. + +%prep +%setup -q +%patch1 -p1 -b .apctl +%patch2 -p1 -b .apxs +%patch3 -p1 -b .deplibs + +%patch19 -p1 -b .detectsystemd +%patch20 -p1 -b .export +%patch21 -p1 -b .corelimit +%patch22 -p1 -b .selinux +%patch23 -p1 -b .icons +%patch24 -p1 -b .systemd +%patch25 -p1 -b .cachehardmax +%patch26 -p1 -b .socketactivation +%patch27 -p1 -b .sslciphdefault +%patch28 -p1 -b .statementscomment +%patch29 -p1 -b .fullrelease +%patch30 -p1 -b .freebind +%patch31 -p1 -b .r1830819+ +%patch32 -p1 -b .sslprotdefault +%patch33 -p1 -b .mod-md-mod-ssl-hooks +%patch34 -p1 -b .r1861793+ +%patch35 -p1 -b .sslkeylogfile-support +%patch36 -p1 -b .session-expiry +%patch37 -p1 -b .logjournal + +%patch61 -p1 -b .r1738878 +%patch62 -p1 -b .r1633085 +%patch63 -p1 -b .r1811831 +%patch65 -p1 -b .r1842888 +%patch66 -p1 -b .r1842929+ +%patch67 -p1 -b .r1825120 +%patch68 -p1 -b .fipscore +%patch70 -p1 -b .r1840554 +%patch71 -p1 -b .modmdperms +%patch72 -p1 -b .mimemagic +%patch73 -p1 -b .ocspwrongctx +%patch74 -p1 -b .r1828172+ +%patch75 -p1 -b .r1870095+ +%patch76 -p1 -b .proxy-continue +%patch77 -p1 -b .balancer-failover + + +%patch200 -p1 -b .r1851471 +%patch201 -p1 -b .CVE-2019-0211 +%patch202 -p1 -b .CVE-2019-0215 +%patch203 -p1 -b .CVE-2019-0217 +%patch204 -p1 -b .CVE-2019-0220 +%patch205 -p1 -b 
.CVE-2019-9511-and-9516-and-9517 +%patch206 -p1 -b .CVE-2019-10098 +%patch207 -p1 -b .CVE-2019-10092 +%patch208 -p1 -b .CVE-2019-10097 +%patch209 -p1 -b .CVE-2020-1934 + +# Patch in the vendor string +sed -i '/^#define PLATFORM/s/Unix/%{vstring}/' os/unix/os.h +sed -i 's/@RELEASE@/%{release}/' server/core.c + +# Prevent use of setcap in "install-suexec-caps" target. +sed -i '/suexec/s,setcap ,echo Skipping setcap for ,' Makefile.in + +# Example conf for instances +cp $RPM_SOURCE_DIR/instance.conf . +sed < $RPM_SOURCE_DIR/httpd.conf >> instance.conf ' +0,/^ServerRoot/d; +/# Supplemental configuration/,$d +/^ *CustomLog .logs/s,logs/,logs/${HTTPD_INSTANCE}_, +/^ *ErrorLog .logs/s,logs/,logs/${HTTPD_INSTANCE}_, +' +touch -r $RPM_SOURCE_DIR/instance.conf instance.conf + +# Safety check: prevent build if defined MMN does not equal upstream MMN. +vmmn=`echo MODULE_MAGIC_NUMBER_MAJOR | cpp -include include/ap_mmn.h | sed -n '/^2/p'` +if test "x${vmmn}" != "x%{mmn}"; then + : Error: Upstream MMN is now ${vmmn}, packaged MMN is %{mmn} + : Update the mmn macro and rebuild. + exit 1 +fi + +# Provide default layout +cp $RPM_SOURCE_DIR/config.layout . 
+ +sed ' +s,@MPM@,%{mpm},g +s,@DOCROOT@,%{docroot},g +s,@LOGDIR@,%{_localstatedir}/log/httpd,g +' < $RPM_SOURCE_DIR/httpd.conf.xml \ + > httpd.conf.xml + +xmlto man ./httpd.conf.xml +xmlto man $RPM_SOURCE_DIR/htcacheclean.service.xml +xmlto man $RPM_SOURCE_DIR/httpd.service.xml + +: Building with MMN %{mmn}, MMN-ISA %{mmnisa} +: Default MPM is %{mpm}, vendor string is '%{vstring}' + +%build +# forcibly prevent use of bundled apr, apr-util, pcre +rm -rf srclib/{apr,apr-util,pcre} + +# regenerate configure scripts +autoheader && autoconf || exit 1 + +# Before configure; fix location of build dir in generated apxs +%{__perl} -pi -e "s:\@exp_installbuilddir\@:%{_libdir}/httpd/build:g" \ + support/apxs.in + +export CFLAGS=$RPM_OPT_FLAGS +export LDFLAGS="-Wl,-z,relro,-z,now" + +# Hard-code path to links to avoid unnecessary builddep +export LYNX_PATH=/usr/bin/links + +# Build the daemon +./configure \ + --prefix=%{_sysconfdir}/httpd \ + --exec-prefix=%{_prefix} \ + --bindir=%{_bindir} \ + --sbindir=%{_sbindir} \ + --mandir=%{_mandir} \ + --libdir=%{_libdir} \ + --sysconfdir=%{_sysconfdir}/httpd/conf \ + --includedir=%{_includedir}/httpd \ + --libexecdir=%{_libdir}/httpd/modules \ + --datadir=%{contentdir} \ + --enable-layout=Fedora \ + --with-installbuilddir=%{_libdir}/httpd/build \ + --enable-mpms-shared=all \ + --with-apr=%{_prefix} --with-apr-util=%{_prefix} \ + --enable-suexec --with-suexec \ + --enable-suexec-capabilities \ + --with-suexec-caller=%{suexec_caller} \ + --with-suexec-docroot=%{docroot} \ + --without-suexec-logfile \ + --with-suexec-syslog \ + --with-suexec-bin=%{_sbindir}/suexec \ + --with-suexec-uidmin=1000 --with-suexec-gidmin=1000 \ + --with-brotli \ + --enable-pie \ + --with-pcre \ + --enable-mods-shared=all \ + --enable-ssl --with-ssl --disable-distcache \ + --enable-proxy --enable-proxy-fdpass \ + --enable-cache \ + --enable-disk-cache \ + --enable-ldap --enable-authnz-ldap \ + --enable-cgid --enable-cgi \ + --enable-cgid-fdpassing \ + 
--enable-authn-anon --enable-authn-alias \ + --disable-imagemap --disable-file-cache \ + --disable-http2 \ + --disable-md \ + $* +make %{?_smp_mflags} + +%install +rm -rf $RPM_BUILD_ROOT + +make DESTDIR=$RPM_BUILD_ROOT install + +# Install systemd service files +mkdir -p $RPM_BUILD_ROOT%{_unitdir} +for s in httpd.service htcacheclean.service httpd.socket \ + httpd@.service httpd-init.service; do + install -p -m 644 $RPM_SOURCE_DIR/${s} \ + $RPM_BUILD_ROOT%{_unitdir}/${s} +done + +# install conf file/directory +mkdir $RPM_BUILD_ROOT%{_sysconfdir}/httpd/conf.d \ + $RPM_BUILD_ROOT%{_sysconfdir}/httpd/conf.modules.d +install -m 644 $RPM_SOURCE_DIR/README.confd \ + $RPM_BUILD_ROOT%{_sysconfdir}/httpd/conf.d/README +install -m 644 $RPM_SOURCE_DIR/README.confmod \ + $RPM_BUILD_ROOT%{_sysconfdir}/httpd/conf.modules.d/README +for f in 00-base.conf 00-mpm.conf 00-lua.conf 01-cgi.conf 00-dav.conf \ + 00-proxy.conf 00-ssl.conf 01-ldap.conf 00-proxyhtml.conf \ + 00-systemd.conf 01-session.conf 00-optional.conf; do + install -m 644 -p $RPM_SOURCE_DIR/$f \ + $RPM_BUILD_ROOT%{_sysconfdir}/httpd/conf.modules.d/$f +done + +sed -i '/^#LoadModule mpm_%{mpm}_module /s/^#//' \ + $RPM_BUILD_ROOT%{_sysconfdir}/httpd/conf.modules.d/00-mpm.conf +touch -r $RPM_SOURCE_DIR/00-mpm.conf \ + $RPM_BUILD_ROOT%{_sysconfdir}/httpd/conf.modules.d/00-mpm.conf + +# install systemd override drop directory +# Web application packages can drop snippets into this location if +# they need ExecStart[pre|post]. 
+mkdir $RPM_BUILD_ROOT%{_unitdir}/httpd.service.d +mkdir $RPM_BUILD_ROOT%{_unitdir}/httpd.socket.d + +install -m 644 -p $RPM_SOURCE_DIR/10-listen443.conf \ + $RPM_BUILD_ROOT%{_unitdir}/httpd.socket.d/10-listen443.conf + +for f in welcome.conf ssl.conf manual.conf userdir.conf; do + install -m 644 -p $RPM_SOURCE_DIR/$f \ + $RPM_BUILD_ROOT%{_sysconfdir}/httpd/conf.d/$f +done + +# Split-out extra config shipped as default in conf.d: +for f in autoindex; do + install -m 644 docs/conf/extra/httpd-${f}.conf \ + $RPM_BUILD_ROOT%{_sysconfdir}/httpd/conf.d/${f}.conf +done + +# Extra config trimmed: +rm -v docs/conf/extra/httpd-{ssl,userdir}.conf + +rm $RPM_BUILD_ROOT%{_sysconfdir}/httpd/conf/*.conf +install -m 644 -p $RPM_SOURCE_DIR/httpd.conf \ + $RPM_BUILD_ROOT%{_sysconfdir}/httpd/conf/httpd.conf + +mkdir $RPM_BUILD_ROOT%{_sysconfdir}/sysconfig +install -m 644 -p $RPM_SOURCE_DIR/htcacheclean.sysconf \ + $RPM_BUILD_ROOT%{_sysconfdir}/sysconfig/htcacheclean + +# tmpfiles.d configuration +mkdir -p $RPM_BUILD_ROOT%{_prefix}/lib/tmpfiles.d +install -m 644 -p $RPM_SOURCE_DIR/httpd.tmpfiles \ + $RPM_BUILD_ROOT%{_prefix}/lib/tmpfiles.d/httpd.conf + +# Other directories +mkdir -p $RPM_BUILD_ROOT%{_localstatedir}/lib/dav \ + $RPM_BUILD_ROOT%{_localstatedir}/lib/httpd \ + $RPM_BUILD_ROOT/run/httpd/htcacheclean + +# Substitute in defaults which are usually done (badly) by "make install" +sed -i \ + "s,@@ServerRoot@@/var,%{_localstatedir}/lib/dav,; + s,@@ServerRoot@@/user.passwd,/etc/httpd/conf/user.passwd,; + s,@@ServerRoot@@/docs,%{docroot},; + s,@@ServerRoot@@,%{docroot},; + s,@@Port@@,80,;" \ + docs/conf/extra/*.conf + +# Create cache directory +mkdir -p $RPM_BUILD_ROOT%{_localstatedir}/cache/httpd \ + $RPM_BUILD_ROOT%{_localstatedir}/cache/httpd/proxy \ + $RPM_BUILD_ROOT%{_localstatedir}/cache/httpd/ssl + +# Make the MMN accessible to module packages +echo %{mmnisa} > $RPM_BUILD_ROOT%{_includedir}/httpd/.mmn +mkdir -p $RPM_BUILD_ROOT%{_rpmconfigdir}/macros.d +cat > 
$RPM_BUILD_ROOT%{_rpmconfigdir}/macros.d/macros.httpd < $RPM_BUILD_ROOT%{_mandir}/man8/httpd.8 + +# Make ap_config_layout.h libdir-agnostic +sed -i '/.*DEFAULT_..._LIBEXECDIR/d;/DEFAULT_..._INSTALLBUILDDIR/d' \ + $RPM_BUILD_ROOT%{_includedir}/httpd/ap_config_layout.h + +# Fix path to instdso in special.mk +sed -i '/instdso/s,top_srcdir,top_builddir,' \ + $RPM_BUILD_ROOT%{_libdir}/httpd/build/special.mk + +# Remove unpackaged files +rm -vf \ + $RPM_BUILD_ROOT%{_libdir}/*.exp \ + $RPM_BUILD_ROOT/etc/httpd/conf/mime.types \ + $RPM_BUILD_ROOT%{_libdir}/httpd/modules/*.exp \ + $RPM_BUILD_ROOT%{_libdir}/httpd/build/config.nice \ + $RPM_BUILD_ROOT%{_bindir}/{ap?-config,dbmmanage} \ + $RPM_BUILD_ROOT%{_sbindir}/{checkgid,envvars*} \ + $RPM_BUILD_ROOT%{contentdir}/htdocs/* \ + $RPM_BUILD_ROOT%{_mandir}/man1/dbmmanage.* \ + $RPM_BUILD_ROOT%{contentdir}/cgi-bin/* + +rm -rf $RPM_BUILD_ROOT/etc/httpd/conf/{original,extra} + +%pre filesystem +getent group apache >/dev/null || groupadd -g 48 -r apache +getent passwd apache >/dev/null || \ + useradd -r -u 48 -g apache -s /sbin/nologin \ + -d %{contentdir} -c "Apache" apache +exit 0 + +%post +%systemd_post httpd.service htcacheclean.service httpd.socket + +%preun +%systemd_preun httpd.service htcacheclean.service httpd.socket + +%postun +%systemd_postun httpd.service htcacheclean.service httpd.socket + +# Trigger for conversion from SysV, per guidelines at: +# https://fedoraproject.org/wiki/Packaging:ScriptletSnippets#Systemd +%triggerun -- httpd < 2.2.21-5 +# Save the current service runlevel info +# User must manually run systemd-sysv-convert --apply httpd +# to migrate them to systemd targets +/usr/bin/systemd-sysv-convert --save httpd.service >/dev/null 2>&1 ||: + +# Run these because the SysV package being removed won't do them +/sbin/chkconfig --del httpd >/dev/null 2>&1 || : + +%posttrans +test -f /etc/sysconfig/httpd-disable-posttrans || \ + /bin/systemctl try-restart --no-block httpd.service htcacheclean.service >/dev/null 
2>&1 || : + +%check +# Check the built modules are all PIC +if readelf -d $RPM_BUILD_ROOT%{_libdir}/httpd/modules/*.so | grep TEXTREL; then + : modules contain non-relocatable code + exit 1 +fi +set +x +rv=0 +# Ensure every mod_* that's built is loaded. +for f in $RPM_BUILD_ROOT%{_libdir}/httpd/modules/*.so; do + m=${f##*/} + if ! grep -q $m $RPM_BUILD_ROOT%{_sysconfdir}/httpd/conf.modules.d/*.conf; then + echo ERROR: Module $m not configured. Disable it, or load it. + rv=1 + fi +done +# Ensure every loaded mod_* is actually built +mods=`grep -h ^LoadModule $RPM_BUILD_ROOT%{_sysconfdir}/httpd/conf.modules.d/*.conf | sed 's,.*modules/,,'` +for m in $mods; do + f=$RPM_BUILD_ROOT%{_libdir}/httpd/modules/${m} + if ! test -x $f; then + echo ERROR: Module $m is configured but not built. + rv=1 + fi +done +set -x +exit $rv + +%clean +rm -rf $RPM_BUILD_ROOT + +%files +%defattr(-,root,root) + +%doc ABOUT_APACHE README CHANGES LICENSE VERSIONING NOTICE +%doc docs/conf/extra/*.conf +%doc instance.conf + +%{_sysconfdir}/httpd/modules +%{_sysconfdir}/httpd/logs +%{_sysconfdir}/httpd/state +%{_sysconfdir}/httpd/run +%dir %{_sysconfdir}/httpd/conf +%config(noreplace) %{_sysconfdir}/httpd/conf/httpd.conf +%config(noreplace) %{_sysconfdir}/httpd/conf/magic + +%config(noreplace) %{_sysconfdir}/logrotate.d/httpd + +%config(noreplace) %{_sysconfdir}/httpd/conf.d/*.conf +%exclude %{_sysconfdir}/httpd/conf.d/ssl.conf +%exclude %{_sysconfdir}/httpd/conf.d/manual.conf + +%dir %{_sysconfdir}/httpd/conf.modules.d +%{_sysconfdir}/httpd/conf.modules.d/README +%config(noreplace) %{_sysconfdir}/httpd/conf.modules.d/*.conf +%exclude %{_sysconfdir}/httpd/conf.modules.d/00-ssl.conf +%exclude %{_sysconfdir}/httpd/conf.modules.d/00-proxyhtml.conf +%exclude %{_sysconfdir}/httpd/conf.modules.d/01-ldap.conf +%exclude %{_sysconfdir}/httpd/conf.modules.d/01-session.conf + +%config(noreplace) %{_sysconfdir}/sysconfig/htcacheclean +%{_prefix}/lib/tmpfiles.d/httpd.conf + +%dir 
%{_libexecdir}/initscripts/legacy-actions/httpd +%{_libexecdir}/initscripts/legacy-actions/httpd/* + +%{_sbindir}/ht* +%{_sbindir}/fcgistarter +%{_sbindir}/apachectl +%{_sbindir}/rotatelogs +%caps(cap_setuid,cap_setgid+pe) %attr(510,root,%{suexec_caller}) %{_sbindir}/suexec + +%dir %{_libdir}/httpd +%dir %{_libdir}/httpd/modules +%{_libdir}/httpd/modules/mod*.so +%exclude %{_libdir}/httpd/modules/mod_auth_form.so +%exclude %{_libdir}/httpd/modules/mod_ssl.so +%exclude %{_libdir}/httpd/modules/mod_*ldap.so +%exclude %{_libdir}/httpd/modules/mod_proxy_html.so +%exclude %{_libdir}/httpd/modules/mod_xml2enc.so +%exclude %{_libdir}/httpd/modules/mod_session*.so + +%dir %{contentdir}/error +%dir %{contentdir}/error/include +%dir %{contentdir}/noindex +%{contentdir}/icons/* +%{contentdir}/error/README +%{contentdir}/error/*.var +%{contentdir}/error/include/*.html +%{contentdir}/noindex/index.html + +%attr(0710,root,apache) %dir /run/httpd +%attr(0700,apache,apache) %dir /run/httpd/htcacheclean +%attr(0700,root,root) %dir %{_localstatedir}/log/httpd +%attr(0700,apache,apache) %dir %{_localstatedir}/lib/dav +%attr(0700,apache,apache) %dir %{_localstatedir}/lib/httpd +%attr(0700,apache,apache) %dir %{_localstatedir}/cache/httpd +%attr(0700,apache,apache) %dir %{_localstatedir}/cache/httpd/proxy + +%{_mandir}/man8/* +%{_mandir}/man5/* +%exclude %{_mandir}/man8/httpd-init.* + +%{_unitdir}/httpd.service +%{_unitdir}/httpd@.service +%{_unitdir}/htcacheclean.service +%{_unitdir}/*.socket + +%files filesystem +%dir %{_sysconfdir}/httpd +%dir %{_sysconfdir}/httpd/conf.d +%{_sysconfdir}/httpd/conf.d/README +%dir %{docroot} +%dir %{docroot}/cgi-bin +%dir %{docroot}/html +%dir %{contentdir} +%dir %{contentdir}/icons +%attr(755,root,root) %dir %{_unitdir}/httpd.service.d +%attr(755,root,root) %dir %{_unitdir}/httpd.socket.d + +%files tools +%defattr(-,root,root) +%{_bindir}/* +%{_mandir}/man1/* +%doc LICENSE NOTICE +%exclude %{_bindir}/apxs +%exclude %{_mandir}/man1/apxs.1* + +%files 
manual +%defattr(-,root,root) +%{contentdir}/manual +%config(noreplace) %{_sysconfdir}/httpd/conf.d/manual.conf + +%files -n mod_ssl +%defattr(-,root,root) +%{_libdir}/httpd/modules/mod_ssl.so +%config(noreplace) %{_sysconfdir}/httpd/conf.modules.d/00-ssl.conf +%config(noreplace) %{_sysconfdir}/httpd/conf.d/ssl.conf +%attr(0700,apache,root) %dir %{_localstatedir}/cache/httpd/ssl +%{_unitdir}/httpd-init.service +%{_libexecdir}/httpd-ssl-pass-dialog +%{_libexecdir}/httpd-ssl-gencerts +%{_unitdir}/httpd.socket.d/10-listen443.conf +%{_mandir}/man8/httpd-init.* + +%files -n mod_proxy_html +%defattr(-,root,root) +%{_libdir}/httpd/modules/mod_proxy_html.so +%{_libdir}/httpd/modules/mod_xml2enc.so +%config(noreplace) %{_sysconfdir}/httpd/conf.modules.d/00-proxyhtml.conf + +%files -n mod_ldap +%defattr(-,root,root) +%{_libdir}/httpd/modules/mod_*ldap.so +%config(noreplace) %{_sysconfdir}/httpd/conf.modules.d/01-ldap.conf + +%files -n mod_session +%defattr(-,root,root) +%{_libdir}/httpd/modules/mod_session*.so +%{_libdir}/httpd/modules/mod_auth_form.so +%config(noreplace) %{_sysconfdir}/httpd/conf.modules.d/01-session.conf + +%files devel +%defattr(-,root,root) +%{_includedir}/httpd +%{_bindir}/apxs +%{_mandir}/man1/apxs.1* +%dir %{_libdir}/httpd/build +%{_libdir}/httpd/build/*.mk +%{_libdir}/httpd/build/*.sh +%{_rpmconfigdir}/macros.d/macros.httpd + +%changelog +* Mon Jun 15 2020 Joe Orton - 2.4.37-30 +- Resolves: #1209162 - support logging to journald from CustomLog + +* Mon Jun 08 2020 Lubos Uhliarik - 2.4.37-29 +- Resolves: #1823263 (CVE-2020-1934) - CVE-2020-1934 httpd: mod_proxy_ftp use of + uninitialized value + +* Fri May 29 2020 Lubos Uhliarik - 2.4.37-28 +- Related: #1771847 - BalancerMember ping parameter for mod_proxy_http + doesn't work + +* Tue Apr 14 2020 Lubos Uhliarik - 2.4.37-27 +- Resolves: #1823259 - CVE-2020-1927 httpd:2.4/httpd: mod_rewrite configurations + vulnerable to open redirect +- Resolves: #1747284 - CVE-2019-10098 httpd:2.4/httpd: mod_rewrite 
potential + open redirect +- Resolves: #1747281 - CVE-2019-10092 httpd:2.4/httpd: limited cross-site + scripting in mod_proxy error page +- Resolves: #1747291 - CVE-2019-10097 httpd:2.4/httpd: null-pointer dereference + in mod_remoteip +- Resolves: #1771847 - BalancerMember ping parameter for mod_proxy_http + doesn't work +- Resolves: #1794728 - Backport of SessionExpiryUpdateInterval directive + +* Mon Dec 02 2019 Lubos Uhliarik - 2.4.37-21 +- Resolves: #1775158 - POST request with TLS 1.3 PHA client auth fails: + Re-negotiation handshake failed: Client certificate missing + +* Sun Dec 01 2019 Lubos Uhliarik - 2.4.37-20 +- Resolves: #1704317 - Add support for SSLKEYLOGFILE + +* Thu Nov 28 2019 Joe Orton - 2.4.37-19 +- mod_cgid: enable fd passing (#1633224) + +* Mon Nov 18 2019 Lubos Uhliarik - 2.4.37-18 +- Resolves: #1744121 - Unexpected OCSP in proxy SSL connection +- Resolves: #1725031 - htpasswd: support SHA-x passwords for FIPS compatibility +- Resolves: #1633224 - mod_cgid logging issues + +* Wed Oct 02 2019 Lubos Uhliarik - 2.4.37-17 +- remove bundled mod_md module +- Related: #1747898 - add mod_md package + +* Thu Aug 29 2019 Lubos Uhliarik - 2.4.37-16 +- Resolves: #1744999 - CVE-2019-9511 httpd:2.4/mod_http2: HTTP/2: large amount + of data request leads to denial of service +- Resolves: #1745086 - CVE-2019-9516 httpd:2.4/mod_http2: HTTP/2: 0-length + headers leads to denial of service +- Resolves: #1745154 - CVE-2019-9517 httpd:2.4/mod_http2: HTTP/2: request for + large response leads to denial of service + +* Tue Jul 16 2019 Lubos Uhliarik - 2.4.37-15 +- Resolves: #1730721 - absolute path used for default state and runtime dir by + default + +* Thu Jun 27 2019 Lubos Uhliarik - 2.4.37-14 +- Resolves: #1724549 - httpd response contains garbage in Content-Type header + +* Wed Jun 12 2019 Lubos Uhliarik - 2.4.37-13 +- Resolves: #1696142 - CVE-2019-0217 httpd:2.4/httpd: mod_auth_digest: access + control bypass due to race condition +- Resolves: #1696097 - 
CVE-2019-0220 httpd:2.4/httpd: URL normalization + inconsistency +- Resolves: #1669221 - `ExtendedStatus Off` directive when using mod_systemd + causes systemctl to hang +- Resolves: #1673022 - httpd can not be started with mod_md enabled + +* Mon Apr 08 2019 Lubos Uhliarik - 2.4.37-11 +- Resolves: #1695432 - CVE-2019-0211 httpd: privilege escalation + from modules scripts +- Resolves: #1696091 - CVE-2019-0215 httpd:2.4/httpd: mod_ssl: access control + bypass when using per-location client certification authentication + +* Wed Feb 06 2019 Lubos Uhliarik - 2.4.37-10 +- Resolves: #1672977 - state-dir corruption on reload + +* Tue Feb 05 2019 Lubos Uhliarik - 2.4.37-9 +- Resolves: #1670716 - Coredump when starting in FIPS mode + +* Fri Feb 1 2019 Joe Orton - 2.4.37-8 +- add security fix for CVE-2019-0190 (#1671282) + +* Tue Dec 11 2018 Joe Orton - 2.4.37-7 +- add DefaultStateDir/ap_state_dir_relative() (#1653009) +- mod_dav_fs: use state dir for default DAVLockDB +- mod_md: use state dir for default MDStoreDir + +* Mon Dec 10 2018 Joe Orton - 2.4.37-6 +- add httpd.conf(5) (#1611361) + +* Mon Nov 26 2018 Luboš Uhliarik - 2.4.37-5 +- Resolves: #1652966 - Missing RELEASE in http header + +* Fri Nov 23 2018 Luboš Uhliarik - 2.4.37-4 +- Resolves: #1641951 - No Documentation= line in htcacheclean.service files + +* Fri Nov 23 2018 Luboš Uhliarik - 2.4.37-3 +- Resolves: #1643713 - TLS connection allowed while all protocols are forbidden + +* Thu Nov 22 2018 Joe Orton - 2.4.37-2 +- mod_ssl: fix off-by-one causing crashes in CGI children (#1649428) + +* Wed Nov 21 2018 Lubos Uhliarik - 2.4.37-1 +- Resolves: #1644625 - httpd rebase to 2.4.37 + +* Thu Oct 18 2018 Luboš Uhliarik - 2.4.35-10 +- Related: #1493510 - RFE: httpd, add IP_FREEBIND support for Listen + +* Tue Oct 16 2018 Lubos Uhliarik - 2.4.35-9 +- mod_ssl: allow sending multiple CA names which differ only in case + +* Tue Oct 16 2018 Joe Orton - 2.4.35-7 +- mod_ssl: drop SSLRandomSeed from default config (#1638730) +- 
mod_ssl: follow OpenSSL protocol defaults if SSLProtocol + is not configured (Rob Crittenden, #1638738) + +* Mon Oct 15 2018 Joe Orton - 2.4.35-5 +- mod_ssl: don't require SSLCryptoDevice to be set for PKCS#11 cert + +* Mon Oct 15 2018 Lubos Uhliarik - 2.4.35-4 +- Resolves: #1635681 - sync with Fedora 28/29 httpd +- comment-out SSLProtocol, SSLProxyProtocol from ssl.conf in default + configuration; now follow OpenSSL system default (#1468322) +- dropped NPN support +- mod_md: change hard-coded default MdStoreDir to state/md (#1563846) +- don't block on service try-restart in posttrans scriptlet +- build and load mod_brotli +- mod_systemd: show bound ports in status and log to journal + at startup +- updated httpd.service.xml man page +- tweak wording in privkey passphrase prompt +- drop sslmultiproxy patch +- apachectl: don't read /etc/sysconfig/httpd +- drop irrelevant Obsoletes for devel subpackage +- move instantiated httpd@.service to main httpd package + +* Mon Oct 15 2018 Lubos Uhliarik - 2.4.35-3 +- Resolves: #1602548 - various covscan fixes + +* Thu Sep 27 2018 Lubos Uhliarik - 2.4.35-2 +- apache httpd can work with TLS 1.3 (#1617997) +- drop SSLv3 support patch + +* Thu Sep 27 2018 Lubos Uhliarik - 2.4.35-1 +- new version 2.4.35 (#1632754) + +* Mon Sep 03 2018 Lubos Uhliarik - 2.4.33-4 +- mod_ssl: enable SSLv3 and change behavior of "SSLProtocol All" + configuration (#1622630) + +* Thu Jul 26 2018 Joe Orton - 2.4.33-3 +- mod_ssl: add PKCS#11 cert/key support (Anderson Sasaki, #1527084) + +* Mon Apr 30 2018 Luboš Uhliarik - 2.4.33-2 +- new version 2.4.33 +- add mod_md subpackage; load mod_proxy_uwsgi by default + +* Mon Apr 30 2018 Joe Orton - 2.4.28-8 +- remove %%ghosted /etc/sysconfig/httpd (#1572676) + +* Wed Mar 07 2018 Luboš Uhliarik - 2.4.28-2 +- Resolves: #1512563 - httpd: update welcome page branding +- Resolves: #1511123 - RFE: httpd use event MPM by default +- Resolves: #1493510 - RFE: httpd, add IP_FREEBIND support for Listen + +* Fri Oct 06 2017 
Luboš Uhliarik - 2.4.28-1 +- new version 2.4.28 + +* Tue Oct 3 2017 Joe Orton - 2.4.27-14 +- add notes on enabling httpd_graceful_shutdown boolean for prefork + +* Fri Sep 22 2017 Joe Orton - 2.4.27-13 +- drop Requires(post) for mod_ssl + +* Fri Sep 22 2017 Joe Orton - 2.4.27-12 +- better error handling in httpd-ssl-gencerts (#1494556) + +* Thu Sep 21 2017 Stephen Gallagher - 2.4.27-11 +- Require sscg 2.2.0 for creating service and CA certificates together + +* Thu Sep 21 2017 Jeroen van Meeuwen - 2.4.27-10 +- Address CVE-2017-9798 by applying patch from upstream (#1490344) + +* Thu Sep 21 2017 Joe Orton - 2.4.27-9 +- use sscg defaults; append CA cert to generated cert +- document httpd-init.service in httpd-init.service(8) + +* Thu Sep 21 2017 Jeroen van Meeuwen - 2.4.27-8 +- Address CVE-2017-9798 by applying patch from upstream (#1490344) + +* Wed Sep 20 2017 Stephen Gallagher - 2.4.27-8.1 +- Generate SSL certificates on service start, not %%posttrans + +* Tue Sep 19 2017 Joe Orton - 2.4.27-8.1 +- move httpd.service.d, httpd.socket.d dirs to -filesystem + +* Wed Sep 13 2017 Joe Orton - 2.4.27-7 +- add new content-length filter (upstream PR 61222) + +* Wed Aug 02 2017 Fedora Release Engineering - 2.4.27-6 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_27_Binutils_Mass_Rebuild + +* Wed Jul 26 2017 Fedora Release Engineering - 2.4.27-5 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_27_Mass_Rebuild + +* Tue Jul 18 2017 Joe Orton - 2.4.27-4 +- update mod_systemd (r1802251) + +* Mon Jul 17 2017 Joe Orton - 2.4.27-3 +- switch to event by default for Fedora 27 and later (#1471708) + +* Wed Jul 12 2017 Luboš Uhliarik - 2.4.27-2 +- Resolves: #1469959 - httpd update cleaned out /etc/sysconfig + +* Mon Jul 10 2017 Luboš Uhliarik - 2.4.27-1 +- new version 2.4.27 + +* Fri Jun 30 2017 Joe Orton - 2.4.26-2 +- mod_proxy_fcgi: fix further regressions (PR 61202) + +* Mon Jun 19 2017 Luboš Uhliarik - 2.4.26-1 +- new version 2.4.26 + +* Mon Jun 5 2017 Joe Orton - 
2.4.25-10 +- move unit man pages to section 8, add as Documentation= in units + +* Fri May 19 2017 Joe Orton - 2.4.25-9 +- add httpd.service(5) and httpd.socket(5) man pages + +* Tue May 16 2017 Joe Orton - 2.4.25-8 +- require mod_http2, now packaged separately + +* Wed Mar 29 2017 Luboš Uhliarik - 2.4.25-7 +- Resolves: #1397243 - Backport Apache Bug 53098 - mod_proxy_ajp: + patch to set worker secret passed to tomcat + +* Tue Mar 28 2017 Luboš Uhliarik - 2.4.25-6 +- Resolves: #1434916 - httpd.service: Failed with result timeout + +* Fri Mar 24 2017 Joe Orton - 2.4.25-5 +- link only httpd, not support/* against -lselinux -lsystemd + +* Fri Feb 10 2017 Fedora Release Engineering - 2.4.25-4 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_26_Mass_Rebuild + +* Thu Jan 12 2017 Joe Orton - 2.4.25-3 +- mod_watchdog: restrict thread lifetime (#1410883) + +* Thu Dec 22 2016 Luboš Uhliarik - 2.4.25-2 +- Resolves: #1358875 - require nghttp2 >= 1.5.0 + +* Thu Dec 22 2016 Luboš Uhliarik - 2.4.25-1 +- new version 2.4.25 + +* Mon Dec 05 2016 Luboš Uhliarik - 2.4.23-7 +- Resolves: #1401530 - CVE-2016-8740 httpd: Incomplete handling of + LimitRequestFields directive in mod_http2 + +* Mon Nov 14 2016 Joe Orton - 2.4.23-6 +- fix build with OpenSSL 1.1 (#1392900) +- fix typos in ssl.conf (josef randinger, #1379407) + +* Wed Nov 2 2016 Joe Orton - 2.4.23-5 +- no longer package /etc/sysconfig/httpd +- synch ssl.conf with upstream + +* Mon Jul 18 2016 Joe Orton - 2.4.23-4 +- add security fix for CVE-2016-5387 + +* Thu Jul 7 2016 Joe Orton - 2.4.23-3 +- load mod_watchdog by default (#1353582) + +* Thu Jul 7 2016 Joe Orton - 2.4.23-2 +- restore build of mod_proxy_fdpass (#1325883) +- improve check tests to catch configured-but-not-built modules + +* Thu Jul 7 2016 Joe Orton - 2.4.23-1 +- update to 2.4.23 (#1325883, #1353203) +- load mod_proxy_hcheck +- recommend use of "systemctl edit" in httpd.service + +* Thu Apr 7 2016 Joe Orton - 2.4.18-6 +- have "apachectl graceful" start httpd 
if not running, per man page + +* Wed Apr 6 2016 Joe Orton - 2.4.18-5 +- use redirects for lang-specific /manual/ URLs + +* Fri Mar 18 2016 Joe Orton - 2.4.18-4 +- fix welcome page HTML validity (Ville Skyttä) + +* Fri Mar 18 2016 Joe Orton - 2.4.18-3 +- remove httpd pre script (duplicate of httpd-filesystem's) +- in httpd-filesystem pre script, create group/user iff non-existent + +* Wed Feb 03 2016 Fedora Release Engineering - 2.4.18-2 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_24_Mass_Rebuild + +* Mon Dec 14 2015 Jan Kaluza - 2.4.18-1 +- update to new version 2.4.18 + +* Wed Dec 9 2015 Joe Orton - 2.4.17-4 +- re-enable mod_asis due to popular demand (#1284315) + +* Mon Oct 26 2015 Jan Kaluza - 2.4.17-3 +- fix crash when using -X argument (#1272234) + +* Wed Oct 14 2015 Jan Kaluza - 2.4.17-2 +- rebase socket activation patch to 2.4.17 + +* Tue Oct 13 2015 Joe Orton - 2.4.17-1 +- update to 2.4.17 (#1271224) +- build, load mod_http2 +- don't build mod_asis, mod_file_cache +- load mod_cache_socache, mod_proxy_wstunnel by default +- check every built mod_* is configured +- synch ssl.conf with upstream; disable SSLv3 by default + +* Wed Jul 15 2015 Jan Kaluza - 2.4.12-4 +- update to 2.4.16 + +* Tue Jul 7 2015 Joe Orton - 2.4.12-3 +- mod_ssl: use "localhost" in the dummy SSL cert if len(FQDN) > 59 chars + +* Wed Jun 17 2015 Fedora Release Engineering - 2.4.12-2 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_23_Mass_Rebuild + +* Fri Mar 27 2015 Jan Kaluza - 2.4.12-1 +- update to 2.4.12 + +* Tue Mar 24 2015 Jan Kaluza - 2.4.10-17 +- fix compilation with lua-5.3 + +* Tue Mar 24 2015 Jan Kaluza - 2.4.10-16 +- remove filter for auto-provides of httpd modules, it is not needed since F20 + +* Wed Dec 17 2014 Jan Kaluza - 2.4.10-15 +- core: fix bypassing of mod_headers rules via chunked requests (CVE-2013-5704) +- mod_cache: fix NULL pointer dereference on empty Content-Type (CVE-2014-3581) +- mod_proxy_fcgi: fix a potential crash with long headers 
(CVE-2014-3583) +- mod_lua: fix handling of the Require line when a LuaAuthzProvider is used + in multiple Require directives with different arguments (CVE-2014-8109) + +* Tue Oct 14 2014 Joe Orton - 2.4.10-14 +- require apr-util 1.5.x + +* Thu Sep 18 2014 Jan Kaluza - 2.4.10-13 +- use NoDelay and DeferAcceptSec in httpd.socket + +* Mon Sep 08 2014 Jan Kaluza - 2.4.10-12 +- increase suexec minimum acceptable uid/gid to 1000 (#1136391) + +* Wed Sep 03 2014 Jan Kaluza - 2.4.10-11 +- fix hostname requirement and conflict with openssl-libs + +* Mon Sep 01 2014 Jan Kaluza - 2.4.10-10 +- use KillMode=mixed in httpd.service (#1135122) + +* Fri Aug 29 2014 Joe Orton - 2.4.10-9 +- set vstring based on /etc/os-release (Pat Riehecky, #1114539) + +* Fri Aug 29 2014 Joe Orton - 2.4.10-8 +- pull in httpd-filesystem as Requires(pre) (#1128328) +- fix cipher selection in default ssl.conf, depend on new OpenSSL (#1134348) +- require hostname for mod_ssl post script (#1135118) + +* Fri Aug 22 2014 Jan Kaluza - 2.4.10-7 +- mod_systemd: updated to the latest version +- use -lsystemd instead of -lsystemd-daemon (#1125084) +- fix possible crash in SIGINT handling (#958934) + +* Thu Aug 21 2014 Joe Orton - 2.4.10-6 +- mod_ssl: treat "SSLCipherSuite PROFILE=..." 
as special (#1109119) +- switch default ssl.conf to use PROFILE=SYSTEM (#1109119) + +* Sat Aug 16 2014 Fedora Release Engineering - 2.4.10-5 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_21_22_Mass_Rebuild + +* Fri Aug 15 2014 Jan Kaluza - 2.4.10-4 +- add /usr/bin/useradd dependency to -filesystem requires + +* Thu Aug 14 2014 Jan Kaluza - 2.4.10-3 +- fix creating apache user in pre script (#1128328) + +* Thu Jul 31 2014 Joe Orton - 2.4.10-2 +- enable mod_request by default for mod_auth_form +- move disabled-by-default modules from 00-base.conf to 00-optional.conf + +* Mon Jul 21 2014 Joe Orton - 2.4.10-1 +- update to 2.4.10 +- expand variables in docdir example configs + +* Tue Jul 08 2014 Jan Kaluza - 2.4.9-8 +- add support for systemd socket activation (#1111648) + +* Mon Jul 07 2014 Jan Kaluza - 2.4.9-7 +- remove conf.modules.d from httpd-filesystem subpackage (#1081453) + +* Mon Jul 07 2014 Jan Kaluza - 2.4.9-6 +- add httpd-filesystem subpackage (#1081453) + +* Fri Jun 20 2014 Joe Orton - 2.4.9-5 +- mod_ssl: don't use the default OpenSSL cipher suite in ssl.conf (#1109119) + +* Sat Jun 07 2014 Fedora Release Engineering - 2.4.9-4 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_21_Mass_Rebuild + +* Fri Mar 28 2014 Jan Kaluza - 2.4.9-3 +- add support for SetHandler + proxy (#1078970) + +* Thu Mar 27 2014 Jan Kaluza - 2.4.9-2 +- move macros from /etc/rpm to macros.d (#1074277) +- remove unused patches + +* Mon Mar 17 2014 Jan Kaluza - 2.4.9-1 +- update to 2.4.9 + +* Fri Feb 28 2014 Joe Orton - 2.4.7-6 +- use 2048-bit RSA key with SHA-256 signature in dummy certificate + +* Fri Feb 28 2014 Stephen Gallagher 2.4.7-5 +- Create drop directory for systemd snippets + +* Thu Feb 27 2014 Jan Kaluza - 2.4.7-4 +- remove provides of old MMN, because it contained double-dash (#1068851) + +* Thu Feb 20 2014 Jan Kaluza - 2.4.7-3 +- fix graceful restart using legacy actions + +* Thu Dec 12 2013 Joe Orton - 2.4.7-2 +- conflict with pre-1.5.0 APR +- fix sslsninotreq 
patch + +* Wed Nov 27 2013 Joe Orton - 2.4.7-1 +- update to 2.4.7 (#1034071) + +* Fri Nov 22 2013 Joe Orton - 2.4.6-10 +- switch to requiring system-logos-httpd (#1031288) + +* Tue Nov 12 2013 Joe Orton - 2.4.6-9 +- change mmnisa to drop "-" altogether + +* Tue Nov 12 2013 Joe Orton - 2.4.6-8 +- drop ambiguous invalid "-" in RHS of httpd-mmn Provide, keeping old Provide + for transition + +* Fri Nov 1 2013 Jan Kaluza - 2.4.6-7 +- systemd: use {MAINPID} notation to ensure /bin/kill has always the second arg + +* Thu Oct 31 2013 Joe Orton - 2.4.6-6 +- mod_ssl: allow SSLEngine to override Listen-based default (r1537535) + +* Thu Oct 24 2013 Jan kaluza - 2.4.6-5 +- systemd: send SIGWINCH signal without httpd -k in ExecStop + +* Mon Oct 21 2013 Joe Orton - 2.4.6-4 +- load mod_macro by default (#998452) +- add README to conf.modules.d +- mod_proxy_http: add possible fix for threading issues (r1534321) +- core: add fix for truncated output with CGI scripts (r1530793) + +* Thu Oct 10 2013 Jan Kaluza - 2.4.6-3 +- require fedora-logos-httpd (#1009162) + +* Wed Jul 31 2013 Jan Kaluza - 2.4.6-2 +- revert fix for dumping vhosts twice + +* Mon Jul 22 2013 Joe Orton - 2.4.6-1 +- update to 2.4.6 +- mod_ssl: use revised NPN API (r1487772) + +* Thu Jul 11 2013 Jan Kaluza - 2.4.4-12 +- mod_unique_id: replace use of hostname + pid with PRNG output (#976666) +- apxs: mention -p option in manpage + +* Tue Jul 2 2013 Joe Orton - 2.4.4-11 +- add patch for aarch64 (Dennis Gilmore, #925558) + +* Mon Jul 1 2013 Joe Orton - 2.4.4-10 +- remove duplicate apxs man page from httpd-tools + +* Mon Jun 17 2013 Joe Orton - 2.4.4-9 +- remove zombie dbmmanage script + +* Fri May 31 2013 Jan Kaluza - 2.4.4-8 +- return 400 Bad Request on malformed Host header + +* Fri May 24 2013 Jan Kaluza - 2.4.4-7 +- ignore /etc/sysconfig/httpd and document systemd way of setting env variables + in this file + +* Mon May 20 2013 Jan Kaluza - 2.4.4-6 +- htpasswd/htdbm: fix hash generation bug (#956344) +- do not dump 
vhosts twice in httpd -S output (#928761) +- mod_cache: fix potential crash caused by uninitialized variable (#954109) + +* Thu Apr 18 2013 Jan Kaluza - 2.4.4-5 +- execute systemctl reload as result of apachectl graceful +- mod_ssl: ignore SNI hints unless required by config +- mod_cache: forward-port CacheMaxExpire "hard" option +- mod_ssl: fall back on another module's proxy hook if mod_ssl proxy + is not configured. + +* Tue Apr 16 2013 Jan Kaluza - 2.4.4-4 +- fix service file to not send SIGTERM after ExecStop (#906321, #912288) + +* Tue Mar 26 2013 Jan Kaluza - 2.4.4-3 +- protect MIMEMagicFile with IfModule (#893949) + +* Tue Feb 26 2013 Joe Orton - 2.4.4-2 +- really package mod_auth_form in mod_session (#915438) + +* Tue Feb 26 2013 Joe Orton - 2.4.4-1 +- update to 2.4.4 +- fix duplicate ownership of mod_session config (#914901) + +* Fri Feb 22 2013 Joe Orton - 2.4.3-17 +- add mod_session subpackage, move mod_auth_form there (#894500) + +* Thu Feb 14 2013 Fedora Release Engineering - 2.4.3-16 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_19_Mass_Rebuild + +* Tue Jan 8 2013 Joe Orton - 2.4.3-15 +- add systemd service for htcacheclean + +* Tue Nov 13 2012 Joe Orton - 2.4.3-14 +- drop patch for r1344712 + +* Tue Nov 13 2012 Joe Orton - 2.4.3-13 +- filter mod_*.so auto-provides (thanks to rcollet) +- pull in syslog logging fix from upstream (r1344712) + +* Fri Oct 26 2012 Joe Orton - 2.4.3-12 +- rebuild to pick up new apr-util-ldap + +* Tue Oct 23 2012 Joe Orton - 2.4.3-11 +- rebuild + +* Wed Oct 3 2012 Joe Orton - 2.4.3-10 +- pull upstream patch r1392850 in addition to r1387633 + +* Mon Oct 1 2012 Joe Orton - 2.4.3-9 +- define PLATFORM in os.h using vendor string + +* Mon Oct 1 2012 Joe Orton - 2.4.3-8 +- use systemd script unconditionally (#850149) + +* Mon Oct 1 2012 Joe Orton - 2.4.3-7 +- use systemd scriptlets if available (#850149) +- don't run posttrans restart if /etc/sysconfig/httpd-disable-posttrans exists + +* Mon Oct 01 2012 Jan Kaluza - 
2.4.3-6 +- use systemctl from apachectl (#842736) + +* Wed Sep 19 2012 Joe Orton - 2.4.3-5 +- fix some error log spam with graceful-stop (r1387633) +- minor mod_systemd tweaks + +* Thu Sep 13 2012 Joe Orton - 2.4.3-4 +- use IncludeOptional for conf.d/*.conf inclusion + +* Fri Sep 07 2012 Jan Kaluza - 2.4.3-3 +- adding mod_systemd to integrate with systemd better + +* Tue Aug 21 2012 Joe Orton - 2.4.3-2 +- mod_ssl: add check for proxy keypair match (upstream r1374214) + +* Tue Aug 21 2012 Joe Orton - 2.4.3-1 +- update to 2.4.3 (#849883) +- own the docroot (#848121) + +* Mon Aug 6 2012 Joe Orton - 2.4.2-23 +- add mod_proxy fixes from upstream (r1366693, r1365604) + +* Thu Jul 19 2012 Fedora Release Engineering - 2.4.2-22 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_18_Mass_Rebuild + +* Fri Jul 6 2012 Joe Orton - 2.4.2-21 +- drop explicit version requirement on initscripts + +* Thu Jul 5 2012 Joe Orton - 2.4.2-20 +- mod_ext_filter: fix error_log warnings + +* Mon Jul 2 2012 Joe Orton - 2.4.2-19 +- support "configtest" and "graceful" as initscripts "legacy actions" + +* Fri Jun 8 2012 Joe Orton - 2.4.2-18 +- avoid use of "core" GIF for a "core" directory (#168776) +- drop use of "syslog.target" in systemd unit file + +* Thu Jun 7 2012 Joe Orton - 2.4.2-17 +- use _unitdir for systemd unit file +- use /run in unit file, ssl.conf + +* Thu Jun 7 2012 Joe Orton - 2.4.2-16 +- mod_ssl: fix NPN patch merge + +* Wed Jun 6 2012 Joe Orton - 2.4.2-15 +- move tmpfiles.d fragment into /usr/lib per new guidelines +- package /run/httpd not /var/run/httpd +- set runtimedir to /run/httpd likewise + +* Wed Jun 6 2012 Joe Orton - 2.4.2-14 +- fix htdbm/htpasswd crash on crypt() failure (#818684) + +* Wed Jun 6 2012 Joe Orton - 2.4.2-13 +- pull fix for NPN patch from upstream (r1345599) + +* Thu May 31 2012 Joe Orton - 2.4.2-12 +- update suexec patch to use LOG_AUTHPRIV facility + +* Thu May 24 2012 Joe Orton - 2.4.2-11 +- really fix autoindex.conf (thanks to remi@) + +* Thu May 24 
2012 Joe Orton - 2.4.2-10 +- fix autoindex.conf to allow symlink to poweredby.png + +* Wed May 23 2012 Joe Orton - 2.4.2-9 +- suexec: use upstream version of patch for capability bit support + +* Wed May 23 2012 Joe Orton - 2.4.2-8 +- suexec: use syslog rather than suexec.log, drop dac_override capability + +* Tue May 1 2012 Joe Orton - 2.4.2-7 +- mod_ssl: add TLS NPN support (r1332643, #809599) + +* Tue May 1 2012 Joe Orton - 2.4.2-6 +- add BR on APR >= 1.4.0 + +* Fri Apr 27 2012 Joe Orton - 2.4.2-5 +- use systemctl from logrotate (#221073) + +* Fri Apr 27 2012 Joe Orton - 2.4.2-4 +- pull from upstream: + * use TLS close_notify alert for dummy_connection (r1326980+) + * cleanup symbol exports (r1327036+) + +* Fri Apr 20 2012 Joe Orton - 2.4.2-3 +- really fix restart + +* Fri Apr 20 2012 Joe Orton - 2.4.2-2 +- tweak default ssl.conf +- fix restart handling (#814645) +- use graceful restart by default + +* Wed Apr 18 2012 Jan Kaluza - 2.4.2-1 +- update to 2.4.2 + +* Fri Mar 23 2012 Joe Orton - 2.4.1-6 +- fix macros + +* Fri Mar 23 2012 Joe Orton - 2.4.1-5 +- add _httpd_moddir to macros + +* Tue Mar 13 2012 Joe Orton - 2.4.1-4 +- fix symlink for poweredby.png +- fix manual.conf + +* Tue Mar 13 2012 Joe Orton - 2.4.1-3 +- add mod_proxy_html subpackage (w/mod_proxy_html + mod_xml2enc) +- move mod_ldap, mod_authnz_ldap to mod_ldap subpackage + +* Tue Mar 13 2012 Joe Orton - 2.4.1-2 +- clean docroot better +- ship proxy, ssl directories within /var/cache/httpd +- default config: + * unrestricted access to (only) /var/www + * remove (commented) Mutex, MaxRanges, ScriptSock + * split autoindex config to conf.d/autoindex.conf +- ship additional example configs in docdir + +* Tue Mar 6 2012 Joe Orton - 2.4.1-1 +- update to 2.4.1 +- adopt upstream default httpd.conf (almost verbatim) +- split all LoadModules to conf.modules.d/*.conf +- include conf.d/*.conf at end of httpd.conf +- trim %%changelog + +* Mon Feb 13 2012 Joe Orton - 2.2.22-2 +- fix build against PCRE 8.30 + +* 
Mon Feb 13 2012 Joe Orton - 2.2.22-1 +- update to 2.2.22 + +* Fri Feb 10 2012 Petr Pisar - 2.2.21-8 +- Rebuild against PCRE 8.30 + +* Mon Jan 23 2012 Jan Kaluza - 2.2.21-7 +- fix #783629 - start httpd after named + +* Mon Jan 16 2012 Joe Orton - 2.2.21-6 +- complete conversion to systemd, drop init script (#770311) +- fix comments in /etc/sysconfig/httpd (#771024) +- enable PrivateTmp in service file (#781440) +- set LANG=C in /etc/sysconfig/httpd + +* Fri Jan 13 2012 Fedora Release Engineering - 2.2.21-5 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_17_Mass_Rebuild + +* Tue Dec 06 2011 Jan Kaluza - 2.2.21-4 +- fix #751591 - start httpd after remote-fs + +* Mon Oct 24 2011 Jan Kaluza - 2.2.21-3 +- allow change state of BalancerMember in mod_proxy_balancer web interface + +* Thu Sep 22 2011 Ville Skyttä - 2.2.21-2 +- Make mmn available as %%{_httpd_mmn}. +- Add .svgz to AddEncoding x-gzip example in httpd.conf. + +* Tue Sep 13 2011 Joe Orton - 2.2.21-1 +- update to 2.2.21 + +* Mon Sep 5 2011 Joe Orton - 2.2.20-1 +- update to 2.2.20 +- fix MPM stub man page generation + +* Wed Aug 10 2011 Jan Kaluza - 2.2.19-5 +- fix #707917 - add httpd-ssl-pass-dialog to ask for SSL password using systemd + +* Fri Jul 22 2011 Iain Arnell 1:2.2.19-4 +- rebuild while rpm-4.9.1 is untagged to remove trailing slash in provided + directory names + +* Wed Jul 20 2011 Jan Kaluza - 2.2.19-3 +- fix #716621 - suexec now works without setuid bit + +* Thu Jul 14 2011 Jan Kaluza - 2.2.19-2 +- fix #689091 - backported patch from 2.3 branch to support IPv6 in logresolve + +* Fri Jul 1 2011 Joe Orton - 2.2.19-1 +- update to 2.2.19 +- enable dbd, authn_dbd in default config + +* Thu Apr 14 2011 Joe Orton - 2.2.17-13 +- fix path expansion in service files + +* Tue Apr 12 2011 Joe Orton - 2.2.17-12 +- add systemd service files (#684175, thanks to Jóhann B. 
Guðmundsson) + +* Wed Mar 23 2011 Joe Orton - 2.2.17-11 +- minor updates to httpd.conf +- drop old patches + +* Wed Mar 2 2011 Joe Orton - 2.2.17-10 +- rebuild + +* Wed Feb 23 2011 Joe Orton - 2.2.17-9 +- use arch-specific mmn + +* Wed Feb 09 2011 Fedora Release Engineering - 2.2.17-8 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_15_Mass_Rebuild + +* Mon Jan 31 2011 Joe Orton - 2.2.17-7 +- generate dummy mod_ssl cert with CA:FALSE constraint (#667841) +- add man page stubs for httpd.event, httpd.worker +- drop distcache support +- add STOP_TIMEOUT support to init script + +* Sat Jan 8 2011 Joe Orton - 2.2.17-6 +- update default SSLCipherSuite per upstream trunk + +* Wed Jan 5 2011 Joe Orton - 2.2.17-5 +- fix requires (#667397) + +* Wed Jan 5 2011 Joe Orton - 2.2.17-4 +- de-ghost /var/run/httpd + +* Tue Jan 4 2011 Joe Orton - 2.2.17-3 +- add tmpfiles.d configuration, ghost /var/run/httpd (#656600) + +* Sat Nov 20 2010 Joe Orton - 2.2.17-2 +- drop setuid bit, use capabilities for suexec binary + +* Wed Oct 27 2010 Joe Orton - 2.2.17-1 +- update to 2.2.17 + +* Fri Sep 10 2010 Joe Orton - 2.2.16-2 +- link everything using -z relro and -z now + +* Mon Jul 26 2010 Joe Orton - 2.2.16-1 +- update to 2.2.16 + +* Fri Jul 9 2010 Joe Orton - 2.2.15-3 +- default config tweaks: + * harden httpd.conf w.r.t. .htaccess restriction (#591293) + * load mod_substitute, mod_version by default + * drop proxy_ajp.conf, load mod_proxy_ajp in httpd.conf + * add commented list of shipped-but-unloaded modules + * bump up worker defaults a little + * drop KeepAliveTimeout to 5 secs per upstream +- fix LSB compliance in init script (#522074) +- bundle NOTICE in -tools +- use init script in logrotate postrotate to pick up PIDFILE +- drop some old Obsoletes/Conflicts + +* Sun Apr 04 2010 Robert Scheck - 2.2.15-1 +- update to 2.2.15 (#572404, #579311) +