diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..9969f1d
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1 @@
+SOURCES/httpd-2.4.6.tar.bz2
diff --git a/.httpd.metadata b/.httpd.metadata
new file mode 100644
index 0000000..d335a99
--- /dev/null
+++ b/.httpd.metadata
@@ -0,0 +1 @@
+16d8ec72535ded65d035122b0d944b0e64eaa2a2 SOURCES/httpd-2.4.6.tar.bz2
diff --git a/SOURCES/00-base.conf b/SOURCES/00-base.conf
new file mode 100644
index 0000000..31d979f
--- /dev/null
+++ b/SOURCES/00-base.conf
@@ -0,0 +1,77 @@
+#
+# This file loads most of the modules included with the Apache HTTP
+# Server itself.
+#
+
+LoadModule access_compat_module modules/mod_access_compat.so
+LoadModule actions_module modules/mod_actions.so
+LoadModule alias_module modules/mod_alias.so
+LoadModule allowmethods_module modules/mod_allowmethods.so
+LoadModule auth_basic_module modules/mod_auth_basic.so
+LoadModule auth_digest_module modules/mod_auth_digest.so
+LoadModule authn_anon_module modules/mod_authn_anon.so
+LoadModule authn_core_module modules/mod_authn_core.so
+LoadModule authn_dbd_module modules/mod_authn_dbd.so
+LoadModule authn_dbm_module modules/mod_authn_dbm.so
+LoadModule authn_file_module modules/mod_authn_file.so
+LoadModule authn_socache_module modules/mod_authn_socache.so
+LoadModule authz_core_module modules/mod_authz_core.so
+LoadModule authz_dbd_module modules/mod_authz_dbd.so
+LoadModule authz_dbm_module modules/mod_authz_dbm.so
+LoadModule authz_groupfile_module modules/mod_authz_groupfile.so
+LoadModule authz_host_module modules/mod_authz_host.so
+LoadModule authz_owner_module modules/mod_authz_owner.so
+LoadModule authz_user_module modules/mod_authz_user.so
+LoadModule autoindex_module modules/mod_autoindex.so
+LoadModule cache_module modules/mod_cache.so
+LoadModule cache_disk_module modules/mod_cache_disk.so
+LoadModule data_module modules/mod_data.so
+LoadModule dbd_module modules/mod_dbd.so
+LoadModule deflate_module modules/mod_deflate.so
+LoadModule dir_module modules/mod_dir.so
+LoadModule dumpio_module modules/mod_dumpio.so
+LoadModule echo_module modules/mod_echo.so
+LoadModule env_module modules/mod_env.so
+LoadModule expires_module modules/mod_expires.so
+LoadModule ext_filter_module modules/mod_ext_filter.so
+LoadModule filter_module modules/mod_filter.so
+LoadModule headers_module modules/mod_headers.so
+LoadModule include_module modules/mod_include.so
+LoadModule info_module modules/mod_info.so
+LoadModule log_config_module modules/mod_log_config.so
+LoadModule logio_module modules/mod_logio.so
+LoadModule mime_magic_module modules/mod_mime_magic.so
+LoadModule mime_module modules/mod_mime.so
+LoadModule negotiation_module modules/mod_negotiation.so
+LoadModule remoteip_module modules/mod_remoteip.so
+LoadModule reqtimeout_module modules/mod_reqtimeout.so
+LoadModule rewrite_module modules/mod_rewrite.so
+LoadModule setenvif_module modules/mod_setenvif.so
+LoadModule slotmem_plain_module modules/mod_slotmem_plain.so
+LoadModule slotmem_shm_module modules/mod_slotmem_shm.so
+LoadModule socache_dbm_module modules/mod_socache_dbm.so
+LoadModule socache_memcache_module modules/mod_socache_memcache.so
+LoadModule socache_shmcb_module modules/mod_socache_shmcb.so
+LoadModule status_module modules/mod_status.so
+LoadModule substitute_module modules/mod_substitute.so
+LoadModule suexec_module modules/mod_suexec.so
+LoadModule unique_id_module modules/mod_unique_id.so
+LoadModule unixd_module modules/mod_unixd.so
+LoadModule userdir_module modules/mod_userdir.so
+LoadModule version_module modules/mod_version.so
+LoadModule vhost_alias_module modules/mod_vhost_alias.so
+
+#LoadModule buffer_module modules/mod_buffer.so
+#LoadModule watchdog_module modules/mod_watchdog.so
+#LoadModule heartbeat_module modules/mod_heartbeat.so
+#LoadModule heartmonitor_module modules/mod_heartmonitor.so
+#LoadModule usertrack_module modules/mod_usertrack.so
+#LoadModule dialup_module modules/mod_dialup.so
+#LoadModule charset_lite_module modules/mod_charset_lite.so
+#LoadModule log_debug_module modules/mod_log_debug.so
+#LoadModule ratelimit_module modules/mod_ratelimit.so
+#LoadModule reflector_module modules/mod_reflector.so
+#LoadModule request_module modules/mod_request.so
+#LoadModule sed_module modules/mod_sed.so
+#LoadModule speling_module modules/mod_speling.so
+
diff --git a/SOURCES/00-dav.conf b/SOURCES/00-dav.conf
new file mode 100644
index 0000000..e6af8de
--- /dev/null
+++ b/SOURCES/00-dav.conf
@@ -0,0 +1,3 @@
+LoadModule dav_module modules/mod_dav.so
+LoadModule dav_fs_module modules/mod_dav_fs.so
+LoadModule dav_lock_module modules/mod_dav_lock.so
diff --git a/SOURCES/00-lua.conf b/SOURCES/00-lua.conf
new file mode 100644
index 0000000..9e0d0db
--- /dev/null
+++ b/SOURCES/00-lua.conf
@@ -0,0 +1 @@
+LoadModule lua_module modules/mod_lua.so
diff --git a/SOURCES/00-mpm.conf b/SOURCES/00-mpm.conf
new file mode 100644
index 0000000..7bfd1d4
--- /dev/null
+++ b/SOURCES/00-mpm.conf
@@ -0,0 +1,19 @@
+# Select the MPM module which should be used by uncommenting exactly
+# one of the following LoadModule lines:
+
+# prefork MPM: Implements a non-threaded, pre-forking web server
+# See: http://httpd.apache.org/docs/2.4/mod/prefork.html
+LoadModule mpm_prefork_module modules/mod_mpm_prefork.so
+
+# worker MPM: Multi-Processing Module implementing a hybrid
+# multi-threaded multi-process web server
+# See: http://httpd.apache.org/docs/2.4/mod/worker.html
+#
+#LoadModule mpm_worker_module modules/mod_mpm_worker.so
+
+# event MPM: A variant of the worker MPM with the goal of consuming
+# threads only for connections with active processing
+# See: http://httpd.apache.org/docs/2.4/mod/event.html
+#
+#LoadModule mpm_event_module modules/mod_mpm_event.so
+
diff --git a/SOURCES/00-proxy.conf b/SOURCES/00-proxy.conf
new file mode 100644
index 0000000..cc0bca0
--- /dev/null
+++ b/SOURCES/00-proxy.conf
@@ -0,0 +1,16 @@
+# This file configures all the proxy modules:
+LoadModule proxy_module modules/mod_proxy.so
+LoadModule lbmethod_bybusyness_module modules/mod_lbmethod_bybusyness.so
+LoadModule lbmethod_byrequests_module modules/mod_lbmethod_byrequests.so
+LoadModule lbmethod_bytraffic_module modules/mod_lbmethod_bytraffic.so
+LoadModule lbmethod_heartbeat_module modules/mod_lbmethod_heartbeat.so
+LoadModule proxy_ajp_module modules/mod_proxy_ajp.so
+LoadModule proxy_balancer_module modules/mod_proxy_balancer.so
+LoadModule proxy_connect_module modules/mod_proxy_connect.so
+LoadModule proxy_express_module modules/mod_proxy_express.so
+LoadModule proxy_fcgi_module modules/mod_proxy_fcgi.so
+LoadModule proxy_fdpass_module modules/mod_proxy_fdpass.so
+LoadModule proxy_ftp_module modules/mod_proxy_ftp.so
+LoadModule proxy_http_module modules/mod_proxy_http.so
+LoadModule proxy_scgi_module modules/mod_proxy_scgi.so
+LoadModule proxy_wstunnel_module modules/mod_proxy_wstunnel.so
diff --git a/SOURCES/00-proxyhtml.conf b/SOURCES/00-proxyhtml.conf
new file mode 100644
index 0000000..9a9b107
--- /dev/null
+++ b/SOURCES/00-proxyhtml.conf
@@ -0,0 +1,3 @@
+# This file configures mod_proxy_html and mod_xml2enc:
+LoadModule xml2enc_module modules/mod_xml2enc.so
+LoadModule proxy_html_module modules/mod_proxy_html.so
diff --git a/SOURCES/00-ssl.conf b/SOURCES/00-ssl.conf
new file mode 100644
index 0000000..53235cd
--- /dev/null
+++ b/SOURCES/00-ssl.conf
@@ -0,0 +1 @@
+LoadModule ssl_module modules/mod_ssl.so
diff --git a/SOURCES/00-systemd.conf b/SOURCES/00-systemd.conf
new file mode 100644
index 0000000..b208c97
--- /dev/null
+++ b/SOURCES/00-systemd.conf
@@ -0,0 +1,2 @@
+# This file configures systemd module:
+LoadModule systemd_module modules/mod_systemd.so
diff --git a/SOURCES/01-cgi.conf b/SOURCES/01-cgi.conf
new file mode 100644
index 0000000..5b8b936
--- /dev/null
+++ b/SOURCES/01-cgi.conf
@@ -0,0 +1,14 @@
+# This configuration file loads a CGI module appropriate to the MPM
+# which has been configured in 00-mpm.conf. mod_cgid should be used
+# with a threaded MPM; mod_cgi with the prefork MPM.
+
+<IfModule mpm_worker_module>
+   LoadModule cgid_module modules/mod_cgid.so
+</IfModule>
+<IfModule mpm_event_module>
+   LoadModule cgid_module modules/mod_cgid.so
+</IfModule>
+<IfModule mpm_prefork_module>
+   LoadModule cgi_module modules/mod_cgi.so
+</IfModule>
+
diff --git a/SOURCES/01-ldap.conf b/SOURCES/01-ldap.conf
new file mode 100644
index 0000000..f2ac2a2
--- /dev/null
+++ b/SOURCES/01-ldap.conf
@@ -0,0 +1,3 @@
+# This file configures the LDAP modules:
+LoadModule ldap_module modules/mod_ldap.so
+LoadModule authnz_ldap_module modules/mod_authnz_ldap.so
diff --git a/SOURCES/01-session.conf b/SOURCES/01-session.conf
new file mode 100644
index 0000000..f8d4d92
--- /dev/null
+++ b/SOURCES/01-session.conf
@@ -0,0 +1,6 @@
+LoadModule session_module modules/mod_session.so
+LoadModule session_cookie_module modules/mod_session_cookie.so
+LoadModule session_dbd_module modules/mod_session_dbd.so
+LoadModule auth_form_module modules/mod_auth_form.so
+
+#LoadModule session_crypto_module modules/mod_session_crypto.so
diff --git a/SOURCES/README.confd b/SOURCES/README.confd
new file mode 100644
index 0000000..f5e9661
--- /dev/null
+++ b/SOURCES/README.confd
@@ -0,0 +1,9 @@
+
+This directory holds configuration files for the Apache HTTP Server;
+any files in this directory which have the ".conf" extension will be
+processed as httpd configuration files. The directory is used in
+addition to the directory /etc/httpd/conf.modules.d/, which contains
+configuration files necessary to load modules.
+
+Files are processed in alphabetical order.
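A quick way to sanity-check these drop-in files after editing them; this sketch assumes the standard /etc/httpd layout the packaging above installs into and the stock httpd and apachectl binaries:

# Check the merged configuration and list every module the server would
# load from conf.modules.d/*.conf, without touching the running service.
apachectl configtest
httpd -t -D DUMP_MODULES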
+ diff --git a/SOURCES/action-configtest.sh b/SOURCES/action-configtest.sh new file mode 100644 index 0000000..26d2893 --- /dev/null +++ b/SOURCES/action-configtest.sh @@ -0,0 +1,2 @@ +#!/bin/sh +exec /sbin/apachectl configtest diff --git a/SOURCES/action-graceful.sh b/SOURCES/action-graceful.sh new file mode 100644 index 0000000..4976087 --- /dev/null +++ b/SOURCES/action-graceful.sh @@ -0,0 +1,2 @@ +#!/bin/sh +exec /sbin/apachectl graceful diff --git a/SOURCES/htcacheclean.service b/SOURCES/htcacheclean.service new file mode 100644 index 0000000..2a0aa8e --- /dev/null +++ b/SOURCES/htcacheclean.service @@ -0,0 +1,11 @@ +[Unit] +Description=Disk Cache Cleaning Daemon for Apache HTTP Server +After=httpd.service +Documentation=man:htcacheclean(8) + +[Service] +Type=forking +User=apache +PIDFile=/run/httpd/htcacheclean/pid +EnvironmentFile=/etc/sysconfig/htcacheclean +ExecStart=/usr/sbin/htcacheclean -P /run/httpd/htcacheclean/pid -d $INTERVAL -p $CACHE_ROOT -l $LIMIT $OPTIONS diff --git a/SOURCES/htcacheclean.sysconf b/SOURCES/htcacheclean.sysconf new file mode 100644 index 0000000..fffa17b --- /dev/null +++ b/SOURCES/htcacheclean.sysconf @@ -0,0 +1,16 @@ +# +# Configuration options for systemd service, htcacheclean.service. +# See htcacheclean(8) for more information on available options. +# + +# Interval between cache clean runs, in minutes +INTERVAL=15 + +# Default cache root. +CACHE_ROOT=/var/cache/httpd/proxy + +# Cache size limit in bytes (K=Kbytes, M=Mbytes) +LIMIT=100M + +# Any other options... +OPTIONS= diff --git a/SOURCES/httpd-2.4.1-apctl.patch b/SOURCES/httpd-2.4.1-apctl.patch new file mode 100644 index 0000000..b31c3c5 --- /dev/null +++ b/SOURCES/httpd-2.4.1-apctl.patch @@ -0,0 +1,94 @@ + +- fail gracefully if links is not installed on target system +- source sysconfig/httpd for custom env. vars etc. +- make httpd -t work even in SELinux +- pass $OPTIONS to all $HTTPD invocation + +Upstream-HEAD: vendor +Upstream-2.0: vendor +Upstream-Status: Vendor-specific changes for better initscript integration + +--- httpd-2.4.1/support/apachectl.in.apctl ++++ httpd-2.4.1/support/apachectl.in +@@ -44,19 +44,25 @@ ARGV="$@" + # the path to your httpd binary, including options if necessary + HTTPD='@exp_sbindir@/@progname@' + # +-# pick up any necessary environment variables +-if test -f @exp_sbindir@/envvars; then +- . @exp_sbindir@/envvars +-fi + # + # a command that outputs a formatted text version of the HTML at the + # url given on the command line. Designed for lynx, however other + # programs may work. +-LYNX="@LYNX_PATH@ -dump" ++if [ -x "@LYNX_PATH@" ]; then ++ LYNX="@LYNX_PATH@ -dump" ++else ++ LYNX=none ++fi + # + # the URL to your server's mod_status status page. If you do not + # have one, then status and fullstatus will not work. + STATUSURL="http://localhost:@PORT@/server-status" ++ ++# Source /etc/sysconfig/httpd for $HTTPD setting, etc. ++if [ -r /etc/sysconfig/httpd ]; then ++ . /etc/sysconfig/httpd ++fi ++ + # + # Set this variable to a command that increases the maximum + # number of file descriptors allowed per child process. This is +@@ -76,9 +82,27 @@ if [ "x$ARGV" = "x" ] ; then + ARGV="-h" + fi + ++function checklynx() { ++if [ "$LYNX" = "none" ]; then ++ echo "The 'links' package is required for this functionality." ++ exit 8 ++fi ++} ++ ++function testconfig() { ++# httpd is denied terminal access in SELinux, so run in the ++# current context to get stdout from $HTTPD -t. 
++if test -x /usr/sbin/selinuxenabled && /usr/sbin/selinuxenabled; then ++ runcon -- `id -Z` $HTTPD $OPTIONS -t ++else ++ $HTTPD $OPTIONS -t ++fi ++ERROR=$? ++} ++ + case $ACMD in + start|stop|restart|graceful|graceful-stop) +- $HTTPD -k $ARGV ++ $HTTPD $OPTIONS -k $ARGV + ERROR=$? + ;; + startssl|sslstart|start-SSL) +@@ -88,17 +112,18 @@ startssl|sslstart|start-SSL) + ERROR=2 + ;; + configtest) +- $HTTPD -t +- ERROR=$? ++ testconfig + ;; + status) ++ checklynx + $LYNX $STATUSURL | awk ' /process$/ { print; exit } { print } ' + ;; + fullstatus) ++ checklynx + $LYNX $STATUSURL + ;; + *) +- $HTTPD "$@" ++ $HTTPD $OPTIONS "$@" + ERROR=$? + esac + diff --git a/SOURCES/httpd-2.4.1-corelimit.patch b/SOURCES/httpd-2.4.1-corelimit.patch new file mode 100644 index 0000000..96f8486 --- /dev/null +++ b/SOURCES/httpd-2.4.1-corelimit.patch @@ -0,0 +1,35 @@ + +Bump up the core size limit if CoreDumpDirectory is +configured. + +Upstream-Status: Was discussed but there are competing desires; + there are portability oddities here too. + +--- httpd-2.4.1/server/core.c.corelimit ++++ httpd-2.4.1/server/core.c +@@ -4433,6 +4433,25 @@ static int core_post_config(apr_pool_t * + } + apr_pool_cleanup_register(pconf, NULL, ap_mpm_end_gen_helper, + apr_pool_cleanup_null); ++ ++#ifdef RLIMIT_CORE ++ if (ap_coredumpdir_configured) { ++ struct rlimit lim; ++ ++ if (getrlimit(RLIMIT_CORE, &lim) == 0 && lim.rlim_cur == 0) { ++ lim.rlim_cur = lim.rlim_max; ++ if (setrlimit(RLIMIT_CORE, &lim) == 0) { ++ ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, NULL, ++ "core dump file size limit raised to %lu bytes", ++ lim.rlim_cur); ++ } else { ++ ap_log_error(APLOG_MARK, APLOG_NOTICE, errno, NULL, ++ "core dump file size is zero, setrlimit failed"); ++ } ++ } ++ } ++#endif ++ + return OK; + } + diff --git a/SOURCES/httpd-2.4.1-deplibs.patch b/SOURCES/httpd-2.4.1-deplibs.patch new file mode 100644 index 0000000..b73c21d --- /dev/null +++ b/SOURCES/httpd-2.4.1-deplibs.patch @@ -0,0 +1,19 @@ + +Link straight against .la files. + +Upstream-Status: vendor specific + +--- httpd-2.4.1/configure.in.deplibs ++++ httpd-2.4.1/configure.in +@@ -707,9 +707,9 @@ APACHE_HELP_STRING(--with-suexec-umask,u + + dnl APR should go after the other libs, so the right symbols can be picked up + if test x${apu_found} != xobsolete; then +- AP_LIBS="$AP_LIBS `$apu_config --avoid-ldap --link-libtool --libs`" ++ AP_LIBS="$AP_LIBS `$apu_config --avoid-ldap --link-libtool`" + fi +-AP_LIBS="$AP_LIBS `$apr_config --link-libtool --libs`" ++AP_LIBS="$AP_LIBS `$apr_config --link-libtool`" + APACHE_SUBST(AP_LIBS) + APACHE_SUBST(AP_BUILD_SRCLIB_DIRS) + APACHE_SUBST(AP_CLEAN_SRCLIB_DIRS) diff --git a/SOURCES/httpd-2.4.1-selinux.patch b/SOURCES/httpd-2.4.1-selinux.patch new file mode 100644 index 0000000..e97c5a4 --- /dev/null +++ b/SOURCES/httpd-2.4.1-selinux.patch @@ -0,0 +1,61 @@ + +Log the SELinux context at startup. 
+ +Upstream-Status: unlikely to be any interest in this upstream + +--- httpd-2.4.1/configure.in.selinux ++++ httpd-2.4.1/configure.in +@@ -458,6 +458,11 @@ fopen64 + dnl confirm that a void pointer is large enough to store a long integer + APACHE_CHECK_VOID_PTR_LEN + ++AC_CHECK_LIB(selinux, is_selinux_enabled, [ ++ AC_DEFINE(HAVE_SELINUX, 1, [Defined if SELinux is supported]) ++ APR_ADDTO(AP_LIBS, [-lselinux]) ++]) ++ + AC_CACHE_CHECK([for gettid()], ac_cv_gettid, + [AC_TRY_RUN(#define _GNU_SOURCE + #include +--- httpd-2.4.1/server/core.c.selinux ++++ httpd-2.4.1/server/core.c +@@ -58,6 +58,10 @@ + #include + #endif + ++#ifdef HAVE_SELINUX ++#include ++#endif ++ + /* LimitRequestBody handling */ + #define AP_LIMIT_REQ_BODY_UNSET ((apr_off_t) -1) + #define AP_DEFAULT_LIMIT_REQ_BODY ((apr_off_t) 0) +@@ -4452,6 +4456,28 @@ static int core_post_config(apr_pool_t * + } + #endif + ++#ifdef HAVE_SELINUX ++ { ++ static int already_warned = 0; ++ int is_enabled = is_selinux_enabled() > 0; ++ ++ if (is_enabled && !already_warned) { ++ security_context_t con; ++ ++ if (getcon(&con) == 0) { ++ ++ ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, NULL, ++ "SELinux policy enabled; " ++ "httpd running as context %s", con); ++ ++ already_warned = 1; ++ ++ freecon(con); ++ } ++ } ++ } ++#endif ++ + return OK; + } + diff --git a/SOURCES/httpd-2.4.2-icons.patch b/SOURCES/httpd-2.4.2-icons.patch new file mode 100644 index 0000000..1341999 --- /dev/null +++ b/SOURCES/httpd-2.4.2-icons.patch @@ -0,0 +1,26 @@ + +- Fix config for /icons/ dir to allow symlink to poweredby.png. +- Avoid using coredump GIF for a directory called "core" + +Upstream-Status: vendor specific patch + +--- httpd-2.4.2/docs/conf/extra/httpd-autoindex.conf.in.icons ++++ httpd-2.4.2/docs/conf/extra/httpd-autoindex.conf.in +@@ -21,7 +21,7 @@ IndexOptions FancyIndexing HTMLTable Ver + Alias /icons/ "@exp_iconsdir@/" + + +- Options Indexes MultiViews ++ Options Indexes MultiViews FollowSymlinks + AllowOverride None + Require all granted + +@@ -53,7 +53,7 @@ AddIcon /icons/dvi.gif .dvi + AddIcon /icons/uuencoded.gif .uu + AddIcon /icons/script.gif .conf .sh .shar .csh .ksh .tcl + AddIcon /icons/tex.gif .tex +-AddIcon /icons/bomb.gif core ++AddIcon /icons/bomb.gif core. + + AddIcon /icons/back.gif .. + AddIcon /icons/hand.right.gif README diff --git a/SOURCES/httpd-2.4.3-apctl-systemd.patch b/SOURCES/httpd-2.4.3-apctl-systemd.patch new file mode 100644 index 0000000..5823aee --- /dev/null +++ b/SOURCES/httpd-2.4.3-apctl-systemd.patch @@ -0,0 +1,45 @@ + +Upstream-Status: vendor specific patch + +diff --git a/support/apachectl.in b/support/apachectl.in +index c6ac3ea..2599386 100644 +--- a/support/apachectl.in ++++ b/support/apachectl.in +@@ -100,9 +100,24 @@ fi + ERROR=$? + } + ++if [ "x$2" != "x" ] ; then ++ echo Passing arguments to httpd using apachectl is no longer supported. ++ echo You can only start/stop/restart httpd using this script. ++ echo If you want to pass extra arguments to httpd, edit the ++ echo /etc/sysconfig/httpd config file. ++fi ++ + case $ACMD in +-start|stop|restart|graceful|graceful-stop) +- $HTTPD $OPTIONS -k $ARGV ++start|stop|restart|status) ++ /usr/bin/systemctl $ACMD httpd.service ++ ERROR=$? ++ ;; ++graceful) ++ /usr/bin/systemctl reload httpd.service ++ ERROR=$? ++ ;; ++graceful-stop) ++ /usr/bin/systemctl stop httpd.service + ERROR=$? 
+ ;; + startssl|sslstart|start-SSL) +@@ -114,10 +129,6 @@ startssl|sslstart|start-SSL) + configtest) + testconfig + ;; +-status) +- checklynx +- $LYNX $STATUSURL | awk ' /process$/ { print; exit } { print } ' +- ;; + fullstatus) + checklynx + $LYNX $STATUSURL diff --git a/SOURCES/httpd-2.4.3-apxs.patch b/SOURCES/httpd-2.4.3-apxs.patch new file mode 100644 index 0000000..f4d2a87 --- /dev/null +++ b/SOURCES/httpd-2.4.3-apxs.patch @@ -0,0 +1,56 @@ +--- httpd-2.4.3/support/apxs.in.apxs ++++ httpd-2.4.3/support/apxs.in +@@ -25,7 +25,18 @@ package apxs; + + my %config_vars = (); + +-my $installbuilddir = "@exp_installbuilddir@"; ++# Awful hack to make apxs libdir-agnostic: ++my $pkg_config = "/usr/bin/pkg-config"; ++if (! -x "$pkg_config") { ++ error("$pkg_config not found!"); ++ exit(1); ++} ++ ++my $libdir = `pkg-config --variable=libdir apr-1`; ++chomp $libdir; ++ ++my $installbuilddir = $libdir . "/httpd/build"; ++ + get_config_vars("$installbuilddir/config_vars.mk",\%config_vars); + + # read the configuration variables once +@@ -275,7 +286,7 @@ if ($opt_g) { + $data =~ s|%NAME%|$name|sg; + $data =~ s|%TARGET%|$CFG_TARGET|sg; + $data =~ s|%PREFIX%|$prefix|sg; +- $data =~ s|%INSTALLBUILDDIR%|$installbuilddir|sg; ++ $data =~ s|%LIBDIR%|$libdir|sg; + + my ($mkf, $mods, $src) = ($data =~ m|^(.+)-=#=-\n(.+)-=#=-\n(.+)|s); + +@@ -453,11 +464,11 @@ if ($opt_c) { + my $ldflags = "$CFG_LDFLAGS"; + if ($opt_p == 1) { + +- my $apr_libs=`$apr_config --cflags --ldflags --link-libtool --libs`; ++ my $apr_libs=`$apr_config --cflags --ldflags --link-libtool`; + chomp($apr_libs); + my $apu_libs=""; + if ($apr_major_version < 2) { +- $apu_libs=`$apu_config --ldflags --link-libtool --libs`; ++ $apu_libs=`$apu_config --ldflags --link-libtool`; + chomp($apu_libs); + } + +@@ -672,8 +683,8 @@ __DATA__ + + builddir=. + top_srcdir=%PREFIX% +-top_builddir=%PREFIX% +-include %INSTALLBUILDDIR%/special.mk ++top_builddir=%LIBDIR%/httpd ++include %LIBDIR%/httpd/build/special.mk + + # the used tools + APXS=apxs diff --git a/SOURCES/httpd-2.4.3-layout.patch b/SOURCES/httpd-2.4.3-layout.patch new file mode 100644 index 0000000..f3ee9ec --- /dev/null +++ b/SOURCES/httpd-2.4.3-layout.patch @@ -0,0 +1,34 @@ + +Add layout for Fedora. 
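A build sketch for the layout patch that follows; it assumes the new config.layout section is registered under the name "Fedora" (the section name itself is not visible in the hunk below), and --enable-mpms-shared is shown only as a typical companion option:

# Configure the httpd build against the distribution layout instead of
# passing every directory flag by hand; "Fedora" is the layout name
# assumed to be defined by the hunk below.
./configure --enable-layout=Fedora --enable-mpms-shared=all
make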
+ +--- httpd-2.4.3/config.layout.layout ++++ httpd-2.4.3/config.layout +@@ -370,3 +370,28 @@ + logfiledir: ${localstatedir}/log/httpd + proxycachedir: ${localstatedir}/cache/httpd + ++ ++# Fedora/RHEL layout ++ ++ prefix: /usr ++ exec_prefix: ${prefix} ++ bindir: ${prefix}/bin ++ sbindir: ${prefix}/sbin ++ libdir: ${prefix}/lib ++ libexecdir: ${prefix}/libexec ++ mandir: ${prefix}/man ++ sysconfdir: /etc/httpd/conf ++ datadir: ${prefix}/share/httpd ++ installbuilddir: ${libdir}/httpd/build ++ errordir: ${datadir}/error ++ iconsdir: ${datadir}/icons ++ htdocsdir: /var/www/html ++ manualdir: ${datadir}/manual ++ cgidir: /var/www/cgi-bin ++ includedir: ${prefix}/include/httpd ++ localstatedir: /var ++ runtimedir: /run/httpd ++ logfiledir: ${localstatedir}/log/httpd ++ proxycachedir: ${localstatedir}/cache/httpd/proxy ++ davlockdb: ${localstatedir}/lib/dav/lockdb ++ diff --git a/SOURCES/httpd-2.4.3-mod_systemd.patch b/SOURCES/httpd-2.4.3-mod_systemd.patch new file mode 100644 index 0000000..a9b1fd9 --- /dev/null +++ b/SOURCES/httpd-2.4.3-mod_systemd.patch @@ -0,0 +1,163 @@ +--- httpd-2.4.3/modules/arch/unix/config5.m4.systemd ++++ httpd-2.4.3/modules/arch/unix/config5.m4 +@@ -18,6 +18,19 @@ APACHE_MODULE(privileges, Per-virtualhos + fi + ]) + ++ ++APACHE_MODULE(systemd, Systemd support, , , $unixd_mods_enabled, [ ++ AC_CHECK_LIB(systemd-daemon, sd_notify, SYSTEMD_LIBS="-lsystemd-daemon") ++ AC_CHECK_HEADERS(systemd/sd-daemon.h, [ap_HAVE_SD_DAEMON_H="yes"], [ap_HAVE_SD_DAEMON_H="no"]) ++ if test $ap_HAVE_SD_DAEMON_H = "no" || test -z "${SYSTEMD_LIBS}"; then ++ AC_MSG_WARN([Your system does not support systemd.]) ++ enable_systemd="no" ++ else ++ APR_ADDTO(MOD_SYSTEMD_LDADD, [$SYSTEMD_LIBS]) ++ enable_systemd="yes" ++ fi ++]) ++ + APR_ADDTO(INCLUDES, [-I\$(top_srcdir)/$modpath_current]) + + APACHE_MODPATH_FINISH +--- httpd-2.4.3/modules/arch/unix/mod_systemd.c.systemd ++++ httpd-2.4.3/modules/arch/unix/mod_systemd.c +@@ -0,0 +1,138 @@ ++/* Licensed to the Apache Software Foundation (ASF) under one or more ++ * contributor license agreements. See the NOTICE file distributed with ++ * this work for additional information regarding copyright ownership. ++ * The ASF licenses this file to You under the Apache License, Version 2.0 ++ * (the "License"); you may not use this file except in compliance with ++ * the License. You may obtain a copy of the License at ++ * ++ * http://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, ++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ * See the License for the specific language governing permissions and ++ * limitations under the License. 
++ * ++ */ ++ ++#include ++#include ++#include "ap_mpm.h" ++#include ++#include ++#include ++#include ++#include ++#include "unixd.h" ++#include "scoreboard.h" ++#include "mpm_common.h" ++ ++#include "systemd/sd-daemon.h" ++ ++#if APR_HAVE_UNISTD_H ++#include ++#endif ++ ++#define KBYTE 1024 ++ ++static pid_t pid; /* PID of the main httpd instance */ ++static int server_limit, thread_limit, threads_per_child, max_servers; ++static time_t last_update_time; ++static unsigned long last_update_access; ++static unsigned long last_update_kbytes; ++ ++static int systemd_pre_mpm(apr_pool_t *p, ap_scoreboard_e sb_type) ++{ ++ int rv; ++ last_update_time = time(0); ++ ++ ap_mpm_query(AP_MPMQ_HARD_LIMIT_THREADS, &thread_limit); ++ ap_mpm_query(AP_MPMQ_HARD_LIMIT_DAEMONS, &server_limit); ++ ap_mpm_query(AP_MPMQ_MAX_THREADS, &threads_per_child); ++ /* work around buggy MPMs */ ++ if (threads_per_child == 0) ++ threads_per_child = 1; ++ ap_mpm_query(AP_MPMQ_MAX_DAEMONS, &max_servers); ++ ++ pid = getpid(); ++ ++ rv = sd_notifyf(0, "READY=1\n" ++ "STATUS=Processing requests...\n" ++ "MAINPID=%lu", ++ (unsigned long) pid); ++ if (rv < 0) { ++ ap_log_perror(APLOG_MARK, APLOG_ERR, 0, p, ++ "sd_notifyf returned an error %d", rv); ++ } ++ ++ return OK; ++} ++ ++static int systemd_monitor(apr_pool_t *p, server_rec *s) ++{ ++ int i, j, res, rv; ++ process_score *ps_record; ++ worker_score *ws_record; ++ unsigned long access = 0; ++ unsigned long bytes = 0; ++ unsigned long kbytes = 0; ++ char bps[5]; ++ time_t now = time(0); ++ time_t elapsed = now - last_update_time; ++ ++ for (i = 0; i < server_limit; ++i) { ++ ps_record = ap_get_scoreboard_process(i); ++ for (j = 0; j < thread_limit; ++j) { ++ ws_record = ap_get_scoreboard_worker_from_indexes(i, j); ++ if (ap_extended_status && !ps_record->quiescing && ps_record->pid) { ++ res = ws_record->status; ++ if (ws_record->access_count != 0 || ++ (res != SERVER_READY && res != SERVER_DEAD)) { ++ access += ws_record->access_count; ++ bytes += ws_record->bytes_served; ++ if (bytes >= KBYTE) { ++ kbytes += (bytes >> 10); ++ bytes = bytes & 0x3ff; ++ } ++ } ++ } ++ } ++ } ++ ++ apr_strfsize((unsigned long)(KBYTE *(float) (kbytes - last_update_kbytes) ++ / (float) elapsed), bps); ++ ++ rv = sd_notifyf(0, "READY=1\n" ++ "STATUS=Total requests: %lu; Current requests/sec: %.3g; " ++ "Current traffic: %sB/sec\n", access, ++ ((float)access - last_update_access) / (float) elapsed, bps); ++ if (rv < 0) { ++ ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(00000) ++ "sd_notifyf returned an error %d", rv); ++ } ++ ++ last_update_access = access; ++ last_update_kbytes = kbytes; ++ last_update_time = now; ++ ++ return DECLINED; ++} ++ ++static void systemd_register_hooks(apr_pool_t *p) ++{ ++ /* We know the PID in this hook ... 
*/ ++ ap_hook_pre_mpm(systemd_pre_mpm, NULL, NULL, APR_HOOK_LAST); ++ /* Used to update httpd's status line using sd_notifyf */ ++ ap_hook_monitor(systemd_monitor, NULL, NULL, APR_HOOK_MIDDLE); ++} ++ ++module AP_MODULE_DECLARE_DATA systemd_module = ++{ ++ STANDARD20_MODULE_STUFF, ++ NULL, ++ NULL, ++ NULL, ++ NULL, ++ NULL, ++ systemd_register_hooks, ++}; diff --git a/SOURCES/httpd-2.4.3-sslsninotreq.patch b/SOURCES/httpd-2.4.3-sslsninotreq.patch new file mode 100644 index 0000000..6e158c6 --- /dev/null +++ b/SOURCES/httpd-2.4.3-sslsninotreq.patch @@ -0,0 +1,83 @@ +diff --git a/modules/ssl/ssl_engine_config.c b/modules/ssl/ssl_engine_config.c +index 15993f1..53ed6f1 100644 +--- a/modules/ssl/ssl_engine_config.c ++++ b/modules/ssl/ssl_engine_config.c +@@ -55,6 +55,7 @@ SSLModConfigRec *ssl_config_global_create(server_rec *s) + mc = (SSLModConfigRec *)apr_palloc(pool, sizeof(*mc)); + mc->pPool = pool; + mc->bFixed = FALSE; ++ mc->sni_required = FALSE; + + /* + * initialize per-module configuration +diff --git a/modules/ssl/ssl_engine_init.c b/modules/ssl/ssl_engine_init.c +index bf1f0e4..a7523de 100644 +--- a/modules/ssl/ssl_engine_init.c ++++ b/modules/ssl/ssl_engine_init.c +@@ -409,7 +409,7 @@ int ssl_init_Module(apr_pool_t *p, apr_pool_t *plog, + /* + * Configuration consistency checks + */ +- ssl_init_CheckServers(base_server, ptemp); ++ ssl_init_CheckServers(mc, base_server, ptemp); + + /* + * Announce mod_ssl and SSL library in HTTP Server field +@@ -1475,7 +1475,7 @@ void ssl_init_ConfigureServer(server_rec *s, + } + } + +-void ssl_init_CheckServers(server_rec *base_server, apr_pool_t *p) ++void ssl_init_CheckServers(SSLModConfigRec *mc, server_rec *base_server, apr_pool_t *p) + { + server_rec *s, *ps; + SSLSrvConfigRec *sc; +@@ -1557,6 +1557,7 @@ void ssl_init_CheckServers(server_rec *base_server, apr_pool_t *p) + } + + if (conflict) { ++ mc->sni_required = TRUE; + #ifdef OPENSSL_NO_TLSEXT + ap_log_error(APLOG_MARK, APLOG_WARNING, 0, base_server, APLOGNO(01917) + "Init: You should not use name-based " +diff --git a/modules/ssl/ssl_engine_kernel.c b/modules/ssl/ssl_engine_kernel.c +index bc9e26b..2460f01 100644 +--- a/modules/ssl/ssl_engine_kernel.c ++++ b/modules/ssl/ssl_engine_kernel.c +@@ -164,6 +164,7 @@ int ssl_hook_ReadReq(request_rec *r) + return DECLINED; + } + #ifndef OPENSSL_NO_TLSEXT ++ if (myModConfig(r->server)->sni_required) { + if ((servername = SSL_get_servername(ssl, TLSEXT_NAMETYPE_host_name))) { + char *host, *scope_id; + apr_port_t port; +@@ -206,6 +207,7 @@ int ssl_hook_ReadReq(request_rec *r) + " virtual host"); + return HTTP_FORBIDDEN; + } ++ } + #endif + SSL_set_app_data2(ssl, r); + +diff --git a/modules/ssl/ssl_private.h b/modules/ssl/ssl_private.h +index 75fc0e3..31dbfa9 100644 +--- a/modules/ssl/ssl_private.h ++++ b/modules/ssl/ssl_private.h +@@ -554,6 +554,7 @@ typedef struct { + struct { + void *pV1, *pV2, *pV3, *pV4, *pV5, *pV6, *pV7, *pV8, *pV9, *pV10; + } rCtx; ++ BOOL sni_required; + } SSLModConfigRec; + + /** Structure representing configured filenames for certs and keys for +@@ -786,7 +787,7 @@ const char *ssl_cmd_SSLFIPS(cmd_parms *cmd, void *dcfg, int flag); + int ssl_init_Module(apr_pool_t *, apr_pool_t *, apr_pool_t *, server_rec *); + void ssl_init_Engine(server_rec *, apr_pool_t *); + void ssl_init_ConfigureServer(server_rec *, apr_pool_t *, apr_pool_t *, SSLSrvConfigRec *); +-void ssl_init_CheckServers(server_rec *, apr_pool_t *); ++void ssl_init_CheckServers(SSLModConfigRec *mc, server_rec *, apr_pool_t *); + STACK_OF(X509_NAME) + 
*ssl_init_FindCAList(server_rec *, apr_pool_t *, const char *, const char *); + void ssl_init_Child(apr_pool_t *, server_rec *); diff --git a/SOURCES/httpd-2.4.4-cachehardmax.patch b/SOURCES/httpd-2.4.4-cachehardmax.patch new file mode 100644 index 0000000..de360ce --- /dev/null +++ b/SOURCES/httpd-2.4.4-cachehardmax.patch @@ -0,0 +1,82 @@ +diff --git a/modules/cache/cache_util.h b/modules/cache/cache_util.h +index eec38f3..1a2d5ee 100644 +--- a/modules/cache/cache_util.h ++++ b/modules/cache/cache_util.h +@@ -194,6 +194,9 @@ typedef struct { + unsigned int store_nostore_set:1; + unsigned int enable_set:1; + unsigned int disable_set:1; ++ /* treat maxex as hard limit */ ++ unsigned int hardmaxex:1; ++ unsigned int hardmaxex_set:1; + } cache_dir_conf; + + /* A linked-list of authn providers. */ +diff --git a/modules/cache/mod_cache.c b/modules/cache/mod_cache.c +index 4f2d3e0..30c88f4 100644 +--- a/modules/cache/mod_cache.c ++++ b/modules/cache/mod_cache.c +@@ -1299,6 +1299,11 @@ static apr_status_t cache_save_filter(ap_filter_t *f, apr_bucket_brigade *in) + exp = date + dconf->defex; + } + } ++ /* else, forcibly cap the expiry date if required */ ++ else if (dconf->hardmaxex && (date + dconf->maxex) < exp) { ++ exp = date + dconf->maxex; ++ } ++ + info->expire = exp; + + /* We found a stale entry which wasn't really stale. */ +@@ -1717,7 +1722,9 @@ static void *create_dir_config(apr_pool_t *p, char *dummy) + + /* array of providers for this URL space */ + dconf->cacheenable = apr_array_make(p, 10, sizeof(struct cache_enable)); +- ++ /* flag; treat maxex as hard limit */ ++ dconf->hardmaxex = 0; ++ dconf->hardmaxex_set = 0; + return dconf; + } + +@@ -1767,7 +1774,10 @@ static void *merge_dir_config(apr_pool_t *p, void *basev, void *addv) { + new->enable_set = add->enable_set || base->enable_set; + new->disable = (add->disable_set == 0) ? base->disable : add->disable; + new->disable_set = add->disable_set || base->disable_set; +- ++ new->hardmaxex = ++ (add->hardmaxex_set == 0) ++ ? base->hardmaxex ++ : add->hardmaxex; + return new; + } + +@@ -2096,12 +2106,18 @@ static const char *add_cache_disable(cmd_parms *parms, void *dummy, + } + + static const char *set_cache_maxex(cmd_parms *parms, void *dummy, +- const char *arg) ++ const char *arg, const char *hard) + { + cache_dir_conf *dconf = (cache_dir_conf *)dummy; + + dconf->maxex = (apr_time_t) (atol(arg) * MSEC_ONE_SEC); + dconf->maxex_set = 1; ++ ++ if (hard && strcasecmp(hard, "hard") == 0) { ++ dconf->hardmaxex = 1; ++ dconf->hardmaxex_set = 1; ++ } ++ + return NULL; + } + +@@ -2309,7 +2325,7 @@ static const command_rec cache_cmds[] = + "caching is enabled"), + AP_INIT_TAKE1("CacheDisable", add_cache_disable, NULL, RSRC_CONF|ACCESS_CONF, + "A partial URL prefix below which caching is disabled"), +- AP_INIT_TAKE1("CacheMaxExpire", set_cache_maxex, NULL, RSRC_CONF|ACCESS_CONF, ++ AP_INIT_TAKE12("CacheMaxExpire", set_cache_maxex, NULL, RSRC_CONF|ACCESS_CONF, + "The maximum time in seconds to cache a document"), + AP_INIT_TAKE1("CacheMinExpire", set_cache_minex, NULL, RSRC_CONF|ACCESS_CONF, + "The minimum time in seconds to cache a document"), diff --git a/SOURCES/httpd-2.4.4-export.patch b/SOURCES/httpd-2.4.4-export.patch new file mode 100644 index 0000000..eb670c6 --- /dev/null +++ b/SOURCES/httpd-2.4.4-export.patch @@ -0,0 +1,20 @@ + +There is no need to "suck in" the apr/apr-util symbols when using +a shared libapr{,util}, it just bloats the symbol table; so don't. 
+ +Upstream-HEAD: needed +Upstream-2.0: omit +Upstream-Status: EXPORT_DIRS change is conditional on using shared apr + +--- httpd-2.4.4/server/Makefile.in.export ++++ httpd-2.4.4/server/Makefile.in +@@ -57,9 +57,6 @@ export_files: + ( for dir in $(EXPORT_DIRS); do \ + ls $$dir/*.h ; \ + done; \ +- for dir in $(EXPORT_DIRS_APR); do \ +- ls $$dir/ap[ru].h $$dir/ap[ru]_*.h 2>/dev/null; \ +- done; \ + ) | sed -e s,//,/,g | sort -u > $@ + + exports.c: export_files diff --git a/SOURCES/httpd-2.4.4-malformed-host.patch b/SOURCES/httpd-2.4.4-malformed-host.patch new file mode 100644 index 0000000..57975e5 --- /dev/null +++ b/SOURCES/httpd-2.4.4-malformed-host.patch @@ -0,0 +1,12 @@ +diff --git a/server/protocol.c b/server/protocol.c +index e1ef204..d6d9165 100644 +--- a/server/protocol.c ++++ b/server/protocol.c +@@ -1049,6 +1049,7 @@ request_rec *ap_read_request(conn_rec *conn) + * now read. may update status. + */ + ap_update_vhost_from_headers(r); ++ access_status = r->status; + + /* Toggle to the Host:-based vhost's timeout mode to fetch the + * request body and send the response body, if needed. diff --git a/SOURCES/httpd-2.4.4-mod_unique_id.patch b/SOURCES/httpd-2.4.4-mod_unique_id.patch new file mode 100644 index 0000000..30bdfe0 --- /dev/null +++ b/SOURCES/httpd-2.4.4-mod_unique_id.patch @@ -0,0 +1,239 @@ +--- trunk/modules/metadata/mod_unique_id.c 2011/12/02 23:02:04 1209766 ++++ trunk/modules/metadata/mod_unique_id.c 2013/07/10 16:20:31 1501827 +@@ -31,14 +31,11 @@ + #include "http_log.h" + #include "http_protocol.h" /* for ap_hook_post_read_request */ + +-#if APR_HAVE_UNISTD_H +-#include /* for getpid() */ +-#endif ++#define ROOT_SIZE 10 + + typedef struct { + unsigned int stamp; +- unsigned int in_addr; +- unsigned int pid; ++ char root[ROOT_SIZE]; + unsigned short counter; + unsigned int thread_index; + } unique_id_rec; +@@ -64,20 +61,15 @@ + * gethostbyname (gethostname()) is unique across all the machines at the + * "site". + * +- * We also further assume that pids fit in 32-bits. If something uses more +- * than 32-bits, the fix is trivial, but it requires the unrolled uuencoding +- * loop to be extended. * A similar fix is needed to support multithreaded +- * servers, using a pid/tid combo. +- * +- * Together, the in_addr and pid are assumed to absolutely uniquely identify +- * this one child from all other currently running children on all servers +- * (including this physical server if it is running multiple httpds) from each ++ * The root is assumed to absolutely uniquely identify this one child ++ * from all other currently running children on all servers (including ++ * this physical server if it is running multiple httpds) from each + * other. + * +- * The stamp and counter are used to distinguish all hits for a particular +- * (in_addr,pid) pair. The stamp is updated using r->request_time, +- * saving cpu cycles. The counter is never reset, and is used to permit up to +- * 64k requests in a single second by a single child. ++ * The stamp and counter are used to distinguish all hits for a ++ * particular root. The stamp is updated using r->request_time, ++ * saving cpu cycles. The counter is never reset, and is used to ++ * permit up to 64k requests in a single second by a single child. + * + * The 144-bits of unique_id_rec are encoded using the alphabet + * [A-Za-z0-9@-], resulting in 24 bytes of printable characters. That is then +@@ -92,7 +84,7 @@ + * module change. + * + * It is highly desirable that identifiers exist for "eternity". 
But future +- * needs (such as much faster webservers, moving to 64-bit pids, or moving to a ++ * needs (such as much faster webservers, or moving to a + * multithreaded server) may dictate a need to change the contents of + * unique_id_rec. Such a future implementation should ensure that the first + * field is still a time_t stamp. By doing that, it is possible for a site to +@@ -100,7 +92,15 @@ + * wait one entire second, and then start all of their new-servers. This + * procedure will ensure that the new space of identifiers is completely unique + * from the old space. (Since the first four unencoded bytes always differ.) ++ * ++ * Note: previous implementations used 32-bits of IP address plus pid ++ * in place of the PRNG output in the "root" field. This was ++ * insufficient for IPv6-only hosts, required working DNS to determine ++ * a unique IP address (fragile), and needed a [0, 1) second sleep ++ * call at startup to avoid pid reuse. Use of the PRNG avoids all ++ * these issues. + */ ++ + /* + * Sun Jun 7 05:43:49 CEST 1998 -- Alvaro + * More comments: +@@ -116,8 +116,6 @@ + * htonl/ntohl. Well, this shouldn't be a problem till year 2106. + */ + +-static unsigned global_in_addr; +- + /* + * XXX: We should have a per-thread counter and not use cur_unique_id.counter + * XXX: in all threads, because this is bad for performance on multi-processor +@@ -129,7 +127,7 @@ + /* + * Number of elements in the structure unique_id_rec. + */ +-#define UNIQUE_ID_REC_MAX 5 ++#define UNIQUE_ID_REC_MAX 4 + + static unsigned short unique_id_rec_offset[UNIQUE_ID_REC_MAX], + unique_id_rec_size[UNIQUE_ID_REC_MAX], +@@ -138,113 +136,32 @@ + + static int unique_id_global_init(apr_pool_t *p, apr_pool_t *plog, apr_pool_t *ptemp, server_rec *main_server) + { +- char str[APRMAXHOSTLEN + 1]; +- apr_status_t rv; +- char *ipaddrstr; +- apr_sockaddr_t *sockaddr; +- + /* + * Calculate the sizes and offsets in cur_unique_id. + */ + unique_id_rec_offset[0] = APR_OFFSETOF(unique_id_rec, stamp); + unique_id_rec_size[0] = sizeof(cur_unique_id.stamp); +- unique_id_rec_offset[1] = APR_OFFSETOF(unique_id_rec, in_addr); +- unique_id_rec_size[1] = sizeof(cur_unique_id.in_addr); +- unique_id_rec_offset[2] = APR_OFFSETOF(unique_id_rec, pid); +- unique_id_rec_size[2] = sizeof(cur_unique_id.pid); +- unique_id_rec_offset[3] = APR_OFFSETOF(unique_id_rec, counter); +- unique_id_rec_size[3] = sizeof(cur_unique_id.counter); +- unique_id_rec_offset[4] = APR_OFFSETOF(unique_id_rec, thread_index); +- unique_id_rec_size[4] = sizeof(cur_unique_id.thread_index); ++ unique_id_rec_offset[1] = APR_OFFSETOF(unique_id_rec, root); ++ unique_id_rec_size[1] = sizeof(cur_unique_id.root); ++ unique_id_rec_offset[2] = APR_OFFSETOF(unique_id_rec, counter); ++ unique_id_rec_size[2] = sizeof(cur_unique_id.counter); ++ unique_id_rec_offset[3] = APR_OFFSETOF(unique_id_rec, thread_index); ++ unique_id_rec_size[3] = sizeof(cur_unique_id.thread_index); + unique_id_rec_total_size = unique_id_rec_size[0] + unique_id_rec_size[1] + +- unique_id_rec_size[2] + unique_id_rec_size[3] + +- unique_id_rec_size[4]; ++ unique_id_rec_size[2] + unique_id_rec_size[3]; + + /* + * Calculate the size of the structure when encoded. + */ + unique_id_rec_size_uu = (unique_id_rec_total_size*8+5)/6; + +- /* +- * Now get the global in_addr. 
Note that it is not sufficient to use one +- * of the addresses from the main_server, since those aren't as likely to +- * be unique as the physical address of the machine +- */ +- if ((rv = apr_gethostname(str, sizeof(str) - 1, p)) != APR_SUCCESS) { +- ap_log_error(APLOG_MARK, APLOG_ALERT, rv, main_server, APLOGNO(01563) +- "unable to find hostname of the server"); +- return HTTP_INTERNAL_SERVER_ERROR; +- } +- +- if ((rv = apr_sockaddr_info_get(&sockaddr, str, AF_INET, 0, 0, p)) == APR_SUCCESS) { +- global_in_addr = sockaddr->sa.sin.sin_addr.s_addr; +- } +- else { +- ap_log_error(APLOG_MARK, APLOG_ALERT, rv, main_server, APLOGNO(01564) +- "unable to find IPv4 address of \"%s\"", str); +-#if APR_HAVE_IPV6 +- if ((rv = apr_sockaddr_info_get(&sockaddr, str, AF_INET6, 0, 0, p)) == APR_SUCCESS) { +- memcpy(&global_in_addr, +- (char *)sockaddr->ipaddr_ptr + sockaddr->ipaddr_len - sizeof(global_in_addr), +- sizeof(global_in_addr)); +- ap_log_error(APLOG_MARK, APLOG_ALERT, rv, main_server, APLOGNO(01565) +- "using low-order bits of IPv6 address " +- "as if they were unique"); +- } +- else +-#endif +- return HTTP_INTERNAL_SERVER_ERROR; +- } +- +- apr_sockaddr_ip_get(&ipaddrstr, sockaddr); +- ap_log_error(APLOG_MARK, APLOG_INFO, 0, main_server, APLOGNO(01566) "using ip addr %s", +- ipaddrstr); +- +- /* +- * If the server is pummelled with restart requests we could possibly end +- * up in a situation where we're starting again during the same second +- * that has been used in previous identifiers. Avoid that situation. +- * +- * In truth, for this to actually happen not only would it have to restart +- * in the same second, but it would have to somehow get the same pids as +- * one of the other servers that was running in that second. Which would +- * mean a 64k wraparound on pids ... not very likely at all. +- * +- * But protecting against it is relatively cheap. We just sleep into the +- * next second. +- */ +- apr_sleep(apr_time_from_sec(1) - apr_time_usec(apr_time_now())); + return OK; + } + + static void unique_id_child_init(apr_pool_t *p, server_rec *s) + { +- pid_t pid; +- +- /* +- * Note that we use the pid because it's possible that on the same +- * physical machine there are multiple servers (i.e. using Listen). But +- * it's guaranteed that none of them will share the same pids between +- * children. +- * +- * XXX: for multithread this needs to use a pid/tid combo and probably +- * needs to be expanded to 32 bits +- */ +- pid = getpid(); +- cur_unique_id.pid = pid; +- +- /* +- * Test our assumption that the pid is 32-bits. It's possible that +- * 64-bit machines will declare pid_t to be 64 bits but only use 32 +- * of them. It would have been really nice to test this during +- * global_init ... but oh well. +- */ +- if ((pid_t)cur_unique_id.pid != pid) { +- ap_log_error(APLOG_MARK, APLOG_CRIT, 0, s, APLOGNO(01567) +- "oh no! pids are greater than 32-bits! I'm broken!"); +- } +- +- cur_unique_id.in_addr = global_in_addr; ++ ap_random_insecure_bytes(&cur_unique_id.root, ++ sizeof(cur_unique_id.root)); + + /* + * If we use 0 as the initial counter we have a little less protection +@@ -253,13 +170,6 @@ + */ + ap_random_insecure_bytes(&cur_unique_id.counter, + sizeof(cur_unique_id.counter)); +- +- /* +- * We must always use network ordering for these bytes, so that +- * identifiers are comparable between machines of different byte +- * orderings. Note in_addr is already in network order. 
+- */ +- cur_unique_id.pid = htonl(cur_unique_id.pid); + } + + /* NOTE: This is *NOT* the same encoding used by base64encode ... the last two +@@ -291,10 +201,8 @@ + unsigned short counter; + int i,j,k; + +- new_unique_id.in_addr = cur_unique_id.in_addr; +- new_unique_id.pid = cur_unique_id.pid; ++ memcpy(&new_unique_id.root, &cur_unique_id.root, ROOT_SIZE); + new_unique_id.counter = cur_unique_id.counter; +- + new_unique_id.stamp = htonl((unsigned int)apr_time_sec(r->request_time)); + new_unique_id.thread_index = htonl((unsigned int)r->connection->id); + diff --git a/SOURCES/httpd-2.4.4-r1337344+.patch b/SOURCES/httpd-2.4.4-r1337344+.patch new file mode 100644 index 0000000..6e5c3e7 --- /dev/null +++ b/SOURCES/httpd-2.4.4-r1337344+.patch @@ -0,0 +1,250 @@ +# ./pullrev.sh 1337344 1341905 1342065 1341930 + +suexec enhancements: + +1) use syslog for logging +2) use capabilities not setuid/setgid root binary + +http://svn.apache.org/viewvc?view=revision&revision=1337344 +http://svn.apache.org/viewvc?view=revision&revision=1341905 +http://svn.apache.org/viewvc?view=revision&revision=1342065 +http://svn.apache.org/viewvc?view=revision&revision=1341930 + +--- httpd-2.4.4/configure.in.r1337344+ ++++ httpd-2.4.4/configure.in +@@ -734,7 +734,24 @@ APACHE_HELP_STRING(--with-suexec-gidmin, + + AC_ARG_WITH(suexec-logfile, + APACHE_HELP_STRING(--with-suexec-logfile,Set the logfile),[ +- AC_DEFINE_UNQUOTED(AP_LOG_EXEC, "$withval", [SuExec log file] ) ] ) ++ if test "x$withval" = "xyes"; then ++ AC_DEFINE_UNQUOTED(AP_LOG_EXEC, "$withval", [SuExec log file]) ++ fi ++]) ++ ++AC_ARG_WITH(suexec-syslog, ++APACHE_HELP_STRING(--with-suexec-syslog,Set the logfile),[ ++ if test $withval = "yes"; then ++ if test "x${with_suexec_logfile}" != "xno"; then ++ AC_MSG_NOTICE([hint: use "--without-suexec-logfile --with-suexec-syslog"]) ++ AC_MSG_ERROR([suexec does not support both logging to file and syslog]) ++ fi ++ AC_CHECK_FUNCS([vsyslog], [], [ ++ AC_MSG_ERROR([cannot support syslog from suexec without vsyslog()])]) ++ AC_DEFINE(AP_LOG_SYSLOG, 1, [SuExec log to syslog]) ++ fi ++]) ++ + + AC_ARG_WITH(suexec-safepath, + APACHE_HELP_STRING(--with-suexec-safepath,Set the safepath),[ +@@ -744,6 +761,15 @@ AC_ARG_WITH(suexec-umask, + APACHE_HELP_STRING(--with-suexec-umask,umask for suexec'd process),[ + AC_DEFINE_UNQUOTED(AP_SUEXEC_UMASK, 0$withval, [umask for suexec'd process] ) ] ) + ++INSTALL_SUEXEC=setuid ++AC_ARG_ENABLE([suexec-capabilities], ++APACHE_HELP_STRING(--enable-suexec-capabilities,Use Linux capability bits not setuid root suexec), [ ++INSTALL_SUEXEC=caps ++AC_DEFINE(AP_SUEXEC_CAPABILITIES, 1, ++ [Enable if suexec is installed with Linux capabilities, not setuid]) ++]) ++APACHE_SUBST(INSTALL_SUEXEC) ++ + dnl APR should go after the other libs, so the right symbols can be picked up + if test x${apu_found} != xobsolete; then + AP_LIBS="$AP_LIBS `$apu_config --avoid-ldap --link-libtool`" +--- httpd-2.4.4/docs/manual/suexec.html.en.r1337344+ ++++ httpd-2.4.4/docs/manual/suexec.html.en +@@ -372,6 +372,21 @@ + together with the --enable-suexec option to let + APACI accept your request for using the suEXEC feature. + ++
++    --enable-suexec-capabilities
++
++    Linux specific: Normally,
++    the suexec binary is installed "setuid/setgid
++    root", which allows it to run with the full privileges of the
++    root user. If this option is used, the suexec
++    binary will instead be installed with only the setuid/setgid
++    "capability" bits set, which is the subset of full root
++    priviliges required for suexec operation. Note that
++    the suexec binary may not be able to write to a log
++    file in this mode; it is recommended that the
++    --with-suexec-syslog --without-suexec-logfile
++    options are used in conjunction with this mode, so that syslog
++    logging is used instead.
++
+     --with-suexec-bin=PATH
+ 
+     The path to the suexec binary must be hard-coded
+@@ -433,6 +448,12 @@
+     "suexec_log" and located in your standard logfile
+     directory (--logfiledir).
+ 
++    --with-suexec-syslog
++
++    If defined, suexec will log notices and errors to syslog
++    instead of a logfile. This option must be combined
++    with --without-suexec-logfile.
++
+     --with-suexec-safepath=PATH
+ 
+     Define a safe PATH environment to pass to CGI
+@@ -550,9 +571,12 @@ Group webgroup
+ 
+     The suEXEC wrapper will write log information
+     to the file defined with the --with-suexec-logfile
+-    option as indicated above. If you feel you have configured and
+-    installed the wrapper properly, have a look at this log and the
+-    error_log for the server to see where you may have gone astray.
++    option as indicated above, or to syslog if --with-suexec-syslog
++    is used. If you feel you have configured and
++    installed the wrapper properly, have a look at the log and the
++    error_log for the server to see where you may have gone astray.
++    The output of "suexec -V" will show the options
++    used to compile suexec, if using a binary distribution.
+ 
+     top
+ 
+@@ -640,4 +664,4 @@ if (typeof(prettyPrint) !== 'undefined') + prettyPrint(); + } + //--> +- +\ No newline at end of file ++ +--- httpd-2.4.4/Makefile.in.r1337344+ ++++ httpd-2.4.4/Makefile.in +@@ -238,11 +238,22 @@ install-man: + cd $(DESTDIR)$(manualdir) && find . -name ".svn" -type d -print | xargs rm -rf 2>/dev/null || true; \ + fi + +-install-suexec: ++install-suexec: install-suexec-binary install-suexec-$(INSTALL_SUEXEC) ++ ++install-suexec-binary: + @if test -f $(builddir)/support/suexec; then \ + test -d $(DESTDIR)$(sbindir) || $(MKINSTALLDIRS) $(DESTDIR)$(sbindir); \ + $(INSTALL_PROGRAM) $(top_builddir)/support/suexec $(DESTDIR)$(sbindir); \ +- chmod 4755 $(DESTDIR)$(sbindir)/suexec; \ ++ fi ++ ++install-suexec-setuid: ++ @if test -f $(builddir)/support/suexec; then \ ++ chmod 4755 $(DESTDIR)$(sbindir)/suexec; \ ++ fi ++ ++install-suexec-caps: ++ @if test -f $(builddir)/support/suexec; then \ ++ setcap 'cap_setuid,cap_setgid+pe' $(DESTDIR)$(sbindir)/suexec; \ + fi + + suexec: +--- httpd-2.4.4/modules/arch/unix/mod_unixd.c.r1337344+ ++++ httpd-2.4.4/modules/arch/unix/mod_unixd.c +@@ -284,6 +284,13 @@ unixd_set_suexec(cmd_parms *cmd, void *d + return NULL; + } + ++#ifdef AP_SUEXEC_CAPABILITIES ++/* If suexec is using capabilities, don't test for the setuid bit. */ ++#define SETUID_TEST(finfo) (1) ++#else ++#define SETUID_TEST(finfo) (finfo.protection & APR_USETID) ++#endif ++ + static int + unixd_pre_config(apr_pool_t *pconf, apr_pool_t *plog, + apr_pool_t *ptemp) +@@ -300,7 +307,7 @@ unixd_pre_config(apr_pool_t *pconf, apr_ + ap_unixd_config.suexec_enabled = 0; + if ((apr_stat(&wrapper, SUEXEC_BIN, APR_FINFO_NORM, ptemp)) + == APR_SUCCESS) { +- if ((wrapper.protection & APR_USETID) && wrapper.user == 0 ++ if (SETUID_TEST(wrapper) && wrapper.user == 0 + && (access(SUEXEC_BIN, R_OK|X_OK) == 0)) { + ap_unixd_config.suexec_enabled = 1; + ap_unixd_config.suexec_disabled_reason = ""; +--- httpd-2.4.4/support/suexec.c.r1337344+ ++++ httpd-2.4.4/support/suexec.c +@@ -58,6 +58,10 @@ + #include + #endif + ++#ifdef AP_LOG_SYSLOG ++#include ++#endif ++ + #if defined(PATH_MAX) + #define AP_MAXPATH PATH_MAX + #elif defined(MAXPATHLEN) +@@ -69,7 +73,20 @@ + #define AP_ENVBUF 256 + + extern char **environ; ++ ++#ifdef AP_LOG_SYSLOG ++/* Syslog support. */ ++#if !defined(AP_LOG_FACILITY) && defined(LOG_AUTHPRIV) ++#define AP_LOG_FACILITY LOG_AUTHPRIV ++#elif !defined(AP_LOG_FACILITY) ++#define AP_LOG_FACILITY LOG_AUTH ++#endif ++ ++static int log_open; ++#else ++/* Non-syslog support. */ + static FILE *log = NULL; ++#endif + + static const char *const safe_env_lst[] = + { +@@ -137,7 +154,14 @@ static void err_output(int is_error, con + + static void err_output(int is_error, const char *fmt, va_list ap) + { +-#ifdef AP_LOG_EXEC ++#if defined(AP_LOG_SYSLOG) ++ if (!log_open) { ++ openlog("suexec", LOG_PID, AP_LOG_FACILITY); ++ log_open = 1; ++ } ++ ++ vsyslog(is_error ? LOG_ERR : LOG_INFO, fmt, ap); ++#elif defined(AP_LOG_EXEC) + time_t timevar; + struct tm *lt; + +@@ -295,7 +319,9 @@ int main(int argc, char *argv[]) + #ifdef AP_HTTPD_USER + fprintf(stderr, " -D AP_HTTPD_USER=\"%s\"\n", AP_HTTPD_USER); + #endif +-#ifdef AP_LOG_EXEC ++#if defined(AP_LOG_SYSLOG) ++ fprintf(stderr, " -D AP_LOG_SYSLOG\n"); ++#elif defined(AP_LOG_EXEC) + fprintf(stderr, " -D AP_LOG_EXEC=\"%s\"\n", AP_LOG_EXEC); + #endif + #ifdef AP_SAFE_PATH +@@ -591,6 +617,12 @@ int main(int argc, char *argv[]) + #endif /* AP_SUEXEC_UMASK */ + + /* Be sure to close the log file so the CGI can't mess with it. 
*/ ++#ifdef AP_LOG_SYSLOG ++ if (log_open) { ++ closelog(); ++ log_open = 0; ++ } ++#else + if (log != NULL) { + #if APR_HAVE_FCNTL_H + /* +@@ -612,6 +644,7 @@ int main(int argc, char *argv[]) + log = NULL; + #endif + } ++#endif + + /* + * Execute the command, replacing our image with its own. diff --git a/SOURCES/httpd-2.4.6-CVE-2013-4352.patch b/SOURCES/httpd-2.4.6-CVE-2013-4352.patch new file mode 100644 index 0000000..48a52f2 --- /dev/null +++ b/SOURCES/httpd-2.4.6-CVE-2013-4352.patch @@ -0,0 +1,25 @@ +--- a/modules/cache/cache_storage.c 2013/09/14 13:30:39 1523234 ++++ b/modules/cache/cache_storage.c 2013/09/14 13:32:25 1523235 +@@ -713,7 +713,9 @@ + || APR_SUCCESS + != cache_canonicalise_key(r, r->pool, location, + &location_uri, &location_key) +- || strcmp(r->parsed_uri.hostname, location_uri.hostname)) { ++ || !(r->parsed_uri.hostname && location_uri.hostname ++ && !strcmp(r->parsed_uri.hostname, ++ location_uri.hostname))) { + location_key = NULL; + } + } +@@ -726,8 +728,9 @@ + || APR_SUCCESS + != cache_canonicalise_key(r, r->pool, content_location, + &content_location_uri, &content_location_key) +- || strcmp(r->parsed_uri.hostname, +- content_location_uri.hostname)) { ++ || !(r->parsed_uri.hostname && content_location_uri.hostname ++ && !strcmp(r->parsed_uri.hostname, ++ content_location_uri.hostname))) { + content_location_key = NULL; + } + } diff --git a/SOURCES/httpd-2.4.6-CVE-2013-5704.patch b/SOURCES/httpd-2.4.6-CVE-2013-5704.patch new file mode 100644 index 0000000..ee42b25 --- /dev/null +++ b/SOURCES/httpd-2.4.6-CVE-2013-5704.patch @@ -0,0 +1,381 @@ +diff --git a/include/http_core.h b/include/http_core.h +index 3c47989..f6f4aa2 100644 +--- a/include/http_core.h ++++ b/include/http_core.h +@@ -663,6 +663,10 @@ typedef struct { + #define AP_TRACE_ENABLE 1 + #define AP_TRACE_EXTENDED 2 + int trace_enable; ++#define AP_MERGE_TRAILERS_UNSET 0 ++#define AP_MERGE_TRAILERS_ENABLE 1 ++#define AP_MERGE_TRAILERS_DISABLE 2 ++ int merge_trailers; + + } core_server_config; + +diff --git a/include/httpd.h b/include/httpd.h +index 36cd58d..2e415f9 100644 +--- a/include/httpd.h ++++ b/include/httpd.h +@@ -1032,6 +1032,11 @@ struct request_rec { + */ + apr_sockaddr_t *useragent_addr; + char *useragent_ip; ++ ++ /** MIME trailer environment from the request */ ++ apr_table_t *trailers_in; ++ /** MIME trailer environment from the response */ ++ apr_table_t *trailers_out; + }; + + /** +diff --git a/modules/http/http_filters.c b/modules/http/http_filters.c +index 24a939a..2ae8f46 100644 +--- a/modules/http/http_filters.c ++++ b/modules/http/http_filters.c +@@ -214,6 +214,49 @@ static apr_status_t get_chunk_line(http_ctx_t *ctx, apr_bucket_brigade *b, + } + + ++static apr_status_t read_chunked_trailers(http_ctx_t *ctx, ap_filter_t *f, ++ apr_bucket_brigade *b, int merge) ++{ ++ int rv; ++ apr_bucket *e; ++ request_rec *r = f->r; ++ apr_table_t *saved_headers_in = r->headers_in; ++ int saved_status = r->status; ++ ++ r->status = HTTP_OK; ++ r->headers_in = r->trailers_in; ++ apr_table_clear(r->headers_in); ++ ctx->state = BODY_NONE; ++ ap_get_mime_headers(r); ++ ++ if(r->status == HTTP_OK) { ++ r->status = saved_status; ++ e = apr_bucket_eos_create(f->c->bucket_alloc); ++ APR_BRIGADE_INSERT_TAIL(b, e); ++ ctx->eos_sent = 1; ++ rv = APR_SUCCESS; ++ } ++ else { ++ const char *error_notes = apr_table_get(r->notes, ++ "error-notes"); ++ ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, ++ "Error while reading HTTP trailer: %i%s%s", ++ r->status, error_notes ? ": " : "", ++ error_notes ? 
error_notes : ""); ++ rv = APR_EINVAL; ++ } ++ ++ if(!merge) { ++ r->headers_in = saved_headers_in; ++ } ++ else { ++ r->headers_in = apr_table_overlay(r->pool, saved_headers_in, ++ r->trailers_in); ++ } ++ ++ return rv; ++} ++ + /* This is the HTTP_INPUT filter for HTTP requests and responses from + * proxied servers (mod_proxy). It handles chunked and content-length + * bodies. This can only be inserted/used after the headers +@@ -223,6 +266,7 @@ apr_status_t ap_http_filter(ap_filter_t *f, apr_bucket_brigade *b, + ap_input_mode_t mode, apr_read_type_e block, + apr_off_t readbytes) + { ++ core_server_config *conf; + apr_bucket *e; + http_ctx_t *ctx = f->ctx; + apr_status_t rv; +@@ -230,6 +274,9 @@ apr_status_t ap_http_filter(ap_filter_t *f, apr_bucket_brigade *b, + int http_error = HTTP_REQUEST_ENTITY_TOO_LARGE; + apr_bucket_brigade *bb; + ++ conf = (core_server_config *) ++ ap_get_module_config(f->r->server->module_config, &core_module); ++ + /* just get out of the way of things we don't want. */ + if (mode != AP_MODE_READBYTES && mode != AP_MODE_GETLINE) { + return ap_get_brigade(f->next, b, mode, block, readbytes); +@@ -403,13 +450,8 @@ apr_status_t ap_http_filter(ap_filter_t *f, apr_bucket_brigade *b, + } + + if (!ctx->remaining) { +- /* Handle trailers by calling ap_get_mime_headers again! */ +- ctx->state = BODY_NONE; +- ap_get_mime_headers(f->r); +- e = apr_bucket_eos_create(f->c->bucket_alloc); +- APR_BRIGADE_INSERT_TAIL(b, e); +- ctx->eos_sent = 1; +- return APR_SUCCESS; ++ return read_chunked_trailers(ctx, f, b, ++ conf->merge_trailers == AP_MERGE_TRAILERS_ENABLE); + } + } + } +@@ -509,13 +551,8 @@ apr_status_t ap_http_filter(ap_filter_t *f, apr_bucket_brigade *b, + } + + if (!ctx->remaining) { +- /* Handle trailers by calling ap_get_mime_headers again! 
*/ +- ctx->state = BODY_NONE; +- ap_get_mime_headers(f->r); +- e = apr_bucket_eos_create(f->c->bucket_alloc); +- APR_BRIGADE_INSERT_TAIL(b, e); +- ctx->eos_sent = 1; +- return APR_SUCCESS; ++ return read_chunked_trailers(ctx, f, b, ++ conf->merge_trailers == AP_MERGE_TRAILERS_ENABLE); + } + } + break; +diff --git a/modules/http/http_request.c b/modules/http/http_request.c +index 796d506..cdfec8b 100644 +--- a/modules/http/http_request.c ++++ b/modules/http/http_request.c +@@ -463,6 +463,7 @@ static request_rec *internal_internal_redirect(const char *new_uri, + new->main = r->main; + + new->headers_in = r->headers_in; ++ new->trailers_in = r->trailers_in; + new->headers_out = apr_table_make(r->pool, 12); + if (ap_is_HTTP_REDIRECT(new->status)) { + const char *location = apr_table_get(r->headers_out, "Location"); +@@ -470,6 +471,7 @@ static request_rec *internal_internal_redirect(const char *new_uri, + apr_table_setn(new->headers_out, "Location", location); + } + new->err_headers_out = r->err_headers_out; ++ new->trailers_out = apr_table_make(r->pool, 5); + new->subprocess_env = rename_original_env(r->pool, r->subprocess_env); + new->notes = apr_table_make(r->pool, 5); + +@@ -583,6 +585,8 @@ AP_DECLARE(void) ap_internal_fast_redirect(request_rec *rr, request_rec *r) + r->headers_out); + r->err_headers_out = apr_table_overlay(r->pool, rr->err_headers_out, + r->err_headers_out); ++ r->trailers_out = apr_table_overlay(r->pool, rr->trailers_out, ++ r->trailers_out); + r->subprocess_env = apr_table_overlay(r->pool, rr->subprocess_env, + r->subprocess_env); + +diff --git a/modules/loggers/mod_log_config.c b/modules/loggers/mod_log_config.c +index 25f5030..b021dd3 100644 +--- a/modules/loggers/mod_log_config.c ++++ b/modules/loggers/mod_log_config.c +@@ -431,6 +431,12 @@ static const char *log_header_in(request_rec *r, char *a) + return ap_escape_logitem(r->pool, apr_table_get(r->headers_in, a)); + } + ++static const char *log_trailer_in(request_rec *r, char *a) ++{ ++ return ap_escape_logitem(r->pool, apr_table_get(r->trailers_in, a)); ++} ++ ++ + static APR_INLINE char *find_multiple_headers(apr_pool_t *pool, + const apr_table_t *table, + const char *key) +@@ -514,6 +520,11 @@ static const char *log_header_out(request_rec *r, char *a) + return ap_escape_logitem(r->pool, cp); + } + ++static const char *log_trailer_out(request_rec *r, char *a) ++{ ++ return ap_escape_logitem(r->pool, apr_table_get(r->trailers_out, a)); ++} ++ + static const char *log_note(request_rec *r, char *a) + { + return ap_escape_logitem(r->pool, apr_table_get(r->notes, a)); +@@ -916,7 +927,7 @@ static char *parse_log_misc_string(apr_pool_t *p, log_format_item *it, + static char *parse_log_item(apr_pool_t *p, log_format_item *it, const char **sa) + { + const char *s = *sa; +- ap_log_handler *handler; ++ ap_log_handler *handler = NULL; + + if (*s != '%') { + return parse_log_misc_string(p, it, sa); +@@ -986,7 +997,16 @@ static char *parse_log_item(apr_pool_t *p, log_format_item *it, const char **sa) + break; + + default: +- handler = (ap_log_handler *)apr_hash_get(log_hash, s++, 1); ++ /* check for '^' + two character format first */ ++ if (*s == '^' && *(s+1) && *(s+2)) { ++ handler = (ap_log_handler *)apr_hash_get(log_hash, s, 3); ++ if (handler) { ++ s += 3; ++ } ++ } ++ if (!handler) { ++ handler = (ap_log_handler *)apr_hash_get(log_hash, s++, 1); ++ } + if (!handler) { + char dummy[2]; + +@@ -1516,7 +1536,7 @@ static void ap_register_log_handler(apr_pool_t *p, char *tag, + log_struct->func = handler; + 
log_struct->want_orig_default = def; + +- apr_hash_set(log_hash, tag, 1, (const void *)log_struct); ++ apr_hash_set(log_hash, tag, strlen(tag), (const void *)log_struct); + } + static ap_log_writer_init* ap_log_set_writer_init(ap_log_writer_init *handle) + { +@@ -1686,6 +1706,9 @@ static int log_pre_config(apr_pool_t *p, apr_pool_t *plog, apr_pool_t *ptemp) + log_pfn_register(p, "U", log_request_uri, 1); + log_pfn_register(p, "s", log_status, 1); + log_pfn_register(p, "R", log_handler, 1); ++ ++ log_pfn_register(p, "^ti", log_trailer_in, 0); ++ log_pfn_register(p, "^to", log_trailer_out, 0); + } + + /* reset to default conditions */ +diff --git a/modules/proxy/mod_proxy_http.c b/modules/proxy/mod_proxy_http.c +index 7ae0fa4..05f33b4 100644 +--- a/modules/proxy/mod_proxy_http.c ++++ b/modules/proxy/mod_proxy_http.c +@@ -994,8 +994,11 @@ static request_rec *make_fake_req(conn_rec *c, request_rec *r) + rp->status = HTTP_OK; + + rp->headers_in = apr_table_make(pool, 50); ++ rp->trailers_in = apr_table_make(pool, 5); ++ + rp->subprocess_env = apr_table_make(pool, 50); + rp->headers_out = apr_table_make(pool, 12); ++ rp->trailers_out = apr_table_make(pool, 5); + rp->err_headers_out = apr_table_make(pool, 5); + rp->notes = apr_table_make(pool, 5); + +@@ -1076,6 +1079,7 @@ static void ap_proxy_read_headers(request_rec *r, request_rec *rr, + psc = (proxy_server_conf *) ap_get_module_config(sconf, &proxy_module); + + r->headers_out = apr_table_make(r->pool, 20); ++ r->trailers_out = apr_table_make(r->pool, 5); + *pread_len = 0; + + /* +@@ -1206,6 +1210,14 @@ apr_status_t ap_proxygetline(apr_bucket_brigade *bb, char *s, int n, request_rec + #define AP_MAX_INTERIM_RESPONSES 10 + #endif + ++static int add_trailers(void *data, const char *key, const char *val) ++{ ++ if (val) { ++ apr_table_add((apr_table_t*)data, key, val); ++ } ++ return 1; ++} ++ + static + apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, + proxy_conn_rec **backend_ptr, +@@ -1717,6 +1729,12 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, + /* next time try a non-blocking read */ + mode = APR_NONBLOCK_READ; + ++ if (!apr_is_empty_table(backend->r->trailers_in)) { ++ apr_table_do(add_trailers, r->trailers_out, ++ backend->r->trailers_in, NULL); ++ apr_table_clear(backend->r->trailers_in); ++ } ++ + apr_brigade_length(bb, 0, &readbytes); + backend->worker->s->read += readbytes; + #if DEBUGGING +diff --git a/server/core.c b/server/core.c +index 024bab6..7cfde63 100644 +--- a/server/core.c ++++ b/server/core.c +@@ -523,6 +523,10 @@ static void *merge_core_server_configs(apr_pool_t *p, void *basev, void *virtv) + if (virt->error_log_req) + conf->error_log_req = virt->error_log_req; + ++ conf->merge_trailers = (virt->merge_trailers != AP_MERGE_TRAILERS_UNSET) ++ ? virt->merge_trailers ++ : base->merge_trailers; ++ + return conf; + } + +@@ -3877,6 +3881,16 @@ AP_DECLARE(void) ap_register_errorlog_handler(apr_pool_t *p, char *tag, + } + + ++static const char *set_merge_trailers(cmd_parms *cmd, void *dummy, int arg) ++{ ++ core_server_config *conf = ap_get_module_config(cmd->server->module_config, ++ &core_module); ++ conf->merge_trailers = (arg ? AP_MERGE_TRAILERS_ENABLE : ++ AP_MERGE_TRAILERS_DISABLE); ++ ++ return NULL; ++} ++ + /* Note --- ErrorDocument will now work from .htaccess files. 
+ * The AllowOverride of Fileinfo allows webmasters to turn it off + */ +@@ -4124,6 +4138,8 @@ AP_INIT_TAKE1("EnableExceptionHook", ap_mpm_set_exception_hook, NULL, RSRC_CONF, + #endif + AP_INIT_TAKE1("TraceEnable", set_trace_enable, NULL, RSRC_CONF, + "'on' (default), 'off' or 'extended' to trace request body content"), ++AP_INIT_FLAG("MergeTrailers", set_merge_trailers, NULL, RSRC_CONF, ++ "merge request trailers into request headers or not"), + { NULL } + }; + +@@ -4206,7 +4222,6 @@ static int core_map_to_storage(request_rec *r) + + static int do_nothing(request_rec *r) { return OK; } + +- + static int core_override_type(request_rec *r) + { + core_dir_config *conf = +diff --git a/server/protocol.c b/server/protocol.c +index 14329eb..46fc034 100644 +--- a/server/protocol.c ++++ b/server/protocol.c +@@ -718,6 +718,8 @@ AP_DECLARE(void) ap_get_mime_headers_core(request_rec *r, apr_bucket_brigade *bb + r->status = HTTP_REQUEST_TIME_OUT; + } + else { ++ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, rv, r, ++ "Failed to read request header line %s", field); + r->status = HTTP_BAD_REQUEST; + } + +@@ -917,9 +919,11 @@ request_rec *ap_read_request(conn_rec *conn) + r->allowed_methods = ap_make_method_list(p, 2); + + r->headers_in = apr_table_make(r->pool, 25); ++ r->trailers_in = apr_table_make(r->pool, 5); + r->subprocess_env = apr_table_make(r->pool, 25); + r->headers_out = apr_table_make(r->pool, 12); + r->err_headers_out = apr_table_make(r->pool, 5); ++ r->trailers_out = apr_table_make(r->pool, 5); + r->notes = apr_table_make(r->pool, 5); + + r->request_config = ap_create_request_config(r->pool); +@@ -1162,6 +1166,7 @@ AP_DECLARE(void) ap_set_sub_req_protocol(request_rec *rnew, + rnew->status = HTTP_OK; + + rnew->headers_in = apr_table_copy(rnew->pool, r->headers_in); ++ rnew->trailers_in = apr_table_copy(rnew->pool, r->trailers_in); + + /* did the original request have a body? (e.g. 
POST w/SSI tags) + * if so, make sure the subrequest doesn't inherit body headers +@@ -1173,6 +1178,7 @@ AP_DECLARE(void) ap_set_sub_req_protocol(request_rec *rnew, + rnew->subprocess_env = apr_table_copy(rnew->pool, r->subprocess_env); + rnew->headers_out = apr_table_make(rnew->pool, 5); + rnew->err_headers_out = apr_table_make(rnew->pool, 5); ++ rnew->trailers_out = apr_table_make(rnew->pool, 5); + rnew->notes = apr_table_make(rnew->pool, 5); + + rnew->expecting_100 = r->expecting_100; diff --git a/SOURCES/httpd-2.4.6-CVE-2013-6438.patch b/SOURCES/httpd-2.4.6-CVE-2013-6438.patch new file mode 100644 index 0000000..1b154f5 --- /dev/null +++ b/SOURCES/httpd-2.4.6-CVE-2013-6438.patch @@ -0,0 +1,14 @@ +--- a/modules/dav/main/util.c 2014/01/09 14:28:39 1556815 ++++ b/modules/dav/main/util.c 2014/01/09 14:29:47 1556816 +@@ -396,8 +396,10 @@ + + if (strip_white) { + /* trim leading whitespace */ +- while (apr_isspace(*cdata)) /* assume: return false for '\0' */ ++ while (apr_isspace(*cdata)) { /* assume: return false for '\0' */ + ++cdata; ++ --len; ++ } + + /* trim trailing whitespace */ + while (len-- > 0 && apr_isspace(cdata[len])) diff --git a/SOURCES/httpd-2.4.6-CVE-2014-0098.patch b/SOURCES/httpd-2.4.6-CVE-2014-0098.patch new file mode 100644 index 0000000..64d5064 --- /dev/null +++ b/SOURCES/httpd-2.4.6-CVE-2014-0098.patch @@ -0,0 +1,41 @@ +--- a/modules/loggers/mod_log_config.c 2013/11/15 17:07:52 1542329 ++++ b/modules/loggers/mod_log_config.c 2014/03/10 11:23:47 1575904 +@@ -543,14 +543,24 @@ + + while ((cookie = apr_strtok(cookies, ";", &last1))) { + char *name = apr_strtok(cookie, "=", &last2); +- if (name) { +- char *value = name + strlen(name) + 1; +- apr_collapse_spaces(name, name); ++ /* last2 points to the next char following an '=' delim, ++ or the trailing NUL char of the string */ ++ char *value = last2; ++ if (name && *name && value && *value) { ++ char *last = value - 2; ++ /* Move past leading WS */ ++ name += strspn(name, " \t"); ++ while (last >= name && apr_isspace(*last)) { ++ *last = '\0'; ++ --last; ++ } + + if (!strcasecmp(name, a)) { +- char *last; +- value += strspn(value, " \t"); /* Move past leading WS */ +- last = value + strlen(value) - 1; ++ /* last1 points to the next char following the ';' delim, ++ or the trailing NUL char of the string */ ++ last = last1 - (*last1 ? 2 : 1); ++ /* Move past leading WS */ ++ value += strspn(value, " \t"); + while (last >= value && apr_isspace(*last)) { + *last = '\0'; + --last; +@@ -559,6 +569,7 @@ + return ap_escape_logitem(r->pool, value); + } + } ++ /* Iterate the remaining tokens using apr_strtok(NULL, ...) 
*/ + cookies = NULL; + } + } diff --git a/SOURCES/httpd-2.4.6-CVE-2014-0117.patch b/SOURCES/httpd-2.4.6-CVE-2014-0117.patch new file mode 100644 index 0000000..f548d99 --- /dev/null +++ b/SOURCES/httpd-2.4.6-CVE-2014-0117.patch @@ -0,0 +1,11 @@ +--- a/modules/proxy/proxy_util.c 2014/07/15 16:07:44 1610736 ++++ b/modules/proxy/proxy_util.c 2014/07/15 16:11:04 1610737 +@@ -3132,7 +3132,7 @@ + const char *name; + + do { +- while (*val == ',') { ++ while (*val == ',' || *val == ';') { + val++; + } + name = ap_get_token(x->pool, &val, 0); diff --git a/SOURCES/httpd-2.4.6-CVE-2014-0118.patch b/SOURCES/httpd-2.4.6-CVE-2014-0118.patch new file mode 100644 index 0000000..e82b79f --- /dev/null +++ b/SOURCES/httpd-2.4.6-CVE-2014-0118.patch @@ -0,0 +1,266 @@ +diff --git a/modules/filters/mod_deflate.c b/modules/filters/mod_deflate.c +index 79f6f8d..6c415c8 100644 +--- a/modules/filters/mod_deflate.c ++++ b/modules/filters/mod_deflate.c +@@ -37,6 +37,7 @@ + #include "httpd.h" + #include "http_config.h" + #include "http_log.h" ++#include "http_core.h" + #include "apr_lib.h" + #include "apr_strings.h" + #include "apr_general.h" +@@ -52,6 +53,9 @@ + static const char deflateFilterName[] = "DEFLATE"; + module AP_MODULE_DECLARE_DATA deflate_module; + ++#define AP_INFLATE_RATIO_LIMIT 200 ++#define AP_INFLATE_RATIO_BURST 3 ++ + typedef struct deflate_filter_config_t + { + int windowSize; +@@ -63,6 +67,12 @@ typedef struct deflate_filter_config_t + char *note_output_name; + } deflate_filter_config; + ++typedef struct deflate_dirconf_t { ++ apr_off_t inflate_limit; ++ int ratio_limit, ++ ratio_burst; ++} deflate_dirconf_t; ++ + /* RFC 1952 Section 2.3 defines the gzip header: + * + * +---+---+---+---+---+---+---+---+---+---+ +@@ -204,6 +214,14 @@ static void *create_deflate_server_config(apr_pool_t *p, server_rec *s) + return c; + } + ++static void *create_deflate_dirconf(apr_pool_t *p, char *dummy) ++{ ++ deflate_dirconf_t *dc = apr_pcalloc(p, sizeof(*dc)); ++ dc->ratio_limit = AP_INFLATE_RATIO_LIMIT; ++ dc->ratio_burst = AP_INFLATE_RATIO_BURST; ++ return dc; ++} ++ + static const char *deflate_set_window_size(cmd_parms *cmd, void *dummy, + const char *arg) + { +@@ -295,6 +313,55 @@ static const char *deflate_set_compressionlevel(cmd_parms *cmd, void *dummy, + return NULL; + } + ++ ++static const char *deflate_set_inflate_limit(cmd_parms *cmd, void *dirconf, ++ const char *arg) ++{ ++ deflate_dirconf_t *dc = (deflate_dirconf_t*) dirconf; ++ char *errp; ++ ++ if (APR_SUCCESS != apr_strtoff(&dc->inflate_limit, arg, &errp, 10)) { ++ return "DeflateInflateLimitRequestBody is not parsable."; ++ } ++ if (*errp || dc->inflate_limit < 0) { ++ return "DeflateInflateLimitRequestBody requires a non-negative integer."; ++ } ++ ++ return NULL; ++} ++ ++static const char *deflate_set_inflate_ratio_limit(cmd_parms *cmd, ++ void *dirconf, ++ const char *arg) ++{ ++ deflate_dirconf_t *dc = (deflate_dirconf_t*) dirconf; ++ int i; ++ ++ i = atoi(arg); ++ if (i <= 0) ++ return "DeflateInflateRatioLimit must be positive"; ++ ++ dc->ratio_limit = i; ++ ++ return NULL; ++} ++ ++static const char *deflate_set_inflate_ratio_burst(cmd_parms *cmd, ++ void *dirconf, ++ const char *arg) ++{ ++ deflate_dirconf_t *dc = (deflate_dirconf_t*) dirconf; ++ int i; ++ ++ i = atoi(arg); ++ if (i <= 0) ++ return "DeflateInflateRatioBurst must be positive"; ++ ++ dc->ratio_burst = i; ++ ++ return NULL; ++} ++ + typedef struct deflate_ctx_t + { + z_stream stream; +@@ -304,6 +371,8 @@ typedef struct deflate_ctx_t + int (*libz_end_func)(z_streamp); + 
unsigned char *validation_buffer; + apr_size_t validation_buffer_length; ++ int ratio_hits; ++ apr_off_t inflate_total; + unsigned int inflate_init:1; + unsigned int filter_init:1; + unsigned int done:1; +@@ -422,6 +491,22 @@ static void deflate_check_etag(request_rec *r, const char *transform) + } + } + ++/* Check whether the (inflate) ratio exceeds the configured limit/burst. */ ++static int check_ratio(request_rec *r, deflate_ctx *ctx, ++ const deflate_dirconf_t *dc) ++{ ++ if (ctx->stream.total_in) { ++ int ratio = ctx->stream.total_out / ctx->stream.total_in; ++ if (ratio < dc->ratio_limit) { ++ ctx->ratio_hits = 0; ++ } ++ else if (++ctx->ratio_hits > dc->ratio_burst) { ++ return 0; ++ } ++ } ++ return 1; ++} ++ + static int have_ssl_compression(request_rec *r) + { + const char *comp; +@@ -897,6 +982,8 @@ static apr_status_t deflate_in_filter(ap_filter_t *f, + int zRC; + apr_status_t rv; + deflate_filter_config *c; ++ deflate_dirconf_t *dc; ++ apr_off_t inflate_limit; + + /* just get out of the way of things we don't want. */ + if (mode != AP_MODE_READBYTES) { +@@ -904,6 +991,7 @@ static apr_status_t deflate_in_filter(ap_filter_t *f, + } + + c = ap_get_module_config(r->server->module_config, &deflate_module); ++ dc = ap_get_module_config(r->per_dir_config, &deflate_module); + + if (!ctx) { + char deflate_hdr[10]; +@@ -994,6 +1082,12 @@ static apr_status_t deflate_in_filter(ap_filter_t *f, + apr_brigade_cleanup(ctx->bb); + } + ++ inflate_limit = dc->inflate_limit; ++ if (inflate_limit == 0) { ++ /* The core is checking the deflated body, we'll check the inflated */ ++ inflate_limit = ap_get_limit_req_body(f->r); ++ } ++ + if (APR_BRIGADE_EMPTY(ctx->proc_bb)) { + rv = ap_get_brigade(f->next, ctx->bb, mode, block, readbytes); + +@@ -1038,6 +1132,17 @@ static apr_status_t deflate_in_filter(ap_filter_t *f, + + ctx->stream.next_out = ctx->buffer; + len = c->bufferSize - ctx->stream.avail_out; ++ ++ ctx->inflate_total += len; ++ if (inflate_limit && ctx->inflate_total > inflate_limit) { ++ inflateEnd(&ctx->stream); ++ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(02647) ++ "Inflated content length of %" APR_OFF_T_FMT ++ " is larger than the configured limit" ++ " of %" APR_OFF_T_FMT, ++ ctx->inflate_total, inflate_limit); ++ return APR_ENOSPC; ++ } + + ctx->crc = crc32(ctx->crc, (const Bytef *)ctx->buffer, len); + tmp_heap = apr_bucket_heap_create((char *)ctx->buffer, len, +@@ -1073,6 +1178,26 @@ static apr_status_t deflate_in_filter(ap_filter_t *f, + ctx->stream.next_out = ctx->buffer; + len = c->bufferSize - ctx->stream.avail_out; + ++ ctx->inflate_total += len; ++ if (inflate_limit && ctx->inflate_total > inflate_limit) { ++ inflateEnd(&ctx->stream); ++ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(02648) ++ "Inflated content length of %" APR_OFF_T_FMT ++ " is larger than the configured limit" ++ " of %" APR_OFF_T_FMT, ++ ctx->inflate_total, inflate_limit); ++ return APR_ENOSPC; ++ } ++ ++ if (!check_ratio(r, ctx, dc)) { ++ inflateEnd(&ctx->stream); ++ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(02649) ++ "Inflated content ratio is larger than the " ++ "configured limit %i by %i time(s)", ++ dc->ratio_limit, dc->ratio_burst); ++ return APR_EINVAL; ++ } ++ + ctx->crc = crc32(ctx->crc, (const Bytef *)ctx->buffer, len); + tmp_heap = apr_bucket_heap_create((char *)ctx->buffer, len, + NULL, f->c->bucket_alloc); +@@ -1193,6 +1318,7 @@ static apr_status_t inflate_out_filter(ap_filter_t *f, + int zRC; + apr_status_t rv; + deflate_filter_config *c; ++ 
deflate_dirconf_t *dc; + + /* Do nothing if asked to filter nothing. */ + if (APR_BRIGADE_EMPTY(bb)) { +@@ -1200,6 +1326,7 @@ static apr_status_t inflate_out_filter(ap_filter_t *f, + } + + c = ap_get_module_config(r->server->module_config, &deflate_module); ++ dc = ap_get_module_config(r->per_dir_config, &deflate_module); + + if (!ctx) { + +@@ -1462,6 +1589,14 @@ static apr_status_t inflate_out_filter(ap_filter_t *f, + while (ctx->stream.avail_in != 0) { + if (ctx->stream.avail_out == 0) { + ++ if (!check_ratio(r, ctx, dc)) { ++ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(02650) ++ "Inflated content ratio is larger than the " ++ "configured limit %i by %i time(s)", ++ dc->ratio_limit, dc->ratio_burst); ++ return APR_EINVAL; ++ } ++ + ctx->stream.next_out = ctx->buffer; + len = c->bufferSize - ctx->stream.avail_out; + +@@ -1548,12 +1683,20 @@ static const command_rec deflate_filter_cmds[] = { + "Set the Deflate Memory Level (1-9)"), + AP_INIT_TAKE1("DeflateCompressionLevel", deflate_set_compressionlevel, NULL, RSRC_CONF, + "Set the Deflate Compression Level (1-9)"), ++ AP_INIT_TAKE1("DeflateInflateLimitRequestBody", deflate_set_inflate_limit, NULL, OR_ALL, ++ "Set a limit on size of inflated input"), ++ AP_INIT_TAKE1("DeflateInflateRatioLimit", deflate_set_inflate_ratio_limit, NULL, OR_ALL, ++ "Set the inflate ratio limit above which inflation is " ++ "aborted (default: " APR_STRINGIFY(AP_INFLATE_RATIO_LIMIT) ")"), ++ AP_INIT_TAKE1("DeflateInflateRatioBurst", deflate_set_inflate_ratio_burst, NULL, OR_ALL, ++ "Set the maximum number of following inflate ratios above limit " ++ "(default: " APR_STRINGIFY(AP_INFLATE_RATIO_BURST) ")"), + {NULL} + }; + + AP_DECLARE_MODULE(deflate) = { + STANDARD20_MODULE_STUFF, +- NULL, /* dir config creater */ ++ create_deflate_dirconf, /* dir config creater */ + NULL, /* dir merger --- default is to override */ + create_deflate_server_config, /* server config */ + NULL, /* merge server config */ diff --git a/SOURCES/httpd-2.4.6-CVE-2014-0226.patch b/SOURCES/httpd-2.4.6-CVE-2014-0226.patch new file mode 100644 index 0000000..67c7046 --- /dev/null +++ b/SOURCES/httpd-2.4.6-CVE-2014-0226.patch @@ -0,0 +1,119 @@ +Index: server/scoreboard.c +=================================================================== +--- a/server/scoreboard.c (revision 1610498) ++++ b/server/scoreboard.c (revision 1610499) +@@ -579,6 +579,21 @@ + sbh->thread_num); + } + ++AP_DECLARE(void) ap_copy_scoreboard_worker(worker_score *dest, ++ int child_num, ++ int thread_num) ++{ ++ worker_score *ws = ap_get_scoreboard_worker_from_indexes(child_num, thread_num); ++ ++ memcpy(dest, ws, sizeof *ws); ++ ++ /* For extra safety, NUL-terminate the strings returned, though it ++ * should be true those last bytes are always zero anyway. 
*/ ++ dest->client[sizeof(dest->client) - 1] = '\0'; ++ dest->request[sizeof(dest->request) - 1] = '\0'; ++ dest->vhost[sizeof(dest->vhost) - 1] = '\0'; ++} ++ + AP_DECLARE(process_score *) ap_get_scoreboard_process(int x) + { + if ((x < 0) || (x >= server_limit)) { +Index: modules/generators/mod_status.c +=================================================================== +--- a/modules/generators/mod_status.c (revision 1610498) ++++ b/modules/generators/mod_status.c (revision 1610499) +@@ -194,7 +194,7 @@ + long req_time; + int short_report; + int no_table_report; +- worker_score *ws_record; ++ worker_score *ws_record = apr_palloc(r->pool, sizeof *ws_record); + process_score *ps_record; + char *stat_buffer; + pid_t *pid_buffer, worker_pid; +@@ -306,7 +306,7 @@ + for (j = 0; j < thread_limit; ++j) { + int indx = (i * thread_limit) + j; + +- ws_record = ap_get_scoreboard_worker_from_indexes(i, j); ++ ap_copy_scoreboard_worker(ws_record, i, j); + res = ws_record->status; + + if ((i >= max_servers || j >= threads_per_child) +@@ -637,7 +637,7 @@ + + for (i = 0; i < server_limit; ++i) { + for (j = 0; j < thread_limit; ++j) { +- ws_record = ap_get_scoreboard_worker_from_indexes(i, j); ++ ap_copy_scoreboard_worker(ws_record, i, j); + + if (ws_record->access_count == 0 && + (ws_record->status == SERVER_READY || +Index: modules/lua/lua_request.c +=================================================================== +--- a/modules/lua/lua_request.c (revision 1610498) ++++ b/modules/lua/lua_request.c (revision 1610499) +@@ -1245,16 +1245,22 @@ + */ + static int lua_ap_scoreboard_worker(lua_State *L) + { +- int i, +- j; +- worker_score *ws_record; ++ int i, j; ++ worker_score *ws_record = NULL; ++ request_rec *r = NULL; + + luaL_checktype(L, 1, LUA_TUSERDATA); + luaL_checktype(L, 2, LUA_TNUMBER); + luaL_checktype(L, 3, LUA_TNUMBER); ++ ++ r = ap_lua_check_request_rec(L, 1); ++ if (!r) return 0; ++ + i = lua_tointeger(L, 2); + j = lua_tointeger(L, 3); +- ws_record = ap_get_scoreboard_worker_from_indexes(i, j); ++ ws_record = apr_palloc(r->pool, sizeof *ws_record); ++ ++ ap_copy_scoreboard_worker(ws_record, i, j); + if (ws_record) { + lua_newtable(L); + +Index: include/scoreboard.h +=================================================================== +--- a/include/scoreboard.h (revision 1610498) ++++ b/include/scoreboard.h (revision 1610499) +@@ -183,8 +183,25 @@ + AP_DECLARE(void) ap_time_process_request(ap_sb_handle_t *sbh, int status); + + AP_DECLARE(worker_score *) ap_get_scoreboard_worker(ap_sb_handle_t *sbh); ++ ++/** Return a pointer to the worker_score for a given child, thread pair. ++ * @param child_num The child number. ++ * @param thread_num The thread number. ++ * @return A pointer to the worker_score structure. ++ * @deprecated This function is deprecated, use ap_copy_scoreboard_worker instead. */ + AP_DECLARE(worker_score *) ap_get_scoreboard_worker_from_indexes(int child_num, + int thread_num); ++ ++/** Copy the contents of a worker scoreboard entry. The contents of ++ * the worker_score structure are copied verbatim into the dest ++ * structure. ++ * @param dest Output parameter. ++ * @param child_num The child number. ++ * @param thread_num The thread number. 
++ */ ++AP_DECLARE(void) ap_copy_scoreboard_worker(worker_score *dest, ++ int child_num, int thread_num); ++ + AP_DECLARE(process_score *) ap_get_scoreboard_process(int x); + AP_DECLARE(global_score *) ap_get_scoreboard_global(void); + + diff --git a/SOURCES/httpd-2.4.6-CVE-2014-0231.patch b/SOURCES/httpd-2.4.6-CVE-2014-0231.patch new file mode 100644 index 0000000..580123a --- /dev/null +++ b/SOURCES/httpd-2.4.6-CVE-2014-0231.patch @@ -0,0 +1,144 @@ +--- a/modules/generators/mod_cgid.c 2014/07/14 20:16:45 1610511 ++++ b/modules/generators/mod_cgid.c 2014/07/14 20:18:26 1610512 +@@ -97,6 +97,10 @@ + static pid_t parent_pid; + static ap_unix_identity_t empty_ugid = { (uid_t)-1, (gid_t)-1, -1 }; + ++typedef struct { ++ apr_interval_time_t timeout; ++} cgid_dirconf; ++ + /* The APR other-child API doesn't tell us how the daemon exited + * (SIGSEGV vs. exit(1)). The other-child maintenance function + * needs to decide whether to restart the daemon after a failure +@@ -968,7 +972,14 @@ + return overrides->logname ? overrides : base; + } + ++static void *create_cgid_dirconf(apr_pool_t *p, char *dummy) ++{ ++ cgid_dirconf *c = (cgid_dirconf *) apr_pcalloc(p, sizeof(cgid_dirconf)); ++ return c; ++} ++ + static const char *set_scriptlog(cmd_parms *cmd, void *dummy, const char *arg) ++ + { + server_rec *s = cmd->server; + cgid_server_conf *conf = ap_get_module_config(s->module_config, +@@ -1021,7 +1032,16 @@ + + return NULL; + } ++static const char *set_script_timeout(cmd_parms *cmd, void *dummy, const char *arg) ++{ ++ cgid_dirconf *dc = dummy; + ++ if (ap_timeout_parameter_parse(arg, &dc->timeout, "s") != APR_SUCCESS) { ++ return "CGIDScriptTimeout has wrong format"; ++ } ++ ++ return NULL; ++} + static const command_rec cgid_cmds[] = + { + AP_INIT_TAKE1("ScriptLog", set_scriptlog, NULL, RSRC_CONF, +@@ -1033,6 +1053,10 @@ + AP_INIT_TAKE1("ScriptSock", set_script_socket, NULL, RSRC_CONF, + "the name of the socket to use for communication with " + "the cgi daemon."), ++ AP_INIT_TAKE1("CGIDScriptTimeout", set_script_timeout, NULL, RSRC_CONF | ACCESS_CONF, ++ "The amount of time to wait between successful reads from " ++ "the CGI script, in seconds."), ++ + {NULL} + }; + +@@ -1356,12 +1380,16 @@ + apr_file_t *tempsock; + struct cleanup_script_info *info; + apr_status_t rv; ++ cgid_dirconf *dc; + + if (strcmp(r->handler, CGI_MAGIC_TYPE) && strcmp(r->handler, "cgi-script")) { + return DECLINED; + } + + conf = ap_get_module_config(r->server->module_config, &cgid_module); ++ dc = ap_get_module_config(r->per_dir_config, &cgid_module); ++ ++ + is_included = !strcmp(r->protocol, "INCLUDED"); + + if ((argv0 = strrchr(r->filename, '/')) != NULL) { +@@ -1441,6 +1469,12 @@ + */ + + apr_os_pipe_put_ex(&tempsock, &sd, 1, r->pool); ++ if (dc->timeout > 0) { ++ apr_file_pipe_timeout_set(tempsock, dc->timeout); ++ } ++ else { ++ apr_file_pipe_timeout_set(tempsock, r->server->timeout); ++ } + apr_pool_cleanup_kill(r->pool, (void *)((long)sd), close_unix_socket); + + /* Transfer any put/post args, CERN style... 
+@@ -1517,6 +1551,10 @@ + if (rv != APR_SUCCESS) { + /* silly script stopped reading, soak up remaining message */ + child_stopped_reading = 1; ++ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(02651) ++ "Error writing request body to script %s", ++ r->filename); ++ + } + } + apr_brigade_cleanup(bb); +@@ -1610,7 +1648,13 @@ + return HTTP_MOVED_TEMPORARILY; + } + +- ap_pass_brigade(r->output_filters, bb); ++ rv = ap_pass_brigade(r->output_filters, bb); ++ if (rv != APR_SUCCESS) { ++ /* APLOG_ERR because the core output filter message is at error, ++ * but doesn't know it's passing CGI output ++ */ ++ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(02550) "Failed to flush CGI output to client"); ++ } + } + + if (nph) { +@@ -1741,6 +1785,8 @@ + request_rec *r = f->r; + cgid_server_conf *conf = ap_get_module_config(r->server->module_config, + &cgid_module); ++ cgid_dirconf *dc = ap_get_module_config(r->per_dir_config, &cgid_module); ++ + struct cleanup_script_info *info; + + add_ssi_vars(r); +@@ -1770,6 +1816,13 @@ + * get rid of the cleanup we registered when we created the socket. + */ + apr_os_pipe_put_ex(&tempsock, &sd, 1, r->pool); ++ if (dc->timeout > 0) { ++ apr_file_pipe_timeout_set(tempsock, dc->timeout); ++ } ++ else { ++ apr_file_pipe_timeout_set(tempsock, r->server->timeout); ++ } ++ + apr_pool_cleanup_kill(r->pool, (void *)((long)sd), close_unix_socket); + + APR_BRIGADE_INSERT_TAIL(bb, apr_bucket_pipe_create(tempsock, +@@ -1875,7 +1928,7 @@ + + AP_DECLARE_MODULE(cgid) = { + STANDARD20_MODULE_STUFF, +- NULL, /* dir config creater */ ++ create_cgid_dirconf, /* dir config creater */ + NULL, /* dir merger --- default is to override */ + create_cgid_config, /* server config */ + merge_cgid_config, /* merge server config */ diff --git a/SOURCES/httpd-2.4.6-CVE-2014-3581.patch b/SOURCES/httpd-2.4.6-CVE-2014-3581.patch new file mode 100644 index 0000000..2f2217d --- /dev/null +++ b/SOURCES/httpd-2.4.6-CVE-2014-3581.patch @@ -0,0 +1,17 @@ +diff --git a/modules/cache/cache_util.c b/modules/cache/cache_util.c +index 7b7fb45..fbebb1e 100644 +--- a/modules/cache/cache_util.c ++++ b/modules/cache/cache_util.c +@@ -1251,8 +1251,10 @@ CACHE_DECLARE(apr_table_t *)ap_cache_cacheable_headers_out(request_rec *r) + + if (!apr_table_get(headers_out, "Content-Type") + && r->content_type) { +- apr_table_setn(headers_out, "Content-Type", +- ap_make_content_type(r, r->content_type)); ++ const char *ctype = ap_make_content_type(r, r->content_type); ++ if (ctype) { ++ apr_table_setn(headers_out, "Content-Type", ctype); ++ } + } + + if (!apr_table_get(headers_out, "Content-Encoding") diff --git a/SOURCES/httpd-2.4.6-CVE-2015-3183.patch b/SOURCES/httpd-2.4.6-CVE-2015-3183.patch new file mode 100644 index 0000000..da4d4fa --- /dev/null +++ b/SOURCES/httpd-2.4.6-CVE-2015-3183.patch @@ -0,0 +1,982 @@ +diff --git a/include/http_protocol.h b/include/http_protocol.h +index 415270b..67fa02f 100644 +--- a/include/http_protocol.h ++++ b/include/http_protocol.h +@@ -502,6 +502,23 @@ AP_DECLARE(int) ap_should_client_block(request_rec *r); + */ + AP_DECLARE(long) ap_get_client_block(request_rec *r, char *buffer, apr_size_t bufsiz); + ++/* ++ * Map specific APR codes returned by the filter stack to HTTP error ++ * codes, or the default status code provided. Use it as follows: ++ * ++ * return ap_map_http_request_error(rv, HTTP_BAD_REQUEST); ++ * ++ * If the filter has already handled the error, AP_FILTER_ERROR will ++ * be returned, which is cleanly passed through. 
++ * ++ * These mappings imply that the filter stack is reading from the ++ * downstream client, the proxy will map these codes differently. ++ * @param rv APR status code ++ * @param status Default HTTP code should the APR code not be recognised ++ * @return Mapped HTTP status code ++ */ ++AP_DECLARE(int) ap_map_http_request_error(apr_status_t rv, int status); ++ + /** + * In HTTP/1.1, any method can have a body. However, most GET handlers + * wouldn't know what to do with a request body if they received one. +diff --git a/modules/http/http_filters.c b/modules/http/http_filters.c +index 1dde402..ed8749f 100644 +--- a/modules/http/http_filters.c ++++ b/modules/http/http_filters.c +@@ -57,24 +57,29 @@ + + APLOG_USE_MODULE(http); + +-#define INVALID_CHAR -2 +- +-static long get_chunk_size(char *); +- +-typedef struct http_filter_ctx { ++typedef struct http_filter_ctx ++{ + apr_off_t remaining; + apr_off_t limit; + apr_off_t limit_used; +- enum { +- BODY_NONE, +- BODY_LENGTH, +- BODY_CHUNK, +- BODY_CHUNK_PART ++ apr_int32_t chunk_used; ++ apr_int32_t chunk_bws; ++ apr_int32_t chunkbits; ++ enum ++ { ++ BODY_NONE, /* streamed data */ ++ BODY_LENGTH, /* data constrained by content length */ ++ BODY_CHUNK, /* chunk expected */ ++ BODY_CHUNK_PART, /* chunk digits */ ++ BODY_CHUNK_EXT, /* chunk extension */ ++ BODY_CHUNK_CR, /* got space(s) after digits, expect [CR]LF or ext */ ++ BODY_CHUNK_LF, /* got CR after digits or ext, expect LF */ ++ BODY_CHUNK_DATA, /* data constrained by chunked encoding */ ++ BODY_CHUNK_END, /* chunked data terminating CRLF */ ++ BODY_CHUNK_END_LF, /* got CR after data, expect LF */ ++ BODY_CHUNK_TRAILER /* trailers */ + } state; +- int eos_sent; +- char chunk_ln[32]; +- char *pos; +- apr_off_t linesize; ++ unsigned int eos_sent :1; + apr_bucket_brigade *bb; + } http_ctx_t; + +@@ -87,6 +92,23 @@ static apr_status_t bail_out_on_error(http_ctx_t *ctx, + apr_bucket_brigade *bb = ctx->bb; + + apr_brigade_cleanup(bb); ++ ++ if (f->r->proxyreq == PROXYREQ_RESPONSE) { ++ switch (http_error) { ++ case HTTP_REQUEST_ENTITY_TOO_LARGE: ++ return APR_ENOSPC; ++ ++ case HTTP_REQUEST_TIME_OUT: ++ return APR_INCOMPLETE; ++ ++ case HTTP_NOT_IMPLEMENTED: ++ return APR_ENOTIMPL; ++ ++ default: ++ return APR_EGENERAL; ++ } ++ } ++ + e = ap_bucket_error_create(http_error, + NULL, f->r->pool, + f->c->bucket_alloc); +@@ -102,117 +124,154 @@ static apr_status_t bail_out_on_error(http_ctx_t *ctx, + return ap_pass_brigade(f->r->output_filters, bb); + } + +-static apr_status_t get_remaining_chunk_line(http_ctx_t *ctx, +- apr_bucket_brigade *b, +- int linelimit) ++/** ++ * Parse a chunk line with optional extension, detect overflow. ++ * There are two error cases: ++ * 1) If the conversion would require too many bits, APR_EGENERAL is returned. ++ * 2) If the conversion used the correct number of bits, but an overflow ++ * caused only the sign bit to flip, then APR_ENOSPC is returned. ++ * In general, any negative number can be considered an overflow error. ++ */ ++static apr_status_t parse_chunk_size(http_ctx_t *ctx, const char *buffer, ++ apr_size_t len, int linelimit) + { +- apr_status_t rv; +- apr_off_t brigade_length; +- apr_bucket *e; +- const char *lineend; +- apr_size_t len = 0; ++ apr_size_t i = 0; + +- /* +- * As the brigade b should have been requested in mode AP_MODE_GETLINE +- * all buckets in this brigade are already some type of memory +- * buckets (due to the needed scanning for LF in mode AP_MODE_GETLINE) +- * or META buckets. 
+- */ +- rv = apr_brigade_length(b, 0, &brigade_length); +- if (rv != APR_SUCCESS) { +- return rv; +- } +- /* Sanity check. Should never happen. See above. */ +- if (brigade_length == -1) { +- return APR_EGENERAL; +- } +- if (!brigade_length) { +- return APR_EAGAIN; +- } +- ctx->linesize += brigade_length; +- if (ctx->linesize > linelimit) { +- return APR_ENOSPC; +- } +- /* +- * As all buckets are already some type of memory buckets or META buckets +- * (see above), we only need to check the last byte in the last data bucket. +- */ +- for (e = APR_BRIGADE_LAST(b); +- e != APR_BRIGADE_SENTINEL(b); +- e = APR_BUCKET_PREV(e)) { ++ while (i < len) { ++ char c = buffer[i]; ++ ++ ap_xlate_proto_from_ascii(&c, 1); + +- if (APR_BUCKET_IS_METADATA(e)) { ++ /* handle CRLF after the chunk */ ++ if (ctx->state == BODY_CHUNK_END ++ || ctx->state == BODY_CHUNK_END_LF) { ++ if (c == LF) { ++ ctx->state = BODY_CHUNK; ++ } ++ else if (c == CR && ctx->state == BODY_CHUNK_END) { ++ ctx->state = BODY_CHUNK_END_LF; ++ } ++ else { ++ /* ++ * LF expected. ++ */ ++ return APR_EINVAL; ++ } ++ i++; + continue; + } +- rv = apr_bucket_read(e, &lineend, &len, APR_BLOCK_READ); +- if (rv != APR_SUCCESS) { +- return rv; ++ ++ /* handle start of the chunk */ ++ if (ctx->state == BODY_CHUNK) { ++ if (!apr_isxdigit(c)) { ++ /* ++ * Detect invalid character at beginning. This also works for ++ * empty chunk size lines. ++ */ ++ return APR_EINVAL; ++ } ++ else { ++ ctx->state = BODY_CHUNK_PART; ++ } ++ ctx->remaining = 0; ++ ctx->chunkbits = sizeof(apr_off_t) * 8; ++ ctx->chunk_used = 0; ++ ctx->chunk_bws = 0; + } +- if (len > 0) { +- break; /* we got the data we want */ ++ ++ if (c == LF) { ++ if (ctx->remaining) { ++ ctx->state = BODY_CHUNK_DATA; ++ } ++ else { ++ ctx->state = BODY_CHUNK_TRAILER; ++ } + } +- /* If we got a zero-length data bucket, we try the next one */ +- } +- /* We had no data in this brigade */ +- if (!len || e == APR_BRIGADE_SENTINEL(b)) { +- return APR_EAGAIN; +- } +- if (lineend[len - 1] != APR_ASCII_LF) { +- return APR_EAGAIN; +- } +- /* Line is complete. So reset ctx for next round. */ +- ctx->linesize = 0; +- ctx->pos = ctx->chunk_ln; +- return APR_SUCCESS; +-} ++ else if (ctx->state == BODY_CHUNK_LF) { ++ /* ++ * LF expected. ++ */ ++ return APR_EINVAL; ++ } ++ else if (c == CR) { ++ ctx->state = BODY_CHUNK_LF; ++ } ++ else if (c == ';') { ++ ctx->state = BODY_CHUNK_EXT; ++ } ++ else if (ctx->state == BODY_CHUNK_EXT) { ++ /* ++ * Control chars (but tabs) are invalid. ++ */ ++ if (c != '\t' && apr_iscntrl(c)) { ++ return APR_EINVAL; ++ } ++ } ++ else if (c == ' ' || c == '\t') { ++ /* Be lenient up to 10 BWS (term from rfc7230 - 3.2.3). ++ */ ++ ctx->state = BODY_CHUNK_CR; ++ if (++ctx->chunk_bws > 10) { ++ return APR_EINVAL; ++ } ++ } ++ else if (ctx->state == BODY_CHUNK_CR) { ++ /* ++ * ';', CR or LF expected. ++ */ ++ return APR_EINVAL; ++ } ++ else if (ctx->state == BODY_CHUNK_PART) { ++ int xvalue; + +-static apr_status_t get_chunk_line(http_ctx_t *ctx, apr_bucket_brigade *b, +- int linelimit) +-{ +- apr_size_t len; +- int tmp_len; +- apr_status_t rv; ++ /* ignore leading zeros */ ++ if (!ctx->remaining && c == '0') { ++ i++; ++ continue; ++ } + +- tmp_len = sizeof(ctx->chunk_ln) - (ctx->pos - ctx->chunk_ln) - 1; +- /* Saveguard ourselves against underflows */ +- if (tmp_len < 0) { +- len = 0; +- } +- else { +- len = (apr_size_t) tmp_len; +- } +- /* +- * Check if there is space left in ctx->chunk_ln. If not, then either +- * the chunk size is insane or we have chunk-extensions. 
Ignore both +- * by discarding the remaining part of the line via +- * get_remaining_chunk_line. Only bail out if the line is too long. +- */ +- if (len > 0) { +- rv = apr_brigade_flatten(b, ctx->pos, &len); +- if (rv != APR_SUCCESS) { +- return rv; ++ ctx->chunkbits -= 4; ++ if (ctx->chunkbits < 0) { ++ /* overflow */ ++ return APR_ENOSPC; ++ } ++ ++ if (c >= '0' && c <= '9') { ++ xvalue = c - '0'; ++ } ++ else if (c >= 'A' && c <= 'F') { ++ xvalue = c - 'A' + 0xa; ++ } ++ else if (c >= 'a' && c <= 'f') { ++ xvalue = c - 'a' + 0xa; ++ } ++ else { ++ /* bogus character */ ++ return APR_EINVAL; ++ } ++ ++ ctx->remaining = (ctx->remaining << 4) | xvalue; ++ if (ctx->remaining < 0) { ++ /* overflow */ ++ return APR_ENOSPC; ++ } + } +- ctx->pos += len; +- ctx->linesize += len; +- *(ctx->pos) = '\0'; +- /* +- * Check if we really got a full line. If yes the +- * last char in the just read buffer must be LF. +- * If not advance the buffer and return APR_EAGAIN. +- * We do not start processing until we have the +- * full line. +- */ +- if (ctx->pos[-1] != APR_ASCII_LF) { +- /* Check if the remaining data in the brigade has the LF */ +- return get_remaining_chunk_line(ctx, b, linelimit); ++ else { ++ /* Should not happen */ ++ return APR_EGENERAL; + } +- /* Line is complete. So reset ctx->pos for next round. */ +- ctx->pos = ctx->chunk_ln; +- return APR_SUCCESS; ++ ++ i++; + } +- return get_remaining_chunk_line(ctx, b, linelimit); +-} + ++ /* sanity check */ ++ ctx->chunk_used += len; ++ if (ctx->chunk_used < 0 || ctx->chunk_used > linelimit) { ++ return APR_ENOSPC; ++ } ++ ++ return APR_SUCCESS; ++} + + static apr_status_t read_chunked_trailers(http_ctx_t *ctx, ap_filter_t *f, + apr_bucket_brigade *b, int merge) +@@ -226,7 +285,6 @@ static apr_status_t read_chunked_trailers(http_ctx_t *ctx, ap_filter_t *f, + r->status = HTTP_OK; + r->headers_in = r->trailers_in; + apr_table_clear(r->headers_in); +- ctx->state = BODY_NONE; + ap_get_mime_headers(r); + + if(r->status == HTTP_OK) { +@@ -239,7 +297,7 @@ static apr_status_t read_chunked_trailers(http_ctx_t *ctx, ap_filter_t *f, + else { + const char *error_notes = apr_table_get(r->notes, + "error-notes"); +- ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, ++ ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(02656) + "Error while reading HTTP trailer: %i%s%s", + r->status, error_notes ? ": " : "", + error_notes ? 
error_notes : ""); +@@ -270,9 +328,9 @@ apr_status_t ap_http_filter(ap_filter_t *f, apr_bucket_brigade *b, + apr_bucket *e; + http_ctx_t *ctx = f->ctx; + apr_status_t rv; +- apr_off_t totalread; + int http_error = HTTP_REQUEST_ENTITY_TOO_LARGE; + apr_bucket_brigade *bb; ++ int again; + + conf = (core_server_config *) + ap_get_module_config(f->r->server->module_config, &core_module); +@@ -286,7 +344,6 @@ apr_status_t ap_http_filter(ap_filter_t *f, apr_bucket_brigade *b, + const char *tenc, *lenp; + f->ctx = ctx = apr_pcalloc(f->r->pool, sizeof(*ctx)); + ctx->state = BODY_NONE; +- ctx->pos = ctx->chunk_ln; + ctx->bb = apr_brigade_create(f->r->pool, f->c->bucket_alloc); + bb = ctx->bb; + +@@ -306,25 +363,33 @@ apr_status_t ap_http_filter(ap_filter_t *f, apr_bucket_brigade *b, + lenp = apr_table_get(f->r->headers_in, "Content-Length"); + + if (tenc) { +- if (!strcasecmp(tenc, "chunked")) { ++ if (strcasecmp(tenc, "chunked") == 0 /* fast path */ ++ || ap_find_last_token(f->r->pool, tenc, "chunked")) { + ctx->state = BODY_CHUNK; + } +- /* test lenp, because it gives another case we can handle */ +- else if (!lenp) { +- /* Something that isn't in HTTP, unless some future ++ else if (f->r->proxyreq == PROXYREQ_RESPONSE) { ++ /* http://tools.ietf.org/html/draft-ietf-httpbis-p1-messaging-23 ++ * Section 3.3.3.3: "If a Transfer-Encoding header field is ++ * present in a response and the chunked transfer coding is not ++ * the final encoding, the message body length is determined by ++ * reading the connection until it is closed by the server." ++ */ ++ ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, f->r, APLOGNO(02555) ++ "Unknown Transfer-Encoding: %s; " ++ "using read-until-close", tenc); ++ tenc = NULL; ++ } ++ else { ++ /* Something that isn't a HTTP request, unless some future + * edition defines new transfer encodings, is unsupported. + */ + ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, f->r, APLOGNO(01585) + "Unknown Transfer-Encoding: %s", tenc); +- return bail_out_on_error(ctx, f, HTTP_NOT_IMPLEMENTED); +- } +- else { +- ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, f->r, APLOGNO(01586) +- "Unknown Transfer-Encoding: %s; using Content-Length", tenc); +- tenc = NULL; ++ return bail_out_on_error(ctx, f, HTTP_BAD_REQUEST); + } ++ lenp = NULL; + } +- if (lenp && !tenc) { ++ if (lenp) { + char *endstr; + + ctx->state = BODY_LENGTH; +@@ -339,7 +404,7 @@ apr_status_t ap_http_filter(ap_filter_t *f, apr_bucket_brigade *b, + ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, f->r, APLOGNO(01587) + "Invalid Content-Length"); + +- return bail_out_on_error(ctx, f, HTTP_REQUEST_ENTITY_TOO_LARGE); ++ return bail_out_on_error(ctx, f, HTTP_BAD_REQUEST); + } + + /* If we have a limit in effect and we know the C-L ahead of +@@ -381,7 +446,8 @@ apr_status_t ap_http_filter(ap_filter_t *f, apr_bucket_brigade *b, + if (!ap_is_HTTP_SUCCESS(f->r->status)) { + ctx->state = BODY_NONE; + ctx->eos_sent = 1; +- } else { ++ } ++ else { + char *tmp; + int len; + +@@ -389,7 +455,7 @@ apr_status_t ap_http_filter(ap_filter_t *f, apr_bucket_brigade *b, + * in a state of expecting one. 
+ */ + f->r->expecting_100 = 0; +- tmp = apr_pstrcat(f->r->pool, AP_SERVER_PROTOCOL, " ", ++ tmp = apr_pstrcat(f->r->pool, AP_SERVER_PROTOCOL " ", + ap_get_status_line(HTTP_CONTINUE), CRLF CRLF, + NULL); + len = strlen(tmp); +@@ -401,279 +467,205 @@ apr_status_t ap_http_filter(ap_filter_t *f, apr_bucket_brigade *b, + e = apr_bucket_flush_create(f->c->bucket_alloc); + APR_BRIGADE_INSERT_TAIL(bb, e); + +- ap_pass_brigade(f->c->output_filters, bb); +- } +- } +- +- /* We can't read the chunk until after sending 100 if required. */ +- if (ctx->state == BODY_CHUNK) { +- apr_brigade_cleanup(bb); +- +- rv = ap_get_brigade(f->next, bb, AP_MODE_GETLINE, +- block, 0); +- +- /* for timeout */ +- if (block == APR_NONBLOCK_READ && +- ( (rv == APR_SUCCESS && APR_BRIGADE_EMPTY(bb)) || +- (APR_STATUS_IS_EAGAIN(rv)) )) { +- ctx->state = BODY_CHUNK_PART; +- return APR_EAGAIN; +- } +- +- if (rv == APR_SUCCESS) { +- rv = get_chunk_line(ctx, bb, f->r->server->limit_req_line); +- if (APR_STATUS_IS_EAGAIN(rv)) { +- apr_brigade_cleanup(bb); +- ctx->state = BODY_CHUNK_PART; +- return rv; +- } +- if (rv == APR_SUCCESS) { +- ctx->remaining = get_chunk_size(ctx->chunk_ln); +- if (ctx->remaining == INVALID_CHAR) { +- rv = APR_EGENERAL; +- http_error = HTTP_BAD_REQUEST; +- } ++ rv = ap_pass_brigade(f->c->output_filters, bb); ++ if (rv != APR_SUCCESS) { ++ return AP_FILTER_ERROR; + } + } +- apr_brigade_cleanup(bb); +- +- /* Detect chunksize error (such as overflow) */ +- if (rv != APR_SUCCESS || ctx->remaining < 0) { +- ap_log_rerror(APLOG_MARK, APLOG_INFO, rv, f->r, APLOGNO(01589) "Error reading first chunk %s ", +- (ctx->remaining < 0) ? "(overflow)" : ""); +- ctx->remaining = 0; /* Reset it in case we have to +- * come back here later */ +- if (APR_STATUS_IS_TIMEUP(rv)) { +- http_error = HTTP_REQUEST_TIME_OUT; +- } +- return bail_out_on_error(ctx, f, http_error); +- } +- +- if (!ctx->remaining) { +- return read_chunked_trailers(ctx, f, b, +- conf->merge_trailers == AP_MERGE_TRAILERS_ENABLE); +- } + } + } +- else { +- bb = ctx->bb; +- } + ++ /* sanity check in case we're read twice */ + if (ctx->eos_sent) { + e = apr_bucket_eos_create(f->c->bucket_alloc); + APR_BRIGADE_INSERT_TAIL(b, e); + return APR_SUCCESS; + } + +- if (!ctx->remaining) { ++ do { ++ apr_brigade_cleanup(b); ++ again = 0; /* until further notice */ ++ ++ /* read and handle the brigade */ + switch (ctx->state) { +- case BODY_NONE: +- break; +- case BODY_LENGTH: +- e = apr_bucket_eos_create(f->c->bucket_alloc); +- APR_BRIGADE_INSERT_TAIL(b, e); +- ctx->eos_sent = 1; +- return APR_SUCCESS; + case BODY_CHUNK: + case BODY_CHUNK_PART: +- { +- apr_brigade_cleanup(bb); ++ case BODY_CHUNK_EXT: ++ case BODY_CHUNK_CR: ++ case BODY_CHUNK_LF: ++ case BODY_CHUNK_END: ++ case BODY_CHUNK_END_LF: { + +- /* We need to read the CRLF after the chunk. */ +- if (ctx->state == BODY_CHUNK) { +- rv = ap_get_brigade(f->next, bb, AP_MODE_GETLINE, +- block, 0); +- if (block == APR_NONBLOCK_READ && +- ( (rv == APR_SUCCESS && APR_BRIGADE_EMPTY(bb)) || +- (APR_STATUS_IS_EAGAIN(rv)) )) { +- return APR_EAGAIN; +- } +- /* If we get an error, then leave */ +- if (rv != APR_SUCCESS) { +- return rv; +- } +- /* +- * We really don't care whats on this line. If it is RFC +- * compliant it should be only \r\n. If there is more +- * before we just ignore it as long as we do not get over +- * the limit for request lines. 
+- */ +- rv = get_remaining_chunk_line(ctx, bb, +- f->r->server->limit_req_line); +- apr_brigade_cleanup(bb); +- if (APR_STATUS_IS_EAGAIN(rv)) { +- return rv; +- } +- } else { +- rv = APR_SUCCESS; +- } ++ rv = ap_get_brigade(f->next, b, AP_MODE_GETLINE, block, 0); ++ ++ /* for timeout */ ++ if (block == APR_NONBLOCK_READ ++ && ((rv == APR_SUCCESS && APR_BRIGADE_EMPTY(b)) ++ || (APR_STATUS_IS_EAGAIN(rv)))) { ++ return APR_EAGAIN; ++ } ++ ++ if (rv == APR_EOF) { ++ return APR_INCOMPLETE; ++ } ++ ++ if (rv != APR_SUCCESS) { ++ return rv; ++ } ++ ++ e = APR_BRIGADE_FIRST(b); ++ while (e != APR_BRIGADE_SENTINEL(b)) { ++ const char *buffer; ++ apr_size_t len; ++ ++ if (!APR_BUCKET_IS_METADATA(e)) { ++ int parsing = 0; ++ ++ rv = apr_bucket_read(e, &buffer, &len, APR_BLOCK_READ); + +- if (rv == APR_SUCCESS) { +- /* Read the real chunk line. */ +- rv = ap_get_brigade(f->next, bb, AP_MODE_GETLINE, +- block, 0); +- /* Test timeout */ +- if (block == APR_NONBLOCK_READ && +- ( (rv == APR_SUCCESS && APR_BRIGADE_EMPTY(bb)) || +- (APR_STATUS_IS_EAGAIN(rv)) )) { +- ctx->state = BODY_CHUNK_PART; +- return APR_EAGAIN; +- } +- ctx->state = BODY_CHUNK; + if (rv == APR_SUCCESS) { +- rv = get_chunk_line(ctx, bb, f->r->server->limit_req_line); +- if (APR_STATUS_IS_EAGAIN(rv)) { +- ctx->state = BODY_CHUNK_PART; +- apr_brigade_cleanup(bb); +- return rv; +- } +- if (rv == APR_SUCCESS) { +- ctx->remaining = get_chunk_size(ctx->chunk_ln); +- if (ctx->remaining == INVALID_CHAR) { +- rv = APR_EGENERAL; ++ parsing = 1; ++ rv = parse_chunk_size(ctx, buffer, len, ++ f->r->server->limit_req_fieldsize); ++ } ++ if (rv != APR_SUCCESS) { ++ ap_log_rerror(APLOG_MARK, APLOG_INFO, rv, f->r, APLOGNO(01590) ++ "Error reading/parsing chunk %s ", ++ (APR_ENOSPC == rv) ? "(overflow)" : ""); ++ if (parsing) { ++ if (rv != APR_ENOSPC) { + http_error = HTTP_BAD_REQUEST; + } ++ return bail_out_on_error(ctx, f, http_error); + } ++ return rv; + } +- apr_brigade_cleanup(bb); + } + +- /* Detect chunksize error (such as overflow) */ +- if (rv != APR_SUCCESS || ctx->remaining < 0) { +- ap_log_rerror(APLOG_MARK, APLOG_INFO, rv, f->r, APLOGNO(01590) "Error reading chunk %s ", +- (ctx->remaining < 0) ? "(overflow)" : ""); +- ctx->remaining = 0; /* Reset it in case we have to +- * come back here later */ +- if (APR_STATUS_IS_TIMEUP(rv)) { +- http_error = HTTP_REQUEST_TIME_OUT; +- } +- return bail_out_on_error(ctx, f, http_error); +- } ++ apr_bucket_delete(e); ++ e = APR_BRIGADE_FIRST(b); ++ } ++ again = 1; /* come around again */ + +- if (!ctx->remaining) { +- return read_chunked_trailers(ctx, f, b, ++ if (ctx->state == BODY_CHUNK_TRAILER) { ++ /* Treat UNSET as DISABLE - trailers aren't merged by default */ ++ return read_chunked_trailers(ctx, f, b, + conf->merge_trailers == AP_MERGE_TRAILERS_ENABLE); +- } + } ++ + break; + } +- } ++ case BODY_NONE: ++ case BODY_LENGTH: ++ case BODY_CHUNK_DATA: { + +- /* Ensure that the caller can not go over our boundary point. */ +- if (ctx->state == BODY_LENGTH || ctx->state == BODY_CHUNK) { +- if (ctx->remaining < readbytes) { +- readbytes = ctx->remaining; +- } +- AP_DEBUG_ASSERT(readbytes > 0); +- } ++ /* Ensure that the caller can not go over our boundary point. 
*/ ++ if (ctx->state != BODY_NONE && ctx->remaining < readbytes) { ++ readbytes = ctx->remaining; ++ } ++ if (readbytes > 0) { ++ apr_off_t totalread; + +- rv = ap_get_brigade(f->next, b, mode, block, readbytes); ++ rv = ap_get_brigade(f->next, b, mode, block, readbytes); + +- if (rv != APR_SUCCESS) { +- return rv; +- } ++ /* for timeout */ ++ if (block == APR_NONBLOCK_READ ++ && ((rv == APR_SUCCESS && APR_BRIGADE_EMPTY(b)) ++ || (APR_STATUS_IS_EAGAIN(rv)))) { ++ return APR_EAGAIN; ++ } + +- /* How many bytes did we just read? */ +- apr_brigade_length(b, 0, &totalread); ++ if (rv == APR_EOF && ctx->state != BODY_NONE ++ && ctx->remaining > 0) { ++ return APR_INCOMPLETE; ++ } + +- /* If this happens, we have a bucket of unknown length. Die because +- * it means our assumptions have changed. */ +- AP_DEBUG_ASSERT(totalread >= 0); ++ if (rv != APR_SUCCESS) { ++ return rv; ++ } + +- if (ctx->state != BODY_NONE) { +- ctx->remaining -= totalread; +- if (ctx->remaining > 0) { +- e = APR_BRIGADE_LAST(b); +- if (APR_BUCKET_IS_EOS(e)) +- return APR_EOF; +- } +- } ++ /* How many bytes did we just read? */ ++ apr_brigade_length(b, 0, &totalread); + +- /* If we have no more bytes remaining on a C-L request, +- * save the callter a roundtrip to discover EOS. +- */ +- if (ctx->state == BODY_LENGTH && ctx->remaining == 0) { +- e = apr_bucket_eos_create(f->c->bucket_alloc); +- APR_BRIGADE_INSERT_TAIL(b, e); +- } ++ /* If this happens, we have a bucket of unknown length. Die because ++ * it means our assumptions have changed. */ ++ AP_DEBUG_ASSERT(totalread >= 0); + +- /* We have a limit in effect. */ +- if (ctx->limit) { +- /* FIXME: Note that we might get slightly confused on chunked inputs +- * as we'd need to compensate for the chunk lengths which may not +- * really count. This seems to be up for interpretation. */ +- ctx->limit_used += totalread; +- if (ctx->limit < ctx->limit_used) { +- ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, f->r, APLOGNO(01591) +- "Read content-length of %" APR_OFF_T_FMT +- " is larger than the configured limit" +- " of %" APR_OFF_T_FMT, ctx->limit_used, ctx->limit); +- apr_brigade_cleanup(bb); +- e = ap_bucket_error_create(HTTP_REQUEST_ENTITY_TOO_LARGE, NULL, +- f->r->pool, +- f->c->bucket_alloc); +- APR_BRIGADE_INSERT_TAIL(bb, e); +- e = apr_bucket_eos_create(f->c->bucket_alloc); +- APR_BRIGADE_INSERT_TAIL(bb, e); +- ctx->eos_sent = 1; +- return ap_pass_brigade(f->r->output_filters, bb); +- } +- } ++ if (ctx->state != BODY_NONE) { ++ ctx->remaining -= totalread; ++ if (ctx->remaining > 0) { ++ e = APR_BRIGADE_LAST(b); ++ if (APR_BUCKET_IS_EOS(e)) { ++ apr_bucket_delete(e); ++ return APR_INCOMPLETE; ++ } ++ } ++ else if (ctx->state == BODY_CHUNK_DATA) { ++ /* next chunk please */ ++ ctx->state = BODY_CHUNK_END; ++ ctx->chunk_used = 0; ++ } ++ } + +- return APR_SUCCESS; +-} ++ /* We have a limit in effect. */ ++ if (ctx->limit) { ++ /* FIXME: Note that we might get slightly confused on ++ * chunked inputs as we'd need to compensate for the chunk ++ * lengths which may not really count. This seems to be up ++ * for interpretation. ++ */ ++ ctx->limit_used += totalread; ++ if (ctx->limit < ctx->limit_used) { ++ ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, f->r, ++ APLOGNO(01591) "Read content length of " ++ "%" APR_OFF_T_FMT " is larger than the " ++ "configured limit of %" APR_OFF_T_FMT, ++ ctx->limit_used, ctx->limit); ++ return bail_out_on_error(ctx, f, ++ HTTP_REQUEST_ENTITY_TOO_LARGE); ++ } ++ } ++ } + +-/** +- * Parse a chunk extension, detect overflow. 
+- * There are two error cases: +- * 1) If the conversion would require too many bits, a -1 is returned. +- * 2) If the conversion used the correct number of bits, but an overflow +- * caused only the sign bit to flip, then that negative number is +- * returned. +- * In general, any negative number can be considered an overflow error. +- */ +-static long get_chunk_size(char *b) +-{ +- long chunksize = 0; +- size_t chunkbits = sizeof(long) * 8; ++ /* If we have no more bytes remaining on a C-L request, ++ * save the caller a round trip to discover EOS. ++ */ ++ if (ctx->state == BODY_LENGTH && ctx->remaining == 0) { ++ e = apr_bucket_eos_create(f->c->bucket_alloc); ++ APR_BRIGADE_INSERT_TAIL(b, e); ++ ctx->eos_sent = 1; ++ } + +- ap_xlate_proto_from_ascii(b, strlen(b)); ++ break; ++ } ++ case BODY_CHUNK_TRAILER: { + +- if (!apr_isxdigit(*b)) { +- /* +- * Detect invalid character at beginning. This also works for empty +- * chunk size lines. +- */ +- return INVALID_CHAR; +- } +- /* Skip leading zeros */ +- while (*b == '0') { +- ++b; +- } ++ rv = ap_get_brigade(f->next, b, mode, block, readbytes); + +- while (apr_isxdigit(*b) && (chunkbits > 0)) { +- int xvalue = 0; ++ /* for timeout */ ++ if (block == APR_NONBLOCK_READ ++ && ((rv == APR_SUCCESS && APR_BRIGADE_EMPTY(b)) ++ || (APR_STATUS_IS_EAGAIN(rv)))) { ++ return APR_EAGAIN; ++ } ++ ++ if (rv != APR_SUCCESS) { ++ return rv; ++ } + +- if (*b >= '0' && *b <= '9') { +- xvalue = *b - '0'; ++ break; + } +- else if (*b >= 'A' && *b <= 'F') { +- xvalue = *b - 'A' + 0xa; ++ default: { ++ /* Should not happen */ ++ ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, f->r, APLOGNO(02901) ++ "Unexpected body state (%i)", (int)ctx->state); ++ return APR_EGENERAL; + } +- else if (*b >= 'a' && *b <= 'f') { +- xvalue = *b - 'a' + 0xa; + } + +- chunksize = (chunksize << 4) | xvalue; +- chunkbits -= 4; +- ++b; +- } +- if (apr_isxdigit(*b)) { +- /* overflow */ +- return -1; +- } ++ } while (again); + +- return chunksize; ++ return APR_SUCCESS; + } + + typedef struct header_struct { +@@ -1385,6 +1377,39 @@ AP_CORE_DECLARE_NONSTD(apr_status_t) ap_http_header_filter(ap_filter_t *f, + return ap_pass_brigade(f->next, b); + } + ++/* ++ * Map specific APR codes returned by the filter stack to HTTP error ++ * codes, or the default status code provided. Use it as follows: ++ * ++ * return ap_map_http_request_error(rv, HTTP_BAD_REQUEST); ++ * ++ * If the filter has already handled the error, AP_FILTER_ERROR will ++ * be returned, which is cleanly passed through. ++ * ++ * These mappings imply that the filter stack is reading from the ++ * downstream client, the proxy will map these codes differently. ++ */ ++AP_DECLARE(int) ap_map_http_request_error(apr_status_t rv, int status) ++{ ++ switch (rv) { ++ case AP_FILTER_ERROR: { ++ return AP_FILTER_ERROR; ++ } ++ case APR_ENOSPC: { ++ return HTTP_REQUEST_ENTITY_TOO_LARGE; ++ } ++ case APR_ENOTIMPL: { ++ return HTTP_NOT_IMPLEMENTED; ++ } ++ case APR_ETIMEDOUT: { ++ return HTTP_REQUEST_TIME_OUT; ++ } ++ default: { ++ return status; ++ } ++ } ++} ++ + /* In HTTP/1.1, any method can have a body. However, most GET handlers + * wouldn't know what to do with a request body if they received one. 
+ * This helper routine tests for and reads any message body in the request, +@@ -1402,7 +1427,8 @@ AP_CORE_DECLARE_NONSTD(apr_status_t) ap_http_header_filter(ap_filter_t *f, + AP_DECLARE(int) ap_discard_request_body(request_rec *r) + { + apr_bucket_brigade *bb; +- int rv, seen_eos; ++ int seen_eos; ++ apr_status_t rv; + + /* Sometimes we'll get in a state where the input handling has + * detected an error where we want to drop the connection, so if +@@ -1425,21 +1451,8 @@ AP_DECLARE(int) ap_discard_request_body(request_rec *r) + APR_BLOCK_READ, HUGE_STRING_LEN); + + if (rv != APR_SUCCESS) { +- /* FIXME: If we ever have a mapping from filters (apr_status_t) +- * to HTTP error codes, this would be a good place for them. +- * +- * If we received the special case AP_FILTER_ERROR, it means +- * that the filters have already handled this error. +- * Otherwise, we should assume we have a bad request. +- */ +- if (rv == AP_FILTER_ERROR) { +- apr_brigade_destroy(bb); +- return rv; +- } +- else { +- apr_brigade_destroy(bb); +- return HTTP_BAD_REQUEST; +- } ++ apr_brigade_destroy(bb); ++ return ap_map_http_request_error(rv, HTTP_BAD_REQUEST); + } + + for (bucket = APR_BRIGADE_FIRST(bb); +@@ -1608,6 +1621,13 @@ AP_DECLARE(long) ap_get_client_block(request_rec *r, char *buffer, + /* We lose the failure code here. This is why ap_get_client_block should + * not be used. + */ ++ if (rv == AP_FILTER_ERROR) { ++ /* AP_FILTER_ERROR means a filter has responded already, ++ * we are DONE. ++ */ ++ apr_brigade_destroy(bb); ++ return -1; ++ } + if (rv != APR_SUCCESS) { + /* if we actually fail here, we want to just return and + * stop trying to read data from the client. diff --git a/SOURCES/httpd-2.4.6-CVE-2015-3185.patch b/SOURCES/httpd-2.4.6-CVE-2015-3185.patch new file mode 100644 index 0000000..e8bb688 --- /dev/null +++ b/SOURCES/httpd-2.4.6-CVE-2015-3185.patch @@ -0,0 +1,175 @@ +Index: server/request.c +=================================================================== +--- a/server/request.c (revision 1684524) ++++ b/server/request.c (revision 1684525) +@@ -71,6 +71,7 @@ + APR_HOOK_LINK(create_request) + APR_HOOK_LINK(post_perdir_config) + APR_HOOK_LINK(dirwalk_stat) ++ APR_HOOK_LINK(force_authn) + ) + + AP_IMPLEMENT_HOOK_RUN_FIRST(int,translate_name, +@@ -97,6 +98,8 @@ + AP_IMPLEMENT_HOOK_RUN_FIRST(apr_status_t,dirwalk_stat, + (apr_finfo_t *finfo, request_rec *r, apr_int32_t wanted), + (finfo, r, wanted), AP_DECLINED) ++AP_IMPLEMENT_HOOK_RUN_FIRST(int,force_authn, ++ (request_rec *r), (r), DECLINED) + + static int auth_internal_per_conf = 0; + static int auth_internal_per_conf_hooks = 0; +@@ -118,6 +121,39 @@ + } + } + ++AP_DECLARE(int) ap_some_authn_required(request_rec *r) ++{ ++ int access_status; ++ ++ switch (ap_satisfies(r)) { ++ case SATISFY_ALL: ++ case SATISFY_NOSPEC: ++ if ((access_status = ap_run_access_checker(r)) != OK) { ++ break; ++ } ++ ++ access_status = ap_run_access_checker_ex(r); ++ if (access_status == DECLINED) { ++ return TRUE; ++ } ++ ++ break; ++ case SATISFY_ANY: ++ if ((access_status = ap_run_access_checker(r)) == OK) { ++ break; ++ } ++ ++ access_status = ap_run_access_checker_ex(r); ++ if (access_status == DECLINED) { ++ return TRUE; ++ } ++ ++ break; ++ } ++ ++ return FALSE; ++} ++ + /* This is the master logic for processing requests. Do NOT duplicate + * this logic elsewhere, or the security model will be broken by future + * API changes. 
Each phase must be individually optimized to pick up +@@ -232,15 +268,8 @@ + } + + access_status = ap_run_access_checker_ex(r); +- if (access_status == OK) { +- ap_log_rerror(APLOG_MARK, APLOG_TRACE3, 0, r, +- "request authorized without authentication by " +- "access_checker_ex hook: %s", r->uri); +- } +- else if (access_status != DECLINED) { +- return decl_die(access_status, "check access", r); +- } +- else { ++ if (access_status == DECLINED ++ || (access_status == OK && ap_run_force_authn(r) == OK)) { + if ((access_status = ap_run_check_user_id(r)) != OK) { + return decl_die(access_status, "check user", r); + } +@@ -258,6 +287,14 @@ + return decl_die(access_status, "check authorization", r); + } + } ++ else if (access_status == OK) { ++ ap_log_rerror(APLOG_MARK, APLOG_TRACE3, 0, r, ++ "request authorized without authentication by " ++ "access_checker_ex hook: %s", r->uri); ++ } ++ else { ++ return decl_die(access_status, "check access", r); ++ } + break; + case SATISFY_ANY: + if ((access_status = ap_run_access_checker(r)) == OK) { +@@ -269,15 +306,8 @@ + } + + access_status = ap_run_access_checker_ex(r); +- if (access_status == OK) { +- ap_log_rerror(APLOG_MARK, APLOG_TRACE3, 0, r, +- "request authorized without authentication by " +- "access_checker_ex hook: %s", r->uri); +- } +- else if (access_status != DECLINED) { +- return decl_die(access_status, "check access", r); +- } +- else { ++ if (access_status == DECLINED ++ || (access_status == OK && ap_run_force_authn(r) == OK)) { + if ((access_status = ap_run_check_user_id(r)) != OK) { + return decl_die(access_status, "check user", r); + } +@@ -295,6 +325,14 @@ + return decl_die(access_status, "check authorization", r); + } + } ++ else if (access_status == OK) { ++ ap_log_rerror(APLOG_MARK, APLOG_TRACE3, 0, r, ++ "request authorized without authentication by " ++ "access_checker_ex hook: %s", r->uri); ++ } ++ else { ++ return decl_die(access_status, "check access", r); ++ } + break; + } + } +Index: include/http_request.h +=================================================================== +--- a/include/http_request.h (revision 1684524) ++++ b/include/http_request.h (revision 1684525) +@@ -185,6 +185,8 @@ + * is required for the current request + * @param r The current request + * @return 1 if authentication is required, 0 otherwise ++ * @bug Behavior changed in 2.4.x refactoring, API no longer usable ++ * @deprecated @see ap_some_authn_required() + */ + AP_DECLARE(int) ap_some_auth_required(request_rec *r); + +@@ -539,6 +541,16 @@ + AP_DECLARE_HOOK(int,post_perdir_config,(request_rec *r)) + + /** ++ * This hook allows a module to force authn to be required when ++ * processing a request. ++ * This hook should be registered with ap_hook_force_authn(). ++ * @param r The current request ++ * @return OK (force authn), DECLINED (let later modules decide) ++ * @ingroup hooks ++ */ ++AP_DECLARE_HOOK(int,force_authn,(request_rec *r)) ++ ++/** + * This hook allows modules to handle/emulate the apr_stat() calls + * needed for directory walk. + * @param r The current request +@@ -584,6 +596,17 @@ + AP_DECLARE(apr_bucket *) ap_bucket_eor_create(apr_bucket_alloc_t *list, + request_rec *r); + ++/** ++ * Can be used within any handler to determine if any authentication ++ * is required for the current request. Note that if used with an ++ * access_checker hook, an access_checker_ex hook or an authz provider; the ++ * caller should take steps to avoid a loop since this function is ++ * implemented by calling these hooks. 
++ * @param r The current request ++ * @return TRUE if authentication is required, FALSE otherwise ++ */ ++AP_DECLARE(int) ap_some_authn_required(request_rec *r); ++ + #ifdef __cplusplus + } + #endif diff --git a/SOURCES/httpd-2.4.6-CVE-2016-0736.patch b/SOURCES/httpd-2.4.6-CVE-2016-0736.patch new file mode 100644 index 0000000..cb80fa0 --- /dev/null +++ b/SOURCES/httpd-2.4.6-CVE-2016-0736.patch @@ -0,0 +1,325 @@ +diff --git a/modules/session/mod_session_crypto.c b/modules/session/mod_session_crypto.c +index 4d65bb8..9231a5e 100644 +--- a/modules/session/mod_session_crypto.c ++++ b/modules/session/mod_session_crypto.c +@@ -18,6 +18,7 @@ + #include "apu_version.h" + #include "apr_base64.h" /* for apr_base64_decode et al */ + #include "apr_lib.h" ++#include "apr_md5.h" + #include "apr_strings.h" + #include "http_log.h" + #include "http_core.h" +@@ -57,6 +58,146 @@ typedef struct { + int library_set; + } session_crypto_conf; + ++/* Wrappers around apr_siphash24() and apr_crypto_equals(), ++ * available in APU-1.6/APR-2.0 only. ++ */ ++#if APU_MAJOR_VERSION > 1 || (APU_MAJOR_VERSION == 1 && APU_MINOR_VERSION >= 6) ++ ++#include "apr_siphash.h" ++ ++#define AP_SIPHASH_DSIZE APR_SIPHASH_DSIZE ++#define AP_SIPHASH_KSIZE APR_SIPHASH_KSIZE ++#define ap_siphash24_auth apr_siphash24_auth ++ ++#define ap_crypto_equals apr_crypto_equals ++ ++#else ++ ++#define AP_SIPHASH_DSIZE 8 ++#define AP_SIPHASH_KSIZE 16 ++ ++#define ROTL64(x, n) (((x) << (n)) | ((x) >> (64 - (n)))) ++ ++#define U8TO64_LE(p) \ ++ (((apr_uint64_t)((p)[0]) ) | \ ++ ((apr_uint64_t)((p)[1]) << 8) | \ ++ ((apr_uint64_t)((p)[2]) << 16) | \ ++ ((apr_uint64_t)((p)[3]) << 24) | \ ++ ((apr_uint64_t)((p)[4]) << 32) | \ ++ ((apr_uint64_t)((p)[5]) << 40) | \ ++ ((apr_uint64_t)((p)[6]) << 48) | \ ++ ((apr_uint64_t)((p)[7]) << 56)) ++ ++#define U64TO8_LE(p, v) \ ++do { \ ++ (p)[0] = (unsigned char)((v) ); \ ++ (p)[1] = (unsigned char)((v) >> 8); \ ++ (p)[2] = (unsigned char)((v) >> 16); \ ++ (p)[3] = (unsigned char)((v) >> 24); \ ++ (p)[4] = (unsigned char)((v) >> 32); \ ++ (p)[5] = (unsigned char)((v) >> 40); \ ++ (p)[6] = (unsigned char)((v) >> 48); \ ++ (p)[7] = (unsigned char)((v) >> 56); \ ++} while (0) ++ ++#define SIPROUND() \ ++do { \ ++ v0 += v1; v1=ROTL64(v1,13); v1 ^= v0; v0=ROTL64(v0,32); \ ++ v2 += v3; v3=ROTL64(v3,16); v3 ^= v2; \ ++ v0 += v3; v3=ROTL64(v3,21); v3 ^= v0; \ ++ v2 += v1; v1=ROTL64(v1,17); v1 ^= v2; v2=ROTL64(v2,32); \ ++} while(0) ++ ++static apr_uint64_t ap_siphash24(const void *src, apr_size_t len, ++ const unsigned char key[AP_SIPHASH_KSIZE]) ++{ ++ const unsigned char *ptr, *end; ++ apr_uint64_t v0, v1, v2, v3, m; ++ apr_uint64_t k0, k1; ++ unsigned int rem; ++ ++ k0 = U8TO64_LE(key + 0); ++ k1 = U8TO64_LE(key + 8); ++ v3 = k1 ^ (apr_uint64_t)0x7465646279746573ULL; ++ v2 = k0 ^ (apr_uint64_t)0x6c7967656e657261ULL; ++ v1 = k1 ^ (apr_uint64_t)0x646f72616e646f6dULL; ++ v0 = k0 ^ (apr_uint64_t)0x736f6d6570736575ULL; ++ ++ rem = (unsigned int)(len & 0x7); ++ for (ptr = src, end = ptr + len - rem; ptr < end; ptr += 8) { ++ m = U8TO64_LE(ptr); ++ v3 ^= m; ++ SIPROUND(); ++ SIPROUND(); ++ v0 ^= m; ++ } ++ m = (apr_uint64_t)(len & 0xff) << 56; ++ switch (rem) { ++ case 7: m |= (apr_uint64_t)ptr[6] << 48; ++ case 6: m |= (apr_uint64_t)ptr[5] << 40; ++ case 5: m |= (apr_uint64_t)ptr[4] << 32; ++ case 4: m |= (apr_uint64_t)ptr[3] << 24; ++ case 3: m |= (apr_uint64_t)ptr[2] << 16; ++ case 2: m |= (apr_uint64_t)ptr[1] << 8; ++ case 1: m |= (apr_uint64_t)ptr[0]; ++ case 0: break; ++ } ++ v3 ^= m; ++ SIPROUND(); ++ SIPROUND(); ++ 
v0 ^= m; ++ ++ v2 ^= 0xff; ++ SIPROUND(); ++ SIPROUND(); ++ SIPROUND(); ++ SIPROUND(); ++ ++ return v0 ^ v1 ^ v2 ^ v3; ++} ++ ++static void ap_siphash24_auth(unsigned char out[AP_SIPHASH_DSIZE], ++ const void *src, apr_size_t len, ++ const unsigned char key[AP_SIPHASH_KSIZE]) ++{ ++ apr_uint64_t h; ++ h = ap_siphash24(src, len, key); ++ U64TO8_LE(out, h); ++} ++ ++static int ap_crypto_equals(const void *buf1, const void *buf2, ++ apr_size_t size) ++{ ++ const unsigned char *p1 = buf1; ++ const unsigned char *p2 = buf2; ++ unsigned char diff = 0; ++ apr_size_t i; ++ ++ for (i = 0; i < size; ++i) { ++ diff |= p1[i] ^ p2[i]; ++ } ++ ++ return 1 & ((diff - 1) >> 8); ++} ++ ++#endif ++ ++static void compute_auth(const void *src, apr_size_t len, ++ const char *passphrase, apr_size_t passlen, ++ unsigned char auth[AP_SIPHASH_DSIZE]) ++{ ++ unsigned char key[APR_MD5_DIGESTSIZE]; ++ ++ /* XXX: if we had a way to get the raw bytes from an apr_crypto_key_t ++ * we could use them directly (not available in APR-1.5.x). ++ * MD5 is 128bit too, so use it to get a suitable siphash key ++ * from the passphrase. ++ */ ++ apr_md5(key, passphrase, passlen); ++ ++ ap_siphash24_auth(auth, src, len, key); ++} ++ + /** + * Initialise the encryption as per the current config. + * +@@ -128,21 +269,14 @@ static apr_status_t encrypt_string(request_rec * r, const apr_crypto_t *f, + apr_crypto_block_t *block = NULL; + unsigned char *encrypt = NULL; + unsigned char *combined = NULL; +- apr_size_t encryptlen, tlen; ++ apr_size_t encryptlen, tlen, combinedlen; + char *base64; + apr_size_t blockSize = 0; + const unsigned char *iv = NULL; + apr_uuid_t salt; + apr_crypto_block_key_type_e *cipher; + const char *passphrase; +- +- /* by default, return an empty string */ +- *out = ""; +- +- /* don't attempt to encrypt an empty string, trying to do so causes a segfault */ +- if (!in || !*in) { +- return APR_SUCCESS; +- } ++ apr_size_t passlen; + + /* use a uuid as a salt value, and prepend it to our result */ + apr_uuid_get(&salt); +@@ -152,9 +286,9 @@ static apr_status_t encrypt_string(request_rec * r, const apr_crypto_t *f, + } + + /* encrypt using the first passphrase in the list */ +- passphrase = APR_ARRAY_IDX(dconf->passphrases, 0, char *); +- res = apr_crypto_passphrase(&key, &ivSize, passphrase, +- strlen(passphrase), ++ passphrase = APR_ARRAY_IDX(dconf->passphrases, 0, const char *); ++ passlen = strlen(passphrase); ++ res = apr_crypto_passphrase(&key, &ivSize, passphrase, passlen, + (unsigned char *) (&salt), sizeof(apr_uuid_t), + *cipher, APR_MODE_CBC, 1, 4096, f, r->pool); + if (APR_STATUS_IS_ENOKEY(res)) { +@@ -183,8 +317,9 @@ static apr_status_t encrypt_string(request_rec * r, const apr_crypto_t *f, + } + + /* encrypt the given string */ +- res = apr_crypto_block_encrypt(&encrypt, &encryptlen, (unsigned char *)in, +- strlen(in), block); ++ res = apr_crypto_block_encrypt(&encrypt, &encryptlen, ++ (const unsigned char *)in, strlen(in), ++ block); + if (APR_SUCCESS != res) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, res, r, APLOGNO(01830) + "apr_crypto_block_encrypt failed"); +@@ -198,18 +333,20 @@ static apr_status_t encrypt_string(request_rec * r, const apr_crypto_t *f, + } + encryptlen += tlen; + +- /* prepend the salt and the iv to the result */ +- combined = apr_palloc(r->pool, ivSize + encryptlen + sizeof(apr_uuid_t)); +- memcpy(combined, &salt, sizeof(apr_uuid_t)); +- memcpy(combined + sizeof(apr_uuid_t), iv, ivSize); +- memcpy(combined + sizeof(apr_uuid_t) + ivSize, encrypt, encryptlen); +- +- /* base64 encode 
the result */ +- base64 = apr_palloc(r->pool, apr_base64_encode_len(ivSize + encryptlen + +- sizeof(apr_uuid_t) + 1) +- * sizeof(char)); +- apr_base64_encode(base64, (const char *) combined, +- ivSize + encryptlen + sizeof(apr_uuid_t)); ++ /* prepend the salt and the iv to the result (keep room for the MAC) */ ++ combinedlen = AP_SIPHASH_DSIZE + sizeof(apr_uuid_t) + ivSize + encryptlen; ++ combined = apr_palloc(r->pool, combinedlen); ++ memcpy(combined + AP_SIPHASH_DSIZE, &salt, sizeof(apr_uuid_t)); ++ memcpy(combined + AP_SIPHASH_DSIZE + sizeof(apr_uuid_t), iv, ivSize); ++ memcpy(combined + AP_SIPHASH_DSIZE + sizeof(apr_uuid_t) + ivSize, ++ encrypt, encryptlen); ++ /* authenticate the whole salt+IV+ciphertext with a leading MAC */ ++ compute_auth(combined + AP_SIPHASH_DSIZE, combinedlen - AP_SIPHASH_DSIZE, ++ passphrase, passlen, combined); ++ ++ /* base64 encode the result (APR handles the trailing '\0') */ ++ base64 = apr_palloc(r->pool, apr_base64_encode_len(combinedlen)); ++ apr_base64_encode(base64, (const char *) combined, combinedlen); + *out = base64; + + return res; +@@ -234,6 +371,7 @@ static apr_status_t decrypt_string(request_rec * r, const apr_crypto_t *f, + char *decoded; + apr_size_t blockSize = 0; + apr_crypto_block_key_type_e *cipher; ++ unsigned char auth[AP_SIPHASH_DSIZE]; + int i = 0; + + /* strip base64 from the string */ +@@ -241,6 +379,13 @@ static apr_status_t decrypt_string(request_rec * r, const apr_crypto_t *f, + decodedlen = apr_base64_decode(decoded, in); + decoded[decodedlen] = '\0'; + ++ /* sanity check - decoded too short? */ ++ if (decodedlen < (AP_SIPHASH_DSIZE + sizeof(apr_uuid_t))) { ++ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, APR_SUCCESS, r, APLOGNO() ++ "too short to decrypt, aborting"); ++ return APR_ECRYPT; ++ } ++ + res = crypt_init(r, f, &cipher, dconf); + if (res != APR_SUCCESS) { + return res; +@@ -249,14 +394,25 @@ static apr_status_t decrypt_string(request_rec * r, const apr_crypto_t *f, + /* try each passphrase in turn */ + for (; i < dconf->passphrases->nelts; i++) { + const char *passphrase = APR_ARRAY_IDX(dconf->passphrases, i, char *); +- apr_size_t len = decodedlen; +- char *slider = decoded; ++ apr_size_t passlen = strlen(passphrase); ++ apr_size_t len = decodedlen - AP_SIPHASH_DSIZE; ++ unsigned char *slider = (unsigned char *)decoded + AP_SIPHASH_DSIZE; ++ ++ /* Verify authentication of the whole salt+IV+ciphertext by computing ++ * the MAC and comparing it (timing safe) with the one in the payload. ++ */ ++ compute_auth(slider, len, passphrase, passlen, auth); ++ if (!ap_crypto_equals(auth, decoded, AP_SIPHASH_DSIZE)) { ++ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, res, r, APLOGNO() ++ "auth does not match, skipping"); ++ continue; ++ } + + /* encrypt using the first passphrase in the list */ +- res = apr_crypto_passphrase(&key, &ivSize, passphrase, +- strlen(passphrase), +- (unsigned char *)decoded, sizeof(apr_uuid_t), +- *cipher, APR_MODE_CBC, 1, 4096, f, r->pool); ++ res = apr_crypto_passphrase(&key, &ivSize, passphrase, passlen, ++ slider, sizeof(apr_uuid_t), ++ *cipher, APR_MODE_CBC, 1, 4096, ++ f, r->pool); + if (APR_STATUS_IS_ENOKEY(res)) { + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, res, r, APLOGNO(01832) + "the passphrase '%s' was empty", passphrase); +@@ -279,7 +435,7 @@ static apr_status_t decrypt_string(request_rec * r, const apr_crypto_t *f, + } + + /* sanity check - decoded too short? 
*/ +- if (decodedlen < (sizeof(apr_uuid_t) + ivSize)) { ++ if (len < (sizeof(apr_uuid_t) + ivSize)) { + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, APR_SUCCESS, r, APLOGNO(01836) + "too short to decrypt, skipping"); + res = APR_ECRYPT; +@@ -290,8 +446,8 @@ static apr_status_t decrypt_string(request_rec * r, const apr_crypto_t *f, + slider += sizeof(apr_uuid_t); + len -= sizeof(apr_uuid_t); + +- res = apr_crypto_block_decrypt_init(&block, &blockSize, (unsigned char *)slider, key, +- r->pool); ++ res = apr_crypto_block_decrypt_init(&block, &blockSize, slider, key, ++ r->pool); + if (APR_SUCCESS != res) { + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, res, r, APLOGNO(01837) + "apr_crypto_block_decrypt_init failed"); +@@ -304,7 +460,7 @@ static apr_status_t decrypt_string(request_rec * r, const apr_crypto_t *f, + + /* decrypt the given string */ + res = apr_crypto_block_decrypt(&decrypted, &decryptedlen, +- (unsigned char *)slider, len, block); ++ slider, len, block); + if (res) { + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, res, r, APLOGNO(01838) + "apr_crypto_block_decrypt failed"); + diff --git a/SOURCES/httpd-2.4.6-CVE-2016-2161.patch b/SOURCES/httpd-2.4.6-CVE-2016-2161.patch new file mode 100644 index 0000000..d45c2d9 --- /dev/null +++ b/SOURCES/httpd-2.4.6-CVE-2016-2161.patch @@ -0,0 +1,121 @@ +diff --git a/modules/aaa/mod_auth_digest.c b/modules/aaa/mod_auth_digest.c +index 44b5fc8..6a50ba7 100644 +--- a/modules/aaa/mod_auth_digest.c ++++ b/modules/aaa/mod_auth_digest.c +@@ -261,6 +261,26 @@ static void log_error_and_cleanup(char *msg, apr_status_t sts, server_rec *s) + cleanup_tables(NULL); + } + ++/* RMM helper functions that behave like single-step malloc/free. */ ++ ++static void *rmm_malloc(apr_rmm_t *rmm, apr_size_t size) ++{ ++ apr_rmm_off_t offset = apr_rmm_malloc(rmm, size); ++ ++ if (!offset) { ++ return NULL; ++ } ++ ++ return apr_rmm_addr_get(rmm, offset); ++} ++ ++static apr_status_t rmm_free(apr_rmm_t *rmm, void *alloc) ++{ ++ apr_rmm_off_t offset = apr_rmm_offset_get(rmm, alloc); ++ ++ return apr_rmm_free(rmm, offset); ++} ++ + #if APR_HAS_SHARED_MEMORY + + static int initialize_tables(server_rec *s, apr_pool_t *ctx) +@@ -299,8 +319,8 @@ static int initialize_tables(server_rec *s, apr_pool_t *ctx) + return !OK; + } + +- client_list = apr_rmm_addr_get(client_rmm, apr_rmm_malloc(client_rmm, sizeof(*client_list) + +- sizeof(client_entry*)*num_buckets)); ++ client_list = rmm_malloc(client_rmm, sizeof(*client_list) + ++ sizeof(client_entry *) * num_buckets); + if (!client_list) { + log_error_and_cleanup("failed to allocate shared memory", -1, s); + return !OK; +@@ -322,7 +342,7 @@ static int initialize_tables(server_rec *s, apr_pool_t *ctx) + + /* setup opaque */ + +- opaque_cntr = apr_rmm_addr_get(client_rmm, apr_rmm_malloc(client_rmm, sizeof(*opaque_cntr))); ++ opaque_cntr = rmm_malloc(client_rmm, sizeof(*opaque_cntr)); + if (opaque_cntr == NULL) { + log_error_and_cleanup("failed to allocate shared memory", -1, s); + return !OK; +@@ -339,7 +359,7 @@ static int initialize_tables(server_rec *s, apr_pool_t *ctx) + + /* setup one-time-nonce counter */ + +- otn_counter = apr_rmm_addr_get(client_rmm, apr_rmm_malloc(client_rmm, sizeof(*otn_counter))); ++ otn_counter = rmm_malloc(client_rmm, sizeof(*otn_counter)); + if (otn_counter == NULL) { + log_error_and_cleanup("failed to allocate shared memory", -1, s); + return !OK; +@@ -779,7 +799,7 @@ static client_entry *get_client(unsigned long key, const request_rec *r) + * last entry in each bucket and updates the counters. 
Returns the + * number of removed entries. + */ +-static long gc(void) ++static long gc(server_rec *s) + { + client_entry *entry, *prev; + unsigned long num_removed = 0, idx; +@@ -789,6 +809,12 @@ static long gc(void) + for (idx = 0; idx < client_list->tbl_len; idx++) { + entry = client_list->table[idx]; + prev = NULL; ++ ++ if (!entry) { ++ /* This bucket is empty. */ ++ continue; ++ } ++ + while (entry->next) { /* find last entry */ + prev = entry; + entry = entry->next; +@@ -800,8 +826,16 @@ static long gc(void) + client_list->table[idx] = NULL; + } + if (entry) { /* remove entry */ +- apr_rmm_free(client_rmm, apr_rmm_offset_get(client_rmm, entry)); ++ apr_status_t err; ++ ++ err = rmm_free(client_rmm, entry); + num_removed++; ++ ++ if (err) { ++ /* Nothing we can really do but log... */ ++ ap_log_error(APLOG_MARK, APLOG_ERR, err, s, APLOGNO() ++ "Failed to free auth_digest client allocation"); ++ } + } + } + +@@ -835,16 +869,16 @@ static client_entry *add_client(unsigned long key, client_entry *info, + + /* try to allocate a new entry */ + +- entry = apr_rmm_addr_get(client_rmm, apr_rmm_malloc(client_rmm, sizeof(client_entry))); ++ entry = rmm_malloc(client_rmm, sizeof(client_entry)); + if (!entry) { +- long num_removed = gc(); ++ long num_removed = gc(s); + ap_log_error(APLOG_MARK, APLOG_INFO, 0, s, APLOGNO(01766) + "gc'd %ld client entries. Total new clients: " + "%ld; Total removed clients: %ld; Total renewed clients: " + "%ld", num_removed, + client_list->num_created - client_list->num_renewed, + client_list->num_removed, client_list->num_renewed); +- entry = apr_rmm_addr_get(client_rmm, apr_rmm_malloc(client_rmm, sizeof(client_entry))); ++ entry = rmm_malloc(client_rmm, sizeof(client_entry)); + if (!entry) { + ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(01767) + "unable to allocate new auth_digest client"); + diff --git a/SOURCES/httpd-2.4.6-CVE-2016-5387.patch b/SOURCES/httpd-2.4.6-CVE-2016-5387.patch new file mode 100644 index 0000000..9f22044 --- /dev/null +++ b/SOURCES/httpd-2.4.6-CVE-2016-5387.patch @@ -0,0 +1,16 @@ + +https://bugzilla.redhat.com/show_bug.cgi?id=CVE-2016-5387 + +--- httpd-2.4.6/server/util_script.c.cve5387 ++++ httpd-2.4.6/server/util_script.c +@@ -190,6 +190,10 @@ + continue; + } + #endif ++ else if (!strcasecmp(hdrs[i].key, "Proxy")) { ++ /* Don't pass through HTTP_PROXY */ ++ continue; ++ } + else + add_unless_null(e, http2env(r, hdrs[i].key), hdrs[i].val); + } diff --git a/SOURCES/httpd-2.4.6-CVE-2016-8743.patch b/SOURCES/httpd-2.4.6-CVE-2016-8743.patch new file mode 100644 index 0000000..5a6fd58 --- /dev/null +++ b/SOURCES/httpd-2.4.6-CVE-2016-8743.patch @@ -0,0 +1,2124 @@ + +https://bugzilla.redhat.com/show_bug.cgi?id=CVE-2016-8743 + +diff -uap httpd-2.4.6/docs/manual/mod/core.html.en.cve8743 httpd-2.4.6/docs/manual/mod/core.html.en +--- httpd-2.4.6/docs/manual/mod/core.html.en.cve8743 ++++ httpd-2.4.6/docs/manual/mod/core.html.en +@@ -67,6 +67,7 @@ +
  • ForceType
  • GprofDir
  • HostnameLookups
 +• HttpProtocolOptions
  • <If>
  • <IfDefine>
  • <IfModule>
 @@ -93,6 +94,7 @@
  • NameVirtualHost
  • Options
  • Protocol
 +• RegisterHttpMethod
  • RLimitCPU
  • RLimitMEM
  • RLimitNPROC
 @@ -1918,6 +1920,74 @@
    +
    top
    ++

    HttpProtocolOptions Directive

    ++ ++ ++ ++ ++ ++ ++ ++ ++
    Description:   Modify restrictions on HTTP Request Messages
    Syntax:        HttpProtocolOptions [Strict|Unsafe] [RegisteredMethods|LenientMethods] [Allow0.9|Require1.0]
    Default:       HttpProtocolOptions Strict LenientMethods Allow0.9
    Context:       server config, virtual host
    Status:        Core
    Module:        core
    Compatibility: 2.2.32 or 2.4.24 and later
    ++

    This directive changes the rules applied to the HTTP Request Line (RFC 7230 §3.1.1) and the HTTP Request Header Fields (RFC 7230 §3.2), which are now applied by default or when using the Strict option. Due to legacy modules, applications or custom user-agents which must be deprecated, the Unsafe option has been added to revert to the legacy behaviors. These rules are applied prior to request processing, so they must be configured at the global or default (first) matching virtual host section, by IP/port interface (and not by name), to be honored.

    ++ ++

    Prior to the introduction of this directive, the Apache HTTP Server request message parsers were tolerant of a number of forms of input which did not conform to the protocol. RFC 7230 §9.4 Request Splitting and §9.5 Response Smuggling call out only two of the potential risks of accepting non-conformant request messages, while RFC 7230 §3.5 "Message Parsing Robustness" identifies the risks of accepting obscure whitespace and request message formatting. As of the introduction of this directive, all grammar rules of the specification are enforced in the default Strict operating mode, and the strict whitespace suggested by section 3.5 is enforced and cannot be relaxed.

    ++ ++

    Users are strongly cautioned against toggling the Unsafe mode of operation, particularly on outward-facing, publicly accessible server deployments. If an interface is required for faulty monitoring agents or other custom service consumers running on an intranet, users should toggle the Unsafe option only on a specific virtual host configured to service their internal private network.

    ++ ++

    Reviewing the messages logged to the ErrorLog, configured with the LogLevel debug level, can help identify such faulty requests along with their origin. Users should pay particular attention to the 400 responses in the access log for invalid requests which were unexpectedly rejected.
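    A brief sketch of that logging setup follows; the warn core:debug selector and the log file path are assumptions chosen for illustration, not something this patch mandates:

    # Keep the server at "warn" overall, but capture the core parser's
    # request-rejection messages, which are emitted at debug level.
    ErrorLog "logs/error_log"
    LogLevel warn core:debug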

    ++ ++

    RFC 7231 §4.1 "Request Methods" "Overview" requires that origin servers respond with an error when an unsupported method is encountered in the request line. This already happens when the LenientMethods option is used, but administrators may wish to toggle the RegisteredMethods option and register any non-standard methods using the RegisterHttpMethod directive, particularly if the Unsafe option has been toggled. The RegisteredMethods option should not be toggled for forward proxy hosts, as the methods supported by the origin servers are unknown to the proxy server.

    ++ ++

    RFC 2616 §19.6 "Compatibility With Previous Versions" had encouraged HTTP servers to support legacy HTTP/0.9 requests. RFC 7230 supersedes this with "The expectation to support HTTP/0.9 requests has been removed" and offers additional comments in RFC 7230 Appendix A. The Require1.0 option allows the user to remove support of the default Allow0.9 option's behavior.
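    To make the options above concrete, here is a minimal configuration sketch: the global line simply restates the documented default, while the virtual host (its address, port and ServerName are invented placeholders) shows the intranet-only use of Unsafe that the preceding paragraphs caution about.

    # Global/default behaviour: strict parsing, the documented default
    HttpProtocolOptions Strict LenientMethods Allow0.9

    # Hypothetical internal-only virtual host, matched by IP/port, that must
    # keep accepting requests from a legacy, non-conformant intranet agent
    <VirtualHost 192.0.2.10:8080>
        ServerName legacy-agent.internal.example
        HttpProtocolOptions Unsafe
    </VirtualHost>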

    ++ ++
    ++
    top
    +

    <If> Directive

    + +
    Description: Contains directives that apply only if a condition is
 @@ -3541,6 +3611,23 @@
    top
    ++

    RegisterHttpMethod Directive

    ++ ++ ++ ++ ++ ++ ++
    Description: Register non-standard HTTP methods
    Syntax:      RegisterHttpMethod method [method [...]]
    Context:     server config
    Status:      Core
    Module:      core
    ++

    HTTP methods that do not conform to the relevant RFCs are normally rejected by request processing in Apache HTTPD. To avoid this, modules can register the non-standard HTTP methods they support. The RegisterHttpMethod directive allows such methods to be registered manually. This can be useful if such methods are forwarded for external processing, e.g. to a CGI script.
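    A short sketch of how the two directives might be combined; the BREW method name is purely hypothetical and presumes a backend CGI script that actually implements it.

    # Only accept methods httpd recognises, then register the single
    # non-standard method that is passed through to a CGI script.
    HttpProtocolOptions Strict RegisteredMethods
    RegisterHttpMethod BREW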

    ++ ++
    ++
    top
    +

    RLimitCPU Directive

    + + +
  • MaxRangeOverlaps
  • MaxRangeReversals
  • MaxRanges
 +• MergeSlashes
  • Mutex
  • NameVirtualHost
  • Options
 @@ -3170,6 +3171,30 @@ resource
    top
    ++
    Description:Limits the CPU consumption of processes launched +diff -uap httpd-2.4.6/include/http_core.h.cve8743 httpd-2.4.6/include/http_core.h +--- httpd-2.4.6/include/http_core.h.cve8743 ++++ httpd-2.4.6/include/http_core.h +@@ -668,6 +668,21 @@ + #define AP_MERGE_TRAILERS_DISABLE 2 + int merge_trailers; + ++#define AP_HTTP09_UNSET 0 ++#define AP_HTTP09_ENABLE 1 ++#define AP_HTTP09_DISABLE 2 ++ char http09_enable; ++ ++#define AP_HTTP_CONFORMANCE_UNSET 0 ++#define AP_HTTP_CONFORMANCE_UNSAFE 1 ++#define AP_HTTP_CONFORMANCE_STRICT 2 ++ char http_conformance; ++ ++#define AP_HTTP_METHODS_UNSET 0 ++#define AP_HTTP_METHODS_LENIENT 1 ++#define AP_HTTP_METHODS_REGISTERED 2 ++ char http_methods; ++ + } core_server_config; + + /* for AddOutputFiltersByType in core.c */ +diff -uap httpd-2.4.6/include/httpd.h.cve8743 httpd-2.4.6/include/httpd.h +--- httpd-2.4.6/include/httpd.h.cve8743 ++++ httpd-2.4.6/include/httpd.h +@@ -1584,6 +1584,28 @@ + */ + AP_DECLARE(int) ap_unescape_url(char *url); + ++/* Scan a string for field content chars, as defined by RFC7230 section 3.2 ++ * including VCHAR/obs-text, as well as HT and SP ++ * @param ptr The string to scan ++ * @return A pointer to the first (non-HT) ASCII ctrl character. ++ * @note lws and trailing whitespace are scanned, the caller is responsible ++ * for trimming leading and trailing whitespace ++ */ ++AP_DECLARE(const char *) ap_scan_http_field_content(const char *ptr); ++ ++/* Scan a string for token characters, as defined by RFC7230 section 3.2.6 ++ * @param ptr The string to scan ++ * @return A pointer to the first non-token character. ++ */ ++AP_DECLARE(const char *) ap_scan_http_token(const char *ptr); ++ ++/* Scan a string for visible ASCII (0x21-0x7E) or obstext (0x80+) ++ * and return a pointer to the first SP/CTL/NUL character encountered. ++ * @param ptr The string to scan ++ * @return A pointer to the first SP/CTL character. ++ */ ++AP_DECLARE(const char *) ap_scan_vchar_obstext(const char *ptr); ++ + /** + * Unescape a URL, but leaving %2f (slashes) escaped + * @param url The url to unescape +diff -uap httpd-2.4.6/include/http_protocol.h.cve8743 httpd-2.4.6/include/http_protocol.h +--- httpd-2.4.6/include/http_protocol.h.cve8743 ++++ httpd-2.4.6/include/http_protocol.h +@@ -582,17 +582,22 @@ + */ + AP_CORE_DECLARE(void) ap_parse_uri(request_rec *r, const char *uri); + ++#define AP_GETLINE_FOLD 1 /* Whether to merge continuation lines */ ++#define AP_GETLINE_CRLF 2 /*Whether line ends must be in the form CR LF */ ++ + /** + * Get the next line of input for the request + * @param s The buffer into which to read the line + * @param n The size of the buffer + * @param r The request +- * @param fold Whether to merge continuation lines ++ * @param flags Bit flag of multiple parsing options ++ * AP_GETLINE_FOLD Whether to merge continuation lines ++ * AP_GETLINE_CRLF Whether line ends must be in the form CR LF + * @return The length of the line, if successful + * n, if the line is too big to fit in the buffer + * -1 for miscellaneous errors + */ +-AP_DECLARE(int) ap_getline(char *s, int n, request_rec *r, int fold); ++AP_DECLARE(int) ap_getline(char *s, int n, request_rec *r, int flags); + + /** + * Get the next line of input for the request +@@ -610,7 +615,9 @@ + * @param n The size of the buffer + * @param read The length of the line. 
+ * @param r The request +- * @param fold Whether to merge continuation lines ++ * @param flags Bit flag of multiple parsing options ++ * AP_GETLINE_FOLD Whether to merge continuation lines ++ * AP_GETLINE_CRLF Whether line ends must be in the form CR LF + * @param bb Working brigade to use when reading buckets + * @return APR_SUCCESS, if successful + * APR_ENOSPC, if the line is too big to fit in the buffer +@@ -619,7 +626,7 @@ + #if APR_CHARSET_EBCDIC + AP_DECLARE(apr_status_t) ap_rgetline(char **s, apr_size_t n, + apr_size_t *read, +- request_rec *r, int fold, ++ request_rec *r, int flags, + apr_bucket_brigade *bb); + #else /* ASCII box */ + #define ap_rgetline(s, n, read, r, fold, bb) \ +@@ -629,7 +636,7 @@ + /** @see ap_rgetline */ + AP_DECLARE(apr_status_t) ap_rgetline_core(char **s, apr_size_t n, + apr_size_t *read, +- request_rec *r, int fold, ++ request_rec *r, int flags, + apr_bucket_brigade *bb); + + /** +diff -uap httpd-2.4.6/modules/http/http_filters.c.cve8743 httpd-2.4.6/modules/http/http_filters.c +--- httpd-2.4.6/modules/http/http_filters.c.cve8743 ++++ httpd-2.4.6/modules/http/http_filters.c +@@ -126,14 +126,15 @@ + + /** + * Parse a chunk line with optional extension, detect overflow. +- * There are two error cases: +- * 1) If the conversion would require too many bits, APR_EGENERAL is returned. +- * 2) If the conversion used the correct number of bits, but an overflow ++ * There are several error cases: ++ * 1) If the chunk link is misformatted, APR_EINVAL is returned. ++ * 2) If the conversion would require too many bits, APR_EGENERAL is returned. ++ * 3) If the conversion used the correct number of bits, but an overflow + * caused only the sign bit to flip, then APR_ENOSPC is returned. +- * In general, any negative number can be considered an overflow error. ++ * A negative chunk length always indicates an overflow error. + */ + static apr_status_t parse_chunk_size(http_ctx_t *ctx, const char *buffer, +- apr_size_t len, int linelimit) ++ apr_size_t len, int linelimit, int strict) + { + apr_size_t i = 0; + +@@ -146,6 +147,12 @@ + if (ctx->state == BODY_CHUNK_END + || ctx->state == BODY_CHUNK_END_LF) { + if (c == LF) { ++ if (strict && (ctx->state != BODY_CHUNK_END_LF)) { ++ /* ++ * CR missing before LF. ++ */ ++ return APR_EINVAL; ++ } + ctx->state = BODY_CHUNK; + } + else if (c == CR && ctx->state == BODY_CHUNK_END) { +@@ -153,7 +160,7 @@ + } + else { + /* +- * LF expected. ++ * CRLF expected. + */ + return APR_EINVAL; + } +@@ -180,6 +187,12 @@ + } + + if (c == LF) { ++ if (strict && (ctx->state != BODY_CHUNK_LF)) { ++ /* ++ * CR missing before LF. ++ */ ++ return APR_EINVAL; ++ } + if (ctx->remaining) { + ctx->state = BODY_CHUNK_DATA; + } +@@ -201,14 +214,17 @@ + } + else if (ctx->state == BODY_CHUNK_EXT) { + /* +- * Control chars (but tabs) are invalid. ++ * Control chars (excluding tabs) are invalid. ++ * TODO: more precisely limit input + */ + if (c != '\t' && apr_iscntrl(c)) { + return APR_EINVAL; + } + } + else if (c == ' ' || c == '\t') { +- /* Be lenient up to 10 BWS (term from rfc7230 - 3.2.3). 
++ /* Be lenient up to 10 implied *LWS, a legacy of RFC 2616, ++ * and noted as errata to RFC7230; ++ * https://www.rfc-editor.org/errata_search.php?rfc=7230&eid=4667 + */ + ctx->state = BODY_CHUNK_CR; + if (++ctx->chunk_bws > 10) { +@@ -324,7 +340,10 @@ + ap_input_mode_t mode, apr_read_type_e block, + apr_off_t readbytes) + { +- core_server_config *conf; ++ core_server_config *conf = ++ (core_server_config *) ap_get_module_config(f->r->server->module_config, ++ &core_module); ++ int strict = (conf->http_conformance != AP_HTTP_CONFORMANCE_UNSAFE); + apr_bucket *e; + http_ctx_t *ctx = f->ctx; + apr_status_t rv; +@@ -332,9 +351,6 @@ + apr_bucket_brigade *bb; + int again; + +- conf = (core_server_config *) +- ap_get_module_config(f->r->server->module_config, &core_module); +- + /* just get out of the way of things we don't want. */ + if (mode != AP_MODE_READBYTES && mode != AP_MODE_GETLINE) { + return ap_get_brigade(f->next, b, mode, block, readbytes); +@@ -526,7 +542,7 @@ + if (rv == APR_SUCCESS) { + parsing = 1; + rv = parse_chunk_size(ctx, buffer, len, +- f->r->server->limit_req_fieldsize); ++ f->r->server->limit_req_fieldsize, strict); + } + if (rv != APR_SUCCESS) { + ap_log_rerror(APLOG_MARK, APLOG_INFO, rv, f->r, APLOGNO(01590) +@@ -668,14 +684,121 @@ + return APR_SUCCESS; + } + ++struct check_header_ctx { ++ request_rec *r; ++ int strict; ++}; ++ ++/* check a single header, to be used with apr_table_do() */ ++static int check_header(struct check_header_ctx *ctx, ++ const char *name, const char **val) ++{ ++ const char *pos, *end; ++ char *dst = NULL; ++ ++ if (name[0] == '\0') { ++ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, ctx->r, APLOGNO(02428) ++ "Empty response header name, aborting request"); ++ return 0; ++ } ++ ++ if (ctx->strict) { ++ end = ap_scan_http_token(name); ++ } ++ else { ++ end = ap_scan_vchar_obstext(name); ++ } ++ if (*end) { ++ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, ctx->r, APLOGNO(02429) ++ "Response header name '%s' contains invalid " ++ "characters, aborting request", ++ name); ++ return 0; ++ } ++ ++ for (pos = *val; *pos; pos = end) { ++ end = ap_scan_http_field_content(pos); ++ if (*end) { ++ if (end[0] != CR || end[1] != LF || (end[2] != ' ' && ++ end[2] != '\t')) { ++ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, ctx->r, APLOGNO(02430) ++ "Response header '%s' value of '%s' contains " ++ "invalid characters, aborting request", ++ name, pos); ++ return 0; ++ } ++ if (!dst) { ++ *val = dst = apr_palloc(ctx->r->pool, strlen(*val) + 1); ++ } ++ } ++ if (dst) { ++ memcpy(dst, pos, end - pos); ++ dst += end - pos; ++ if (*end) { ++ /* skip folding and replace with a single space */ ++ end += 3 + strspn(end + 3, "\t "); ++ *dst++ = ' '; ++ } ++ } ++ } ++ if (dst) { ++ *dst = '\0'; ++ } ++ return 1; ++} ++ ++static int check_headers_table(apr_table_t *t, struct check_header_ctx *ctx) ++{ ++ const apr_array_header_t *headers = apr_table_elts(t); ++ apr_table_entry_t *header; ++ int i; ++ ++ for (i = 0; i < headers->nelts; ++i) { ++ header = &APR_ARRAY_IDX(headers, i, apr_table_entry_t); ++ if (!header->key) { ++ continue; ++ } ++ if (!check_header(ctx, header->key, (const char **)&header->val)) { ++ return 0; ++ } ++ } ++ return 1; ++} ++ ++/** ++ * Check headers for HTTP conformance ++ * @return 1 if ok, 0 if bad ++ */ ++static APR_INLINE int check_headers(request_rec *r) ++{ ++ struct check_header_ctx ctx; ++ core_server_config *conf = ++ ap_get_core_module_config(r->server->module_config); ++ ++ ctx.r = r; ++ ctx.strict = (conf->http_conformance != 
AP_HTTP_CONFORMANCE_UNSAFE); ++ return check_headers_table(r->headers_out, &ctx) && ++ check_headers_table(r->err_headers_out, &ctx); ++} ++ ++static int check_headers_recursion(request_rec *r) ++{ ++ void *check = NULL; ++ apr_pool_userdata_get(&check, "check_headers_recursion", r->pool); ++ if (check) { ++ return 1; ++ } ++ apr_pool_userdata_setn("true", "check_headers_recursion", NULL, r->pool); ++ return 0; ++} ++ + typedef struct header_struct { + apr_pool_t *pool; + apr_bucket_brigade *bb; + } header_struct; + + /* Send a single HTTP header field to the client. Note that this function +- * is used in calls to table_do(), so their interfaces are co-dependent. +- * In other words, don't change this one without checking table_do in alloc.c. ++ * is used in calls to apr_table_do(), so don't change its interface. + * It returns true unless there was a write error of some kind. + */ + static int form_header_field(header_struct *h, +@@ -1160,6 +1283,7 @@ + + typedef struct header_filter_ctx { + int headers_sent; ++ int headers_error; + } header_filter_ctx; + + AP_CORE_DECLARE_NONSTD(apr_status_t) ap_http_header_filter(ap_filter_t *f, +@@ -1175,19 +1299,23 @@ + header_filter_ctx *ctx = f->ctx; + const char *ctype; + ap_bucket_error *eb = NULL; ++ apr_bucket *eos = NULL; + + AP_DEBUG_ASSERT(!r->main); + +- if (r->header_only) { +- if (!ctx) { +- ctx = f->ctx = apr_pcalloc(r->pool, sizeof(header_filter_ctx)); +- } +- else if (ctx->headers_sent) { ++ if (!ctx) { ++ ctx = f->ctx = apr_pcalloc(r->pool, sizeof(header_filter_ctx)); ++ } ++ if (ctx->headers_sent) { ++ /* Eat body if response must not have one. */ ++ if (r->header_only || r->status == HTTP_NO_CONTENT) { + apr_brigade_cleanup(b); +- return OK; ++ return APR_SUCCESS; + } + } +- ++ else if (!ctx->headers_error && !check_headers(r)) { ++ ctx->headers_error = 1; ++ } + for (e = APR_BRIGADE_FIRST(b); + e != APR_BRIGADE_SENTINEL(b); + e = APR_BUCKET_NEXT(e)) +@@ -1204,10 +1332,44 @@ + ap_remove_output_filter(f); + return ap_pass_brigade(f->next, b); + } ++ if (ctx->headers_error && APR_BUCKET_IS_EOS(e)) { ++ eos = e; ++ } + } +- if (eb) { +- int status; ++ if (ctx->headers_error) { ++ if (!eos) { ++ /* Eat body until EOS */ ++ apr_brigade_cleanup(b); ++ return APR_SUCCESS; ++ } + ++ /* We may come back here from ap_die() below, ++ * so clear anything from this response. ++ */ ++ ctx->headers_error = 0; ++ apr_table_clear(r->headers_out); ++ apr_table_clear(r->err_headers_out); ++ ++ /* Don't recall ap_die() if we come back here (from its own internal ++ * redirect or error response), otherwise we can end up in infinite ++ * recursion; better fall through with 500, minimal headers and an ++ * empty body (EOS only). 
++ */ ++ if (!check_headers_recursion(r)) { ++ apr_brigade_cleanup(b); ++ ap_die(HTTP_INTERNAL_SERVER_ERROR, r); ++ return AP_FILTER_ERROR; ++ } ++ APR_BUCKET_REMOVE(eos); ++ apr_brigade_cleanup(b); ++ APR_BRIGADE_INSERT_TAIL(b, eos); ++ r->status = HTTP_INTERNAL_SERVER_ERROR; ++ r->content_type = r->content_encoding = NULL; ++ r->content_languages = NULL; ++ ap_set_content_length(r, 0); ++ } ++ else if (eb) { ++ int status; + status = eb->status; + apr_brigade_cleanup(b); + ap_die(status, r); +@@ -1264,6 +1426,10 @@ + apr_table_unset(r->headers_out, "Content-Length"); + } + ++ if (r->status == HTTP_NO_CONTENT) { ++ apr_table_unset(r->headers_out, "Content-Length"); ++ } ++ + ctype = ap_make_content_type(r, r->content_type); + if (ctype) { + apr_table_setn(r->headers_out, "Content-Type", ctype); +@@ -1352,11 +1518,11 @@ + terminate_header(b2); + + ap_pass_brigade(f->next, b2); ++ ctx->headers_sent = 1; + +- if (r->header_only) { ++ if (r->header_only || r->status == HTTP_NO_CONTENT) { + apr_brigade_cleanup(b); +- ctx->headers_sent = 1; +- return OK; ++ return APR_SUCCESS; + } + + r->sent_bodyct = 1; /* Whatever follows is real body stuff... */ +diff -uap httpd-2.4.6/server/core.c.cve8743 httpd-2.4.6/server/core.c +--- httpd-2.4.6/server/core.c.cve8743 ++++ httpd-2.4.6/server/core.c +@@ -506,6 +506,15 @@ + if (virt->trace_enable != AP_TRACE_UNSET) + conf->trace_enable = virt->trace_enable; + ++ if (virt->http09_enable != AP_HTTP09_UNSET) ++ conf->http09_enable = virt->http09_enable; ++ ++ if (virt->http_conformance != AP_HTTP_CONFORMANCE_UNSET) ++ conf->http_conformance = virt->http_conformance; ++ ++ if (virt->http_methods != AP_HTTP_METHODS_UNSET) ++ conf->http_methods = virt->http_methods; ++ + /* no action for virt->accf_map, not allowed per-vhost */ + + if (virt->protocol) +@@ -3632,6 +3641,57 @@ + return NULL; + } + ++static const char *set_http_protocol_options(cmd_parms *cmd, void *dummy, ++ const char *arg) ++{ ++ core_server_config *conf = ++ ap_get_core_module_config(cmd->server->module_config); ++ ++ if (strcasecmp(arg, "allow0.9") == 0) ++ conf->http09_enable |= AP_HTTP09_ENABLE; ++ else if (strcasecmp(arg, "require1.0") == 0) ++ conf->http09_enable |= AP_HTTP09_DISABLE; ++ else if (strcasecmp(arg, "strict") == 0) ++ conf->http_conformance |= AP_HTTP_CONFORMANCE_STRICT; ++ else if (strcasecmp(arg, "unsafe") == 0) ++ conf->http_conformance |= AP_HTTP_CONFORMANCE_UNSAFE; ++ else if (strcasecmp(arg, "registeredmethods") == 0) ++ conf->http_methods |= AP_HTTP_METHODS_REGISTERED; ++ else if (strcasecmp(arg, "lenientmethods") == 0) ++ conf->http_methods |= AP_HTTP_METHODS_LENIENT; ++ else ++ return "HttpProtocolOptions accepts " ++ "'Unsafe' or 'Strict' (default), " ++ "'RegisteredMethods' or 'LenientMethods' (default), and " ++ "'Require1.0' or 'Allow0.9' (default)"; ++ ++ if ((conf->http09_enable & AP_HTTP09_ENABLE) ++ && (conf->http09_enable & AP_HTTP09_DISABLE)) ++ return "HttpProtocolOptions 'Allow0.9' and 'Require1.0'" ++ " are mutually exclusive"; ++ ++ if ((conf->http_conformance & AP_HTTP_CONFORMANCE_STRICT) ++ && (conf->http_conformance & AP_HTTP_CONFORMANCE_UNSAFE)) ++ return "HttpProtocolOptions 'Strict' and 'Unsafe'" ++ " are mutually exclusive"; ++ ++ if ((conf->http_methods & AP_HTTP_METHODS_REGISTERED) ++ && (conf->http_methods & AP_HTTP_METHODS_LENIENT)) ++ return "HttpProtocolOptions 'RegisteredMethods' and 'LenientMethods'" ++ " are mutually exclusive"; ++ ++ return NULL; ++} ++ ++static const char *set_http_method(cmd_parms *cmd, void *conf, const char *arg) ++{ 
++ const char *err = ap_check_cmd_context(cmd, GLOBAL_ONLY); ++ if (err != NULL) ++ return err; ++ ap_method_register(cmd->pool, arg); ++ return NULL; ++} ++ + static apr_hash_t *errorlog_hash; + + static int log_constant_item(const ap_errorlog_info *info, const char *arg, +@@ -4143,6 +4203,13 @@ + "'on' (default), 'off' or 'extended' to trace request body content"), + AP_INIT_FLAG("MergeTrailers", set_merge_trailers, NULL, RSRC_CONF, + "merge request trailers into request headers or not"), ++AP_INIT_ITERATE("HttpProtocolOptions", set_http_protocol_options, NULL, RSRC_CONF, ++ "'Allow0.9' or 'Require1.0' (default); " ++ "'RegisteredMethods' or 'LenientMethods' (default); " ++ "'Unsafe' or 'Strict' (default). Sets HTTP acceptance rules") ++, ++AP_INIT_ITERATE("RegisterHttpMethod", set_http_method, NULL, RSRC_CONF, ++ "Registers non-standard HTTP methods"), + { NULL } + }; + +diff -uap httpd-2.4.6/server/gen_test_char.c.cve8743 httpd-2.4.6/server/gen_test_char.c +--- httpd-2.4.6/server/gen_test_char.c.cve8743 ++++ httpd-2.4.6/server/gen_test_char.c +@@ -16,11 +16,11 @@ + + #ifdef CROSS_COMPILE + ++#include + #define apr_isalnum(c) (isalnum(((unsigned char)(c)))) + #define apr_isalpha(c) (isalpha(((unsigned char)(c)))) + #define apr_iscntrl(c) (iscntrl(((unsigned char)(c)))) + #define apr_isprint(c) (isprint(((unsigned char)(c)))) +-#include + #define APR_HAVE_STDIO_H 1 + #define APR_HAVE_STRING_H 1 + +@@ -52,11 +52,13 @@ + #define T_ESCAPE_LOGITEM (0x10) + #define T_ESCAPE_FORENSIC (0x20) + #define T_ESCAPE_URLENCODED (0x40) ++#define T_HTTP_CTRLS (0x80) ++#define T_VCHAR_OBSTEXT (0x100) + + int main(int argc, char *argv[]) + { + unsigned c; +- unsigned char flags; ++ unsigned short flags; + + printf("/* this file is automatically generated by gen_test_char, " + "do not edit */\n" +@@ -67,19 +69,23 @@ + "#define T_ESCAPE_LOGITEM (%u)\n" + "#define T_ESCAPE_FORENSIC (%u)\n" + "#define T_ESCAPE_URLENCODED (%u)\n" ++ "#define T_HTTP_CTRLS (%u)\n" ++ "#define T_VCHAR_OBSTEXT (%u)\n" + "\n" +- "static const unsigned char test_char_table[256] = {", ++ "static const unsigned short test_char_table[256] = {", + T_ESCAPE_SHELL_CMD, + T_ESCAPE_PATH_SEGMENT, + T_OS_ESCAPE_PATH, + T_HTTP_TOKEN_STOP, + T_ESCAPE_LOGITEM, + T_ESCAPE_FORENSIC, +- T_ESCAPE_URLENCODED); ++ T_ESCAPE_URLENCODED, ++ T_HTTP_CTRLS, ++ T_VCHAR_OBSTEXT); + + for (c = 0; c < 256; ++c) { + flags = 0; +- if (c % 20 == 0) ++ if (c % 8 == 0) + printf("\n "); + + /* escape_shell_cmd */ +@@ -107,7 +113,7 @@ + flags |= T_ESCAPE_PATH_SEGMENT; + } + +- if (!apr_isalnum(c) && !strchr("$-_.+!*'(),:@&=/~", c)) { ++ if (!apr_isalnum(c) && !strchr("$-_.+!*'(),:;@&=/~", c)) { + flags |= T_OS_ESCAPE_PATH; + } + +@@ -115,11 +121,32 @@ + flags |= T_ESCAPE_URLENCODED; + } + +- /* these are the "tspecials" (RFC2068) or "separators" (RFC2616) */ +- if (c && (apr_iscntrl(c) || strchr(" \t()<>@,;:\\\"/[]?={}", c))) { ++ /* Stop for any non-'token' character, including ctrls, obs-text, ++ * and "tspecials" (RFC2068) a.k.a. "separators" (RFC2616), which ++ * is easer to express as characters remaining in the ASCII token set ++ */ ++ if (!c || !(apr_isalnum(c) || strchr("!#$%&'*+-.^_`|~", c))) { + flags |= T_HTTP_TOKEN_STOP; + } + ++ /* Catch CTRLs other than VCHAR, HT and SP, and obs-text (RFC7230 3.2) ++ * This includes only the C0 plane, not C1 (which is obs-text itself.) 
++ * XXX: We should verify that all ASCII C0 ctrls/DEL corresponding to ++ * the current EBCDIC translation are captured, and ASCII C1 ctrls ++ * corresponding are all permitted (as they fall under obs-text rule) ++ */ ++ if (!c || (apr_iscntrl(c) && c != '\t')) { ++ flags |= T_HTTP_CTRLS; ++ } ++ ++ /* From RFC3986, the specific sets of gen-delims, sub-delims (2.2), ++ * and unreserved (2.3) that are possible somewhere within a URI. ++ * Spec requires all others to be %XX encoded, including obs-text. ++ */ ++ if (c && !apr_iscntrl(c) && c != ' ') { ++ flags |= T_VCHAR_OBSTEXT; ++ } ++ + /* For logging, escape all control characters, + * double quotes (because they delimit the request in the log file) + * backslashes (because we use backslash for escaping) +@@ -137,7 +164,7 @@ + flags |= T_ESCAPE_FORENSIC; + } + +- printf("%u%c", flags, (c < 255) ? ',' : ' '); ++ printf("0x%03x%c", flags, (c < 255) ? ',' : ' '); + } + + printf("\n};\n"); +diff -uap httpd-2.4.6/server/protocol.c.cve8743 httpd-2.4.6/server/protocol.c +--- httpd-2.4.6/server/protocol.c.cve8743 ++++ httpd-2.4.6/server/protocol.c +@@ -189,6 +189,10 @@ + * caused by MIME folding (or broken clients) if fold != 0, and place it + * in the buffer s, of size n bytes, without the ending newline. + * ++ * Pulls from r->proto_input_filters instead of r->input_filters for ++ * stricter protocol adherence and better input filter behavior during ++ * chunked trailer processing (for http). ++ * + * If s is NULL, ap_rgetline_core will allocate necessary memory from r->pool. + * + * Returns APR_SUCCESS if there are no problems and sets *read to be +@@ -197,7 +201,7 @@ + * APR_ENOSPC is returned if there is not enough buffer space. + * Other errors may be returned on other errors. + * +- * The LF is *not* returned in the buffer. Therefore, a *read of 0 ++ * The [CR]LF are *not* returned in the buffer. Therefore, a *read of 0 + * indicates that an empty line was read. + * + * Notes: Because the buffer uses 1 char for NUL, the most we can return is +@@ -208,13 +212,15 @@ + */ + AP_DECLARE(apr_status_t) ap_rgetline_core(char **s, apr_size_t n, + apr_size_t *read, request_rec *r, +- int fold, apr_bucket_brigade *bb) ++ int flags, apr_bucket_brigade *bb) + { + apr_status_t rv; + apr_bucket *e; + apr_size_t bytes_handled = 0, current_alloc = 0; + char *pos, *last_char = *s; + int do_alloc = (*s == NULL), saw_eos = 0; ++ int fold = flags & AP_GETLINE_FOLD; ++ int crlf = flags & AP_GETLINE_CRLF; + + /* + * Initialize last_char as otherwise a random value will be compared +@@ -226,13 +232,15 @@ + + for (;;) { + apr_brigade_cleanup(bb); +- rv = ap_get_brigade(r->input_filters, bb, AP_MODE_GETLINE, ++ rv = ap_get_brigade(r->proto_input_filters, bb, AP_MODE_GETLINE, + APR_BLOCK_READ, 0); + if (rv != APR_SUCCESS) { + return rv; + } + +- /* Something horribly wrong happened. Someone didn't block! */ ++ /* Something horribly wrong happened. Someone didn't block! 
++ * (this also happens at the end of each keepalive connection) ++ */ + if (APR_BRIGADE_EMPTY(bb)) { + return APR_EGENERAL; + } +@@ -318,6 +326,13 @@ + } + } + ++ if (crlf && (last_char <= *s || last_char[-1] != APR_ASCII_CR)) { ++ *last_char = '\0'; ++ bytes_handled = last_char - *s; ++ *read = bytes_handled; ++ return APR_EINVAL; ++ } ++ + /* Now NUL-terminate the string at the end of the line; + * if the last-but-one character is a CR, terminate there */ + if (last_char > *s && last_char[-1] == APR_ASCII_CR) { +@@ -340,7 +355,7 @@ + apr_brigade_cleanup(bb); + + /* We only care about the first byte. */ +- rv = ap_get_brigade(r->input_filters, bb, AP_MODE_SPECULATIVE, ++ rv = ap_get_brigade(r->proto_input_filters, bb, AP_MODE_SPECULATIVE, + APR_BLOCK_READ, 1); + if (rv != APR_SUCCESS) { + return rv; +@@ -391,7 +406,8 @@ + */ + if (do_alloc) { + tmp = NULL; +- } else { ++ } ++ else { + /* We're null terminated. */ + tmp = last_char; + } +@@ -461,7 +477,7 @@ + } + #endif + +-AP_DECLARE(int) ap_getline(char *s, int n, request_rec *r, int fold) ++AP_DECLARE(int) ap_getline(char *s, int n, request_rec *r, int flags) + { + char *tmp_s = s; + apr_status_t rv; +@@ -469,7 +485,7 @@ + apr_bucket_brigade *tmp_bb; + + tmp_bb = apr_brigade_create(r->pool, r->connection->bucket_alloc); +- rv = ap_rgetline(&tmp_s, n, &len, r, fold, tmp_bb); ++ rv = ap_rgetline(&tmp_s, n, &len, r, flags, tmp_bb); + apr_brigade_destroy(tmp_bb); + + /* Map the out-of-space condition to the old API. */ +@@ -549,16 +565,29 @@ + } + } + +-static int read_request_line(request_rec *r, apr_bucket_brigade *bb) ++/* get the length of the field name for logging, but no more than 80 bytes */ ++#define LOG_NAME_MAX_LEN 80 ++static int field_name_len(const char *field) + { +- const char *ll; +- const char *uri; +- const char *pro; ++ const char *end = ap_strchr_c(field, ':'); ++ if (end == NULL || end - field > LOG_NAME_MAX_LEN) ++ return LOG_NAME_MAX_LEN; ++ return end - field; ++} + +- int major = 1, minor = 0; /* Assume HTTP/1.0 if non-"HTTP" protocol */ +- char http[5]; ++static int read_request_line(request_rec *r, apr_bucket_brigade *bb) ++{ ++ enum { ++ rrl_none, rrl_badmethod, rrl_badwhitespace, rrl_excesswhitespace, ++ rrl_missinguri, rrl_baduri, rrl_badprotocol, rrl_trailingtext, ++ rrl_badmethod09, rrl_reject09 ++ } deferred_error = rrl_none; ++ char *ll; ++ char *uri; + apr_size_t len; + int num_blank_lines = 0; ++ core_server_config *conf = ap_get_core_module_config(r->server->module_config); ++ int strict = (conf->http_conformance != AP_HTTP_CONFORMANCE_UNSAFE); + int max_blank_lines = r->server->limit_req_fields; + + if (max_blank_lines <= 0) { +@@ -588,7 +617,7 @@ + */ + r->the_request = NULL; + rv = ap_rgetline(&(r->the_request), (apr_size_t)(r->server->limit_req_line + 2), +- &len, r, 0, bb); ++ &len, r, strict ? 
AP_GETLINE_CRLF : 0, bb); + + if (rv != APR_SUCCESS) { + r->request_time = apr_time_now(); +@@ -599,8 +628,6 @@ + */ + if (APR_STATUS_IS_ENOSPC(rv)) { + r->status = HTTP_REQUEST_URI_TOO_LARGE; +- r->proto_num = HTTP_VERSION(1,0); +- r->protocol = apr_pstrdup(r->pool, "HTTP/1.0"); + } + else if (APR_STATUS_IS_TIMEUP(rv)) { + r->status = HTTP_REQUEST_TIME_OUT; +@@ -608,6 +635,8 @@ + else if (APR_STATUS_IS_EINVAL(rv)) { + r->status = HTTP_BAD_REQUEST; + } ++ r->proto_num = HTTP_VERSION(1,0); ++ r->protocol = apr_pstrdup(r->pool, "HTTP/1.0"); + return 0; + } + } while ((len <= 0) && (++num_blank_lines < max_blank_lines)); +@@ -619,46 +648,263 @@ + } + + r->request_time = apr_time_now(); +- ll = r->the_request; +- r->method = ap_getword_white(r->pool, &ll); ++ r->method = r->the_request; ++ ++ /* If there is whitespace before a method, skip it and mark in error */ ++ if (apr_isspace(*r->method)) { ++ deferred_error = rrl_badwhitespace; ++ for ( ; apr_isspace(*r->method); ++r->method) ++ ; ++ } + +- uri = ap_getword_white(r->pool, &ll); ++ /* Scan the method up to the next whitespace, ensure it contains only ++ * valid http-token characters, otherwise mark in error ++ */ ++ if (strict) { ++ ll = (char*) ap_scan_http_token(r->method); ++ } ++ else { ++ ll = (char*) ap_scan_vchar_obstext(r->method); ++ } + +- /* Provide quick information about the request method as soon as known */ ++ if (((ll == r->method) || (*ll && !apr_isspace(*ll))) ++ && deferred_error == rrl_none) { ++ deferred_error = rrl_badmethod; ++ ll = strpbrk(ll, "\t\n\v\f\r "); ++ } + +- r->method_number = ap_method_number_of(r->method); +- if (r->method_number == M_GET && r->method[0] == 'H') { +- r->header_only = 1; ++ /* Verify method terminated with a single SP, or mark as specific error */ ++ if (!ll) { ++ if (deferred_error == rrl_none) ++ deferred_error = rrl_missinguri; ++ r->protocol = uri = ""; ++ len = 0; ++ goto rrl_done; ++ } ++ else if (strict && ll[0] && apr_isspace(ll[1]) ++ && deferred_error == rrl_none) { ++ deferred_error = rrl_excesswhitespace; + } + +- ap_parse_uri(r, uri); ++ /* Advance uri pointer over leading whitespace, NUL terminate the method ++ * If non-SP whitespace is encountered, mark as specific error ++ */ ++ for (uri = ll; apr_isspace(*uri); ++uri) ++ if (*uri != ' ' && deferred_error == rrl_none) ++ deferred_error = rrl_badwhitespace; ++ *ll = '\0'; ++ ++ if (!*uri && deferred_error == rrl_none) ++ deferred_error = rrl_missinguri; ++ ++ /* Scan the URI up to the next whitespace, ensure it contains no raw ++ * control characters, otherwise mark in error ++ */ ++ ll = (char*) ap_scan_vchar_obstext(uri); ++ if (ll == uri || (*ll && !apr_isspace(*ll))) { ++ deferred_error = rrl_baduri; ++ ll = strpbrk(ll, "\t\n\v\f\r "); ++ } + +- if (ll[0]) { ++ /* Verify URI terminated with a single SP, or mark as specific error */ ++ if (!ll) { ++ r->protocol = ""; ++ len = 0; ++ goto rrl_done; ++ } ++ else if (strict && ll[0] && apr_isspace(ll[1]) ++ && deferred_error == rrl_none) { ++ deferred_error = rrl_excesswhitespace; ++ } ++ ++ /* Advance protocol pointer over leading whitespace, NUL terminate the uri ++ * If non-SP whitespace is encountered, mark as specific error ++ */ ++ for (r->protocol = ll; apr_isspace(*r->protocol); ++r->protocol) ++ if (*r->protocol != ' ' && deferred_error == rrl_none) ++ deferred_error = rrl_badwhitespace; ++ *ll = '\0'; ++ ++ /* Scan the protocol up to the next whitespace, validation comes later */ ++ if (!(ll = (char*) ap_scan_vchar_obstext(r->protocol))) { ++ len = 
strlen(r->protocol); ++ goto rrl_done; ++ } ++ len = ll - r->protocol; ++ ++ /* Advance over trailing whitespace, if found mark in error, ++ * determine if trailing text is found, unconditionally mark in error, ++ * finally NUL terminate the protocol string ++ */ ++ if (*ll && !apr_isspace(*ll)) { ++ deferred_error = rrl_badprotocol; ++ } ++ else if (strict && *ll) { ++ deferred_error = rrl_excesswhitespace; ++ } ++ else { ++ for ( ; apr_isspace(*ll); ++ll) ++ if (*ll != ' ' && deferred_error == rrl_none) ++ deferred_error = rrl_badwhitespace; ++ if (*ll && deferred_error == rrl_none) ++ deferred_error = rrl_trailingtext; ++ } ++ *((char *)r->protocol + len) = '\0'; ++ ++rrl_done: ++ /* For internal integrety and palloc efficiency, reconstruct the_request ++ * in one palloc, using only single SP characters, per spec. ++ */ ++ r->the_request = apr_pstrcat(r->pool, r->method, *uri ? " " : NULL, uri, ++ *r->protocol ? " " : NULL, r->protocol, NULL); ++ ++ if (len == 8 ++ && r->protocol[0] == 'H' && r->protocol[1] == 'T' ++ && r->protocol[2] == 'T' && r->protocol[3] == 'P' ++ && r->protocol[4] == '/' && apr_isdigit(r->protocol[5]) ++ && r->protocol[6] == '.' && apr_isdigit(r->protocol[7]) ++ && r->protocol[5] != '0') { ++ r->assbackwards = 0; ++ r->proto_num = HTTP_VERSION(r->protocol[5] - '0', r->protocol[7] - '0'); ++ } ++ else if (len == 8 ++ && (r->protocol[0] == 'H' || r->protocol[0] == 'h') ++ && (r->protocol[1] == 'T' || r->protocol[1] == 't') ++ && (r->protocol[2] == 'T' || r->protocol[2] == 't') ++ && (r->protocol[3] == 'P' || r->protocol[3] == 'p') ++ && r->protocol[4] == '/' && apr_isdigit(r->protocol[5]) ++ && r->protocol[6] == '.' && apr_isdigit(r->protocol[7]) ++ && r->protocol[5] != '0') { + r->assbackwards = 0; +- pro = ll; +- len = strlen(ll); +- } else { ++ r->proto_num = HTTP_VERSION(r->protocol[5] - '0', r->protocol[7] - '0'); ++ if (strict && deferred_error == rrl_none) ++ deferred_error = rrl_badprotocol; ++ else ++ memcpy((char*)r->protocol, "HTTP", 4); ++ } else if (r->protocol[0]) { ++ r->proto_num = HTTP_VERSION(0, 9); ++ /* Defer setting the r->protocol string till error msg is composed */ ++ if (deferred_error == rrl_none) ++ deferred_error = rrl_badprotocol; ++ } ++ else { + r->assbackwards = 1; +- pro = "HTTP/0.9"; +- len = 8; ++ r->protocol = apr_pstrdup(r->pool, "HTTP/0.9"); ++ r->proto_num = HTTP_VERSION(0, 9); + } +- r->protocol = apr_pstrmemdup(r->pool, pro, len); + +- /* Avoid sscanf in the common case */ +- if (len == 8 +- && pro[0] == 'H' && pro[1] == 'T' && pro[2] == 'T' && pro[3] == 'P' +- && pro[4] == '/' && apr_isdigit(pro[5]) && pro[6] == '.' 
+- && apr_isdigit(pro[7])) { +- r->proto_num = HTTP_VERSION(pro[5] - '0', pro[7] - '0'); +- } +- else if (3 == sscanf(r->protocol, "%4s/%u.%u", http, &major, &minor) +- && (strcasecmp("http", http) == 0) +- && (minor < HTTP_VERSION(1, 0)) ) /* don't allow HTTP/0.1000 */ +- r->proto_num = HTTP_VERSION(major, minor); +- else +- r->proto_num = HTTP_VERSION(1, 0); ++ /* Determine the method_number and parse the uri prior to invoking error ++ * handling, such that these fields are available for subsitution ++ */ ++ r->method_number = ap_method_number_of(r->method); ++ if (r->method_number == M_GET && r->method[0] == 'H') ++ r->header_only = 1; ++ ++ ap_parse_uri(r, uri); ++ ++ /* With the request understood, we can consider HTTP/0.9 specific errors */ ++ if (r->proto_num == HTTP_VERSION(0, 9) && deferred_error == rrl_none) { ++ if (conf->http09_enable == AP_HTTP09_DISABLE) ++ deferred_error = rrl_reject09; ++ else if (strict && (r->method_number != M_GET || r->header_only)) ++ deferred_error = rrl_badmethod09; ++ } ++ ++ /* Now that the method, uri and protocol are all processed, ++ * we can safely resume any deferred error reporting ++ */ ++ if (deferred_error != rrl_none) { ++ if (deferred_error == rrl_badmethod) ++ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(03445) ++ "HTTP Request Line; Invalid method token: '%.*s'", ++ field_name_len(r->method), r->method); ++ else if (deferred_error == rrl_badmethod09) ++ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(03444) ++ "HTTP Request Line; Invalid method token: '%.*s'" ++ " (only GET is allowed for HTTP/0.9 requests)", ++ field_name_len(r->method), r->method); ++ else if (deferred_error == rrl_missinguri) ++ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(03446) ++ "HTTP Request Line; Missing URI"); ++ else if (deferred_error == rrl_baduri) ++ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(03454) ++ "HTTP Request Line; URI incorrectly encoded: '%.*s'", ++ field_name_len(r->uri), r->uri); ++ else if (deferred_error == rrl_badwhitespace) ++ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(03447) ++ "HTTP Request Line; Invalid whitespace"); ++ else if (deferred_error == rrl_excesswhitespace) ++ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(03448) ++ "HTTP Request Line; Excess whitespace " ++ "(disallowed by HttpProtocolOptions Strict"); ++ else if (deferred_error == rrl_trailingtext) ++ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(03449) ++ "HTTP Request Line; Extraneous text found '%.*s' " ++ "(perhaps whitespace was injected?)", ++ field_name_len(ll), ll); ++ else if (deferred_error == rrl_reject09) ++ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02401) ++ "HTTP Request Line; Rejected HTTP/0.9 request"); ++ else if (deferred_error == rrl_badprotocol) ++ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02418) ++ "HTTP Request Line; Unrecognized protocol '%.*s' " ++ "(perhaps whitespace was injected?)", ++ field_name_len(r->protocol), r->protocol); ++ r->status = HTTP_BAD_REQUEST; ++ goto rrl_failed; ++ } ++ ++ if (conf->http_methods == AP_HTTP_METHODS_REGISTERED ++ && r->method_number == M_INVALID) { ++ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02423) ++ "HTTP Request Line; Unrecognized HTTP method: '%.*s' " ++ "(disallowed by RegisteredMethods)", ++ field_name_len(r->method), r->method); ++ r->status = HTTP_NOT_IMPLEMENTED; ++ /* This can't happen in an HTTP/0.9 request, we verified GET above */ ++ return 0; ++ } ++ ++ if (r->status != HTTP_OK) { ++ ap_log_rerror(APLOG_MARK, 
APLOG_DEBUG, 0, r, APLOGNO(03450) ++ "HTTP Request Line; Unable to parse URI: '%.*s'", ++ field_name_len(r->uri), r->uri); ++ goto rrl_failed; ++ } ++ ++ if (strict) { ++ if (r->parsed_uri.fragment) { ++ /* RFC3986 3.5: no fragment */ ++ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02421) ++ "HTTP Request Line; URI must not contain a fragment"); ++ r->status = HTTP_BAD_REQUEST; ++ goto rrl_failed; ++ } ++ if (r->parsed_uri.user || r->parsed_uri.password) { ++ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02422) ++ "HTTP Request Line; URI must not contain a " ++ "username/password"); ++ r->status = HTTP_BAD_REQUEST; ++ goto rrl_failed; ++ } ++ } + + return 1; ++rrl_failed: ++ if (r->proto_num == HTTP_VERSION(0, 9)) { ++ /* Send all parsing and protocol error response with 1.x behavior, ++ * and reserve 505 errors for actual HTTP protocols presented. ++ * As called out in RFC7230 3.5, any errors parsing the protocol ++ * from the request line are nearly always misencoded HTTP/1.x ++ * requests. Only a valid 0.9 request with no parsing errors ++ * at all may be treated as a simple request, if allowed. ++ */ ++ r->assbackwards = 0; ++ r->connection->keepalive = AP_CONN_CLOSE; ++ r->proto_num = HTTP_VERSION(1, 0); ++ r->protocol = apr_pstrdup(r->pool, "HTTP/1.0"); ++ } ++ return 0; + } + + static int table_do_fn_check_lengths(void *r_, const char *key, +@@ -670,26 +916,13 @@ + + r->status = HTTP_BAD_REQUEST; + apr_table_setn(r->notes, "error-notes", +- apr_pstrcat(r->pool, "Size of a request header field " +- "after merging exceeds server limit.
    " +- "\n
    \n",
    +-                               ap_escape_html(r->pool, key),
+-                               "</pre>
    \n", NULL)); +- ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(00560) "Request header " +- "exceeds LimitRequestFieldSize after merging: %s", key); ++ "Size of a request header field exceeds server limit."); ++ ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(00560) "Request " ++ "header exceeds LimitRequestFieldSize after merging: %.*s", ++ field_name_len(key), key); + return 0; + } + +-/* get the length of the field name for logging, but no more than 80 bytes */ +-#define LOG_NAME_MAX_LEN 80 +-static int field_name_len(const char *field) +-{ +- const char *end = ap_strchr_c(field, ':'); +- if (end == NULL || end - field > LOG_NAME_MAX_LEN) +- return LOG_NAME_MAX_LEN; +- return end - field; +-} +- + AP_DECLARE(void) ap_get_mime_headers_core(request_rec *r, apr_bucket_brigade *bb) + { + char *last_field = NULL; +@@ -700,6 +933,8 @@ + apr_size_t len; + int fields_read = 0; + char *tmp_field; ++ core_server_config *conf = ap_get_core_module_config(r->server->module_config); ++ int strict = (conf->http_conformance != AP_HTTP_CONFORMANCE_UNSAFE); + + /* + * Read header lines until we get the empty separator line, a read error, +@@ -707,11 +942,10 @@ + */ + while(1) { + apr_status_t rv; +- int folded = 0; + + field = NULL; + rv = ap_rgetline(&field, r->server->limit_req_fieldsize + 2, +- &len, r, 0, bb); ++ &len, r, strict ? AP_GETLINE_CRLF : 0, bb); + + if (rv != APR_SUCCESS) { + if (APR_STATUS_IS_TIMEUP(rv)) { +@@ -728,153 +962,217 @@ + * exceeds the configured limit for a field size. + */ + if (rv == APR_ENOSPC) { +- const char *field_escaped; +- if (field) { +- /* ensure ap_escape_html will terminate correctly */ +- field[len - 1] = '\0'; +- field_escaped = ap_escape_html(r->pool, field); +- } +- else { +- field_escaped = field = ""; +- } +- + apr_table_setn(r->notes, "error-notes", +- apr_psprintf(r->pool, +- "Size of a request header field " +- "exceeds server limit.
    \n" +- "
    \n%.*s\n
    \n", +- field_name_len(field_escaped), +- field_escaped)); ++ "Size of a request header field " ++ "exceeds server limit."); + ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(00561) + "Request header exceeds LimitRequestFieldSize%s" + "%.*s", +- *field ? ": " : "", +- field_name_len(field), field); ++ (field && *field) ? ": " : "", ++ (field) ? field_name_len(field) : 0, ++ (field) ? field : ""); + } + return; + } + +- if (last_field != NULL) { +- if ((len > 0) && ((*field == '\t') || *field == ' ')) { +- /* This line is a continuation of the preceding line(s), +- * so append it to the line that we've set aside. +- * Note: this uses a power-of-two allocator to avoid +- * doing O(n) allocs and using O(n^2) space for +- * continuations that span many many lines. +- */ +- apr_size_t fold_len = last_len + len + 1; /* trailing null */ + +- if (fold_len >= (apr_size_t)(r->server->limit_req_fieldsize)) { +- r->status = HTTP_BAD_REQUEST; +- /* report what we have accumulated so far before the +- * overflow (last_field) as the field with the problem +- */ +- apr_table_setn(r->notes, "error-notes", +- apr_psprintf(r->pool, +- "Size of a request header field " +- "after folding " +- "exceeds server limit.
    \n" +- "
    \n%.*s\n
    \n", +- field_name_len(last_field), +- ap_escape_html(r->pool, last_field))); +- ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(00562) +- "Request header exceeds LimitRequestFieldSize " +- "after folding: %.*s", +- field_name_len(last_field), last_field); +- return; +- } ++ /* For all header values, and all obs-fold lines, the presence of ++ * additional whitespace is a no-op, so collapse trailing whitespace ++ * to save buffer allocation and optimize copy operations. ++ * Do not remove the last single whitespace under any condition. ++ */ ++ while (len > 1 && (field[len-1] == '\t' || field[len-1] == ' ')) { ++ field[--len] = '\0'; ++ } ++ ++ if (*field == '\t' || *field == ' ') { ++ /* Append any newly-read obs-fold line onto the preceding ++ * last_field line we are processing ++ */ ++ apr_size_t fold_len; + ++ if (last_field == NULL) { ++ r->status = HTTP_BAD_REQUEST; ++ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(03442) ++ "Line folding encountered before first" ++ " header line"); ++ return; ++ } ++ ++ if (field[1] == '\0') { ++ r->status = HTTP_BAD_REQUEST; ++ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(03443) ++ "Empty folded line encountered"); ++ return; ++ } ++ ++ /* Leading whitespace on an obs-fold line can be ++ * similarly discarded */ ++ while (field[1] == '\t' || field[1] == ' ') { ++ ++field; --len; ++ } ++ ++ /* This line is a continuation of the preceding line(s), ++ * so append it to the line that we've set aside. ++ * Note: this uses a power-of-two allocator to avoid ++ * doing O(n) allocs and using O(n^2) space for ++ * continuations that span many many lines. ++ */ ++ fold_len = last_len + len + 1; /* trailing null */ ++ ++ if (fold_len >= (apr_size_t)(r->server->limit_req_fieldsize)) { ++ r->status = HTTP_BAD_REQUEST; ++ /* report what we have accumulated so far before the ++ * overflow (last_field) as the field with the problem ++ */ ++ apr_table_setn(r->notes, "error-notes", ++ "Size of a request header field " ++ "exceeds server limit."); ++ ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(00562) ++ "Request header exceeds LimitRequestFieldSize " ++ "after folding: %.*s", ++ field_name_len(last_field), last_field); ++ return; ++ } ++ ++ if (fold_len > alloc_len) { ++ char *fold_buf; ++ alloc_len += alloc_len; + if (fold_len > alloc_len) { +- char *fold_buf; +- alloc_len += alloc_len; +- if (fold_len > alloc_len) { +- alloc_len = fold_len; +- } +- fold_buf = (char *)apr_palloc(r->pool, alloc_len); +- memcpy(fold_buf, last_field, last_len); +- last_field = fold_buf; ++ alloc_len = fold_len; + } +- memcpy(last_field + last_len, field, len +1); /* +1 for nul */ +- last_len += len; +- folded = 1; +- } +- else /* not a continuation line */ { ++ fold_buf = (char *)apr_palloc(r->pool, alloc_len); ++ memcpy(fold_buf, last_field, last_len); ++ last_field = fold_buf; ++ } ++ memcpy(last_field + last_len, field, len +1); /* +1 for nul */ ++ /* Replace obs-fold w/ SP per RFC 7230 3.2.4 */ ++ last_field[last_len] = ' '; ++ last_len += len; + +- if (r->server->limit_req_fields ++ /* We've appended this obs-fold line to last_len, proceed to ++ * read the next input line ++ */ ++ continue; ++ } ++ else if (last_field != NULL) { ++ /* Process the previous last_field header line with all obs-folded ++ * segments already concatinated (this is not operating on the ++ * most recently read input line). 
++ */ ++ if (r->server->limit_req_fields + && (++fields_read > r->server->limit_req_fields)) { +- r->status = HTTP_BAD_REQUEST; +- apr_table_setn(r->notes, "error-notes", +- "The number of request header fields " +- "exceeds this server's limit."); +- ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(00563) +- "Number of request headers exceeds " +- "LimitRequestFields"); +- return; +- } ++ r->status = HTTP_BAD_REQUEST; ++ apr_table_setn(r->notes, "error-notes", ++ "The number of request header fields " ++ "exceeds this server's limit."); ++ ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(00563) ++ "Number of request headers exceeds " ++ "LimitRequestFields"); ++ return; ++ } + +- if (!(value = strchr(last_field, ':'))) { /* Find ':' or */ +- r->status = HTTP_BAD_REQUEST; /* abort bad request */ +- apr_table_setn(r->notes, "error-notes", +- apr_psprintf(r->pool, +- "Request header field is " +- "missing ':' separator.
    \n" +- "
    \n%.*s
    \n", +- (int)LOG_NAME_MAX_LEN, +- ap_escape_html(r->pool, +- last_field))); +- ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(00564) ++ if (!strict) ++ { ++ /* Not Strict ('Unsafe' mode), using the legacy parser */ ++ ++ if (!(value = strchr(last_field, ':'))) { /* Find ':' or */ ++ r->status = HTTP_BAD_REQUEST; /* abort bad request */ ++ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(00564) + "Request header field is missing ':' " + "separator: %.*s", (int)LOG_NAME_MAX_LEN, + last_field); ++ + return; + } + +- tmp_field = value - 1; /* last character of field-name */ ++ /* last character of field-name */ ++ tmp_field = value - (value > last_field ? 1 : 0); + + *value++ = '\0'; /* NUL-terminate at colon */ + ++ if (strpbrk(last_field, "\t\n\v\f\r ")) { ++ r->status = HTTP_BAD_REQUEST; ++ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(03452) ++ "Request header field name presented" ++ " invalid whitespace"); ++ return; ++ } ++ + while (*value == ' ' || *value == '\t') { +- ++value; /* Skip to start of value */ ++ ++value; /* Skip to start of value */ ++ } ++ ++ if (strpbrk(value, "\n\v\f\r")) { ++ r->status = HTTP_BAD_REQUEST; ++ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(03451) ++ "Request header field value presented" ++ " bad whitespace"); ++ return; + } + +- /* Strip LWS after field-name: */ +- while (tmp_field > last_field +- && (*tmp_field == ' ' || *tmp_field == '\t')) { +- *tmp_field-- = '\0'; ++ if (tmp_field == last_field) { ++ r->status = HTTP_BAD_REQUEST; ++ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(03453) ++ "Request header field name was empty"); ++ return; ++ } ++ } ++ else /* Using strict RFC7230 parsing */ ++ { ++ /* Ensure valid token chars before ':' per RFC 7230 3.2.4 */ ++ value = (char *)ap_scan_http_token(last_field); ++ if ((value == last_field) || *value != ':') { ++ r->status = HTTP_BAD_REQUEST; ++ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02426) ++ "Request header field name is malformed: " ++ "%.*s", (int)LOG_NAME_MAX_LEN, last_field); ++ return; + } + +- /* Strip LWS after field-value: */ +- tmp_field = last_field + last_len - 1; +- while (tmp_field > value +- && (*tmp_field == ' ' || *tmp_field == '\t')) { +- *tmp_field-- = '\0'; ++ *value++ = '\0'; /* NUL-terminate last_field name at ':' */ ++ ++ while (*value == ' ' || *value == '\t') { ++ ++value; /* Skip LWS of value */ + } + +- apr_table_addn(r->headers_in, last_field, value); ++ /* Find invalid, non-HT ctrl char, or the trailing NULL */ ++ tmp_field = (char *)ap_scan_http_field_content(value); + +- /* reset the alloc_len so that we'll allocate a new +- * buffer if we have to do any more folding: we can't +- * use the previous buffer because its contents are +- * now part of r->headers_in ++ /* Reject value for all garbage input (CTRLs excluding HT) ++ * e.g. only VCHAR / SP / HT / obs-text are allowed per ++ * RFC7230 3.2.6 - leave all more explicit rule enforcement ++ * for specific header handler logic later in the cycle + */ +- alloc_len = 0; ++ if (*tmp_field != '\0') { ++ r->status = HTTP_BAD_REQUEST; ++ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02427) ++ "Request header value is malformed: " ++ "%.*s", (int)LOG_NAME_MAX_LEN, value); ++ return; ++ } ++ } ++ ++ apr_table_addn(r->headers_in, last_field, value); + +- } /* end if current line is not a continuation starting with tab */ ++ /* This last_field header is now stored in headers_in, ++ * resume processing of the current input line. ++ */ + } + +- /* Found a blank line, stop. 
*/ ++ /* Found the terminating empty end-of-headers line, stop. */ + if (len == 0) { + break; + } + +- /* Keep track of this line so that we can parse it on +- * the next loop iteration. (In the folded case, last_field +- * has been updated already.) ++ /* Keep track of this new header line so that we can extend it across ++ * any obs-fold or parse it on the next loop iteration. We referenced ++ * our previously allocated buffer in r->headers_in, ++ * so allocate a fresh buffer if required. + */ +- if (!folded) { +- last_field = field; +- last_len = len; +- } ++ alloc_len = 0; ++ last_field = field; ++ last_len = len; + } + + /* Combine multiple message-header fields with the same +@@ -899,7 +1197,7 @@ + request_rec *r; + apr_pool_t *p; + const char *expect; +- int access_status = HTTP_OK; ++ int access_status; + apr_bucket_brigade *tmp_bb; + apr_socket_t *csd; + apr_interval_time_t cur_timeout; +@@ -958,35 +1256,39 @@ + + /* Get the request... */ + if (!read_request_line(r, tmp_bb)) { +- if (r->status == HTTP_REQUEST_URI_TOO_LARGE +- || r->status == HTTP_BAD_REQUEST) { ++ switch (r->status) { ++ case HTTP_REQUEST_URI_TOO_LARGE: ++ case HTTP_BAD_REQUEST: ++ case HTTP_VERSION_NOT_SUPPORTED: ++ case HTTP_NOT_IMPLEMENTED: + if (r->status == HTTP_REQUEST_URI_TOO_LARGE) { + ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(00565) + "request failed: client's request-line exceeds LimitRequestLine (longer than %d)", + r->server->limit_req_line); + } + else if (r->method == NULL) { +- ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(00566) +- "request failed: invalid characters in URI"); ++ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(00566) ++ "request failed: malformed request line"); + } +- ap_send_error_response(r, 0); ++ access_status = r->status; ++ r->status = HTTP_OK; ++ ap_die(access_status, r); + ap_update_child_status(conn->sbh, SERVER_BUSY_LOG, r); + ap_run_log_transaction(r); ++ r = NULL; + apr_brigade_destroy(tmp_bb); + goto traceout; +- } +- else if (r->status == HTTP_REQUEST_TIME_OUT) { +- ap_update_child_status(conn->sbh, SERVER_BUSY_LOG, r); +- if (!r->connection->keepalives) { ++ case HTTP_REQUEST_TIME_OUT: ++ ap_update_child_status(conn->sbh, SERVER_BUSY_LOG, NULL); ++ if (!r->connection->keepalives) + ap_run_log_transaction(r); +- } + apr_brigade_destroy(tmp_bb); + goto traceout; ++ default: ++ apr_brigade_destroy(tmp_bb); ++ r = NULL; ++ goto traceout; + } +- +- apr_brigade_destroy(tmp_bb); +- r = NULL; +- goto traceout; + } + + /* We may have been in keep_alive_timeout mode, so toggle back +@@ -1003,7 +1305,7 @@ + if (!r->assbackwards) { + ap_get_mime_headers_core(r, tmp_bb); + if (r->status != HTTP_OK) { +- ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(00567) ++ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(00567) + "request failed: error reading the headers"); + ap_send_error_response(r, 0); + ap_update_child_status(conn->sbh, SERVER_BUSY_LOG, r); +@@ -1021,25 +1323,6 @@ + apr_table_unset(r->headers_in, "Content-Length"); + } + } +- else { +- if (r->header_only) { +- /* +- * Client asked for headers only with HTTP/0.9, which doesn't send +- * headers! Have to dink things just to make sure the error message +- * comes through... 
+- */ +- ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(00568) +- "client sent invalid HTTP/0.9 request: HEAD %s", +- r->uri); +- r->header_only = 0; +- r->status = HTTP_BAD_REQUEST; +- ap_send_error_response(r, 0); +- ap_update_child_status(conn->sbh, SERVER_BUSY_LOG, r); +- ap_run_log_transaction(r); +- apr_brigade_destroy(tmp_bb); +- goto traceout; +- } +- } + + apr_brigade_destroy(tmp_bb); + +@@ -1071,7 +1354,7 @@ + * a Host: header, and the server MUST respond with 400 if it doesn't. + */ + access_status = HTTP_BAD_REQUEST; +- ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(00569) ++ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(00569) + "client sent HTTP/1.1 request without hostname " + "(see RFC2616 section 14.23): %s", r->uri); + } +diff -uap httpd-2.4.6/server/util.c.cve8743 httpd-2.4.6/server/util.c +--- httpd-2.4.6/server/util.c.cve8743 ++++ httpd-2.4.6/server/util.c +@@ -79,7 +79,7 @@ + * char in here and get it to work, because if char is signed then it + * will first be sign extended. + */ +-#define TEST_CHAR(c, f) (test_char_table[(unsigned)(c)] & (f)) ++#define TEST_CHAR(c, f) (test_char_table[(unsigned char)(c)] & (f)) + + /* Win32/NetWare/OS2 need to check for both forward and back slashes + * in ap_getparents() and ap_escape_url. +@@ -1449,6 +1449,37 @@ + return find_list_item(p, line, tok, AP_ETAG_WEAK); + } + ++/* Scan a string for HTTP VCHAR/obs-text characters including HT and SP ++ * (as used in header values, for example, in RFC 7230 section 3.2) ++ * returning the pointer to the first non-HT ASCII ctrl character. ++ */ ++AP_DECLARE(const char *) ap_scan_http_field_content(const char *ptr) ++{ ++ for ( ; !TEST_CHAR(*ptr, T_HTTP_CTRLS); ++ptr) ; ++ ++ return ptr; ++} ++ ++/* Scan a string for HTTP token characters, returning the pointer to ++ * the first non-token character. ++ */ ++AP_DECLARE(const char *) ap_scan_http_token(const char *ptr) ++{ ++ for ( ; !TEST_CHAR(*ptr, T_HTTP_TOKEN_STOP); ++ptr) ; ++ ++ return ptr; ++} ++ ++/* Scan a string for visible ASCII (0x21-0x7E) or obstext (0x80+) ++ * and return a pointer to the first ctrl/space character encountered. ++ */ ++AP_DECLARE(const char *) ap_scan_vchar_obstext(const char *ptr) ++{ ++ for ( ; TEST_CHAR(*ptr, T_VCHAR_OBSTEXT); ++ptr) ; ++ ++ return ptr; ++} ++ + /* Retrieve a token, spacing over it and returning a pointer to + * the first non-white byte afterwards. 
Note that these tokens + * are delimited by semis and commas; and can also be delimited +diff -uap httpd-2.4.6/server/vhost.c.cve8743 httpd-2.4.6/server/vhost.c +--- httpd-2.4.6/server/vhost.c.cve8743 ++++ httpd-2.4.6/server/vhost.c +@@ -685,6 +685,116 @@ + * run-time vhost matching functions + */ + ++static apr_status_t fix_hostname_v6_literal(request_rec *r, char *host) ++{ ++ char *dst; ++ int double_colon = 0; ++ ++ for (dst = host; *dst; dst++) { ++ if (apr_isxdigit(*dst)) { ++ if (apr_isupper(*dst)) { ++ *dst = apr_tolower(*dst); ++ } ++ } ++ else if (*dst == ':') { ++ if (*(dst + 1) == ':') { ++ if (double_colon) ++ return APR_EINVAL; ++ double_colon = 1; ++ } ++ else if (*(dst + 1) == '.') { ++ return APR_EINVAL; ++ } ++ } ++ else if (*dst == '.') { ++ /* For IPv4-mapped IPv6 addresses like ::FFFF:129.144.52.38 */ ++ if (*(dst + 1) == ':' || *(dst + 1) == '.') ++ return APR_EINVAL; ++ } ++ else { ++ return APR_EINVAL; ++ } ++ } ++ return APR_SUCCESS; ++} ++ ++static apr_status_t fix_hostname_non_v6(request_rec *r, char *host) ++{ ++ char *dst; ++ ++ for (dst = host; *dst; dst++) { ++ if (apr_islower(*dst)) { ++ /* leave char unchanged */ ++ } ++ else if (*dst == '.') { ++ if (*(dst + 1) == '.') { ++ return APR_EINVAL; ++ } ++ } ++ else if (apr_isupper(*dst)) { ++ *dst = apr_tolower(*dst); ++ } ++ else if (*dst == '/' || *dst == '\\') { ++ return APR_EINVAL; ++ } ++ } ++ /* strip trailing gubbins */ ++ if (dst > host && dst[-1] == '.') { ++ dst[-1] = '\0'; ++ } ++ return APR_SUCCESS; ++} ++ ++/* ++ * If strict mode ever becomes the default, this should be folded into ++ * fix_hostname_non_v6() ++ */ ++static apr_status_t strict_hostname_check(request_rec *r, char *host) ++{ ++ char *ch; ++ int is_dotted_decimal = 1, leading_zeroes = 0, dots = 0; ++ ++ for (ch = host; *ch; ch++) { ++ if (!apr_isascii(*ch)) { ++ goto bad; ++ } ++ else if (apr_isalpha(*ch) || *ch == '-') { ++ is_dotted_decimal = 0; ++ } ++ else if (ch[0] == '.') { ++ dots++; ++ if (ch[1] == '0' && apr_isdigit(ch[2])) ++ leading_zeroes = 1; ++ } ++ else if (!apr_isdigit(*ch)) { ++ /* also takes care of multiple Host headers by denying commas */ ++ goto bad; ++ } ++ } ++ if (is_dotted_decimal) { ++ if (host[0] == '.' || (host[0] == '0' && apr_isdigit(host[1]))) ++ leading_zeroes = 1; ++ if (leading_zeroes || dots != 3) { ++ /* RFC 3986 7.4 */ ++ goto bad; ++ } ++ } ++ else { ++ /* The top-level domain must start with a letter (RFC 1123 2.1) */ ++ while (ch > host && *ch != '.') ++ ch--; ++ if (ch[0] == '.' && ch[1] != '\0' && !apr_isalpha(ch[1])) ++ goto bad; ++ } ++ return APR_SUCCESS; ++ ++bad: ++ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02415) ++ "[strict] Invalid host name '%s'%s%.6s", ++ host, *ch ? ", problem near: " : "", ch); ++ return APR_EINVAL; ++} ++ + /* Lowercase and remove any trailing dot and/or :port from the hostname, + * and check that it is sane. + * +@@ -698,79 +808,90 @@ + * Instead we just check for filesystem metacharacters: directory + * separators / and \ and sequences of more than one dot. + */ +-static void fix_hostname(request_rec *r) ++static int fix_hostname(request_rec *r, const char *host_header, ++ unsigned http_conformance) + { ++ const char *src; + char *host, *scope_id; +- char *dst; + apr_port_t port; + apr_status_t rv; + const char *c; ++ int is_v6literal = 0; ++ int strict = (http_conformance != AP_HTTP_CONFORMANCE_UNSAFE); + +- /* According to RFC 2616, Host header field CAN be blank. */ +- if (!*r->hostname) { +- return; ++ src = host_header ? 
host_header : r->hostname; ++ ++ /* According to RFC 2616, Host header field CAN be blank */ ++ if (!*src) { ++ return is_v6literal; + } + + /* apr_parse_addr_port will interpret a bare integer as a port + * which is incorrect in this context. So treat it separately. + */ +- for (c = r->hostname; apr_isdigit(*c); ++c); +- if (!*c) { /* pure integer */ +- return; ++ for (c = src; apr_isdigit(*c); ++c); ++ if (!*c) { ++ /* pure integer */ ++ if (strict) { ++ /* RFC 3986 7.4 */ ++ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02416) ++ "[strict] purely numeric host names not allowed: %s", ++ src); ++ goto bad_nolog; ++ } ++ r->hostname = src; ++ return is_v6literal; ++ } ++ ++ if (host_header) { ++ rv = apr_parse_addr_port(&host, &scope_id, &port, src, r->pool); ++ if (rv != APR_SUCCESS || scope_id) ++ goto bad; ++ if (port) { ++ /* Don't throw the Host: header's port number away: ++ save it in parsed_uri -- ap_get_server_port() needs it! */ ++ /* @@@ XXX there should be a better way to pass the port. ++ * Like r->hostname, there should be a r->portno ++ */ ++ r->parsed_uri.port = port; ++ r->parsed_uri.port_str = apr_itoa(r->pool, (int)port); ++ } ++ if (host_header[0] == '[') ++ is_v6literal = 1; ++ } ++ else { ++ /* ++ * Already parsed, surrounding [ ] (if IPv6 literal) and :port have ++ * already been removed. ++ */ ++ host = apr_pstrdup(r->pool, r->hostname); ++ if (ap_strchr(host, ':') != NULL) ++ is_v6literal = 1; + } + +- rv = apr_parse_addr_port(&host, &scope_id, &port, r->hostname, r->pool); +- if (rv != APR_SUCCESS || scope_id) { +- goto bad; ++ if (is_v6literal) { ++ rv = fix_hostname_v6_literal(r, host); + } +- +- if (port) { +- /* Don't throw the Host: header's port number away: +- save it in parsed_uri -- ap_get_server_port() needs it! */ +- /* @@@ XXX there should be a better way to pass the port. +- * Like r->hostname, there should be a r->portno +- */ +- r->parsed_uri.port = port; +- r->parsed_uri.port_str = apr_itoa(r->pool, (int)port); ++ else { ++ rv = fix_hostname_non_v6(r, host); ++ if (strict && rv == APR_SUCCESS) ++ rv = strict_hostname_check(r, host); + } ++ if (rv != APR_SUCCESS) ++ goto bad; + +- /* if the hostname is an IPv6 numeric address string, it was validated +- * already; otherwise, further validation is needed +- */ +- if (r->hostname[0] != '[') { +- for (dst = host; *dst; dst++) { +- if (apr_islower(*dst)) { +- /* leave char unchanged */ +- } +- else if (*dst == '.') { +- if (*(dst + 1) == '.') { +- goto bad; +- } +- } +- else if (apr_isupper(*dst)) { +- *dst = apr_tolower(*dst); +- } +- else if (*dst == '/' || *dst == '\\') { +- goto bad; +- } +- } +- /* strip trailing gubbins */ +- if (dst > host && dst[-1] == '.') { +- dst[-1] = '\0'; +- } +- } + r->hostname = host; +- return; ++ return is_v6literal; + + bad: +- r->status = HTTP_BAD_REQUEST; + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(00550) + "Client sent malformed Host header: %s", +- r->hostname); +- return; ++ src); ++bad_nolog: ++ r->status = HTTP_BAD_REQUEST; ++ return is_v6literal; + } + +- + /* return 1 if host matches ServerName or ServerAliases */ + static int matches_aliases(server_rec *s, const char *host) + { +@@ -980,15 +1101,76 @@ + } + } + ++static APR_INLINE const char *construct_host_header(request_rec *r, ++ int is_v6literal) ++{ ++ struct iovec iov[5]; ++ apr_size_t nvec = 0; ++ /* ++ * We cannot use ap_get_server_name/port here, because we must ++ * ignore UseCanonicalName/Port. 
++ */ ++ if (is_v6literal) { ++ iov[nvec].iov_base = "["; ++ iov[nvec].iov_len = 1; ++ nvec++; ++ } ++ iov[nvec].iov_base = (void *)r->hostname; ++ iov[nvec].iov_len = strlen(r->hostname); ++ nvec++; ++ if (is_v6literal) { ++ iov[nvec].iov_base = "]"; ++ iov[nvec].iov_len = 1; ++ nvec++; ++ } ++ if (r->parsed_uri.port_str) { ++ iov[nvec].iov_base = ":"; ++ iov[nvec].iov_len = 1; ++ nvec++; ++ iov[nvec].iov_base = r->parsed_uri.port_str; ++ iov[nvec].iov_len = strlen(r->parsed_uri.port_str); ++ nvec++; ++ } ++ return apr_pstrcatv(r->pool, iov, nvec, NULL); ++} + + AP_DECLARE(void) ap_update_vhost_from_headers(request_rec *r) + { +- /* must set this for HTTP/1.1 support */ +- if (r->hostname || (r->hostname = apr_table_get(r->headers_in, "Host"))) { +- fix_hostname(r); +- if (r->status != HTTP_OK) +- return; ++ core_server_config *conf = ap_get_core_module_config(r->server->module_config); ++ const char *host_header = apr_table_get(r->headers_in, "Host"); ++ int is_v6literal = 0; ++ int have_hostname_from_url = 0; ++ ++ if (r->hostname) { ++ /* ++ * If there was a host part in the Request-URI, ignore the 'Host' ++ * header. ++ */ ++ have_hostname_from_url = 1; ++ is_v6literal = fix_hostname(r, NULL, conf->http_conformance); ++ } ++ else if (host_header != NULL) { ++ is_v6literal = fix_hostname(r, host_header, conf->http_conformance); ++ } ++ if (r->status != HTTP_OK) ++ return; ++ ++ if (conf->http_conformance != AP_HTTP_CONFORMANCE_UNSAFE) { ++ /* ++ * If we have both hostname from an absoluteURI and a Host header, ++ * we must ignore the Host header (RFC 2616 5.2). ++ * To enforce this, we reset the Host header to the value from the ++ * request line. ++ */ ++ if (have_hostname_from_url && host_header != NULL) { ++ const char *repl = construct_host_header(r, is_v6literal); ++ apr_table_set(r->headers_in, "Host", repl); ++ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02417) ++ "Replacing host header '%s' with host '%s' given " ++ "in the request uri", host_header, repl); ++ } + } ++ + /* check if we tucked away a name_chain */ + if (r->connection->vhost_lookup_data) { + if (r->hostname) diff --git a/SOURCES/httpd-2.4.6-CVE-2017-15710.patch b/SOURCES/httpd-2.4.6-CVE-2017-15710.patch new file mode 100644 index 0000000..d8f02c9 --- /dev/null +++ b/SOURCES/httpd-2.4.6-CVE-2017-15710.patch @@ -0,0 +1,19 @@ +--- a/modules/aaa/mod_authnz_ldap.c 2018/02/15 17:33:04 1824335 ++++ b/modules/aaa/mod_authnz_ldap.c 2018/02/15 17:42:14 1824336 +@@ -126,9 +126,13 @@ + + charset = (char*) apr_hash_get(charset_conversions, language, APR_HASH_KEY_STRING); + +- if (!charset) { +- language[2] = '\0'; +- charset = (char*) apr_hash_get(charset_conversions, language, APR_HASH_KEY_STRING); ++ /* ++ * Test if language values like 'en-US' return a match from the charset ++ * conversion map when shortened to 'en'. 
++ */ ++ if (!charset && strlen(language) > 3 && language[2] == '-') { ++ char *language_short = apr_pstrndup(p, language, 2); ++ charset = (char*) apr_hash_get(charset_conversions, language_short, APR_HASH_KEY_STRING); + } + + if (charset) { diff --git a/SOURCES/httpd-2.4.6-CVE-2017-3167.patch b/SOURCES/httpd-2.4.6-CVE-2017-3167.patch new file mode 100644 index 0000000..3272598 --- /dev/null +++ b/SOURCES/httpd-2.4.6-CVE-2017-3167.patch @@ -0,0 +1,343 @@ +diff --git a/include/http_protocol.h b/include/http_protocol.h +index 5ac0ce3..f3a5137 100644 +--- a/include/http_protocol.h ++++ b/include/http_protocol.h +@@ -558,7 +558,11 @@ AP_DECLARE(void) ap_note_digest_auth_failure(request_rec *r); + AP_DECLARE_HOOK(int, note_auth_failure, (request_rec *r, const char *auth_type)) + + /** +- * Get the password from the request headers ++ * Get the password from the request headers. This function has multiple side ++ * effects due to its prior use in the old authentication framework. ++ * ap_get_basic_auth_components() should be preferred. ++ * ++ * @deprecated @see ap_get_basic_auth_components + * @param r The current request + * @param pw The password as set in the headers + * @return 0 (OK) if it set the 'pw' argument (and assured +@@ -571,6 +575,25 @@ AP_DECLARE_HOOK(int, note_auth_failure, (request_rec *r, const char *auth_type)) + */ + AP_DECLARE(int) ap_get_basic_auth_pw(request_rec *r, const char **pw); + ++#define AP_GET_BASIC_AUTH_PW_NOTE "AP_GET_BASIC_AUTH_PW_NOTE" ++ ++/** ++ * Get the username and/or password from the request's Basic authentication ++ * headers. Unlike ap_get_basic_auth_pw(), calling this function has no side ++ * effects on the passed request_rec. ++ * ++ * @param r The current request ++ * @param username If not NULL, set to the username sent by the client ++ * @param password If not NULL, set to the password sent by the client ++ * @return APR_SUCCESS if the credentials were successfully parsed and returned; ++ * APR_EINVAL if there was no authentication header sent or if the ++ * client was not using the Basic authentication scheme. username and ++ * password are unchanged on failure. ++ */ ++AP_DECLARE(apr_status_t) ap_get_basic_auth_components(const request_rec *r, ++ const char **username, ++ const char **password); ++ + /** + * parse_uri: break apart the uri + * @warning Side Effects: +diff --git a/include/httpd.h b/include/httpd.h +index 652a212..176ef5e 100644 +--- a/include/httpd.h ++++ b/include/httpd.h +@@ -2272,6 +2272,34 @@ AP_DECLARE(char *) ap_get_exec_line(apr_pool_t *p, + + #define AP_NORESTART APR_OS_START_USEERR + 1 + ++/** ++ * Perform a case-insensitive comparison of two strings @a atr1 and @a atr2, ++ * treating upper and lower case values of the 26 standard C/POSIX alphabetic ++ * characters as equivalent. Extended latin characters outside of this set ++ * are treated as unique octets, irrespective of the current locale. ++ * ++ * Returns in integer greater than, equal to, or less than 0, ++ * according to whether @a str1 is considered greater than, equal to, ++ * or less than @a str2. ++ * ++ * @note Same code as apr_cstr_casecmp, which arrives in APR 1.6 ++ */ ++AP_DECLARE(int) ap_cstr_casecmp(const char *s1, const char *s2); ++ ++/** ++ * Perform a case-insensitive comparison of two strings @a atr1 and @a atr2, ++ * treating upper and lower case values of the 26 standard C/POSIX alphabetic ++ * characters as equivalent. Extended latin characters outside of this set ++ * are treated as unique octets, irrespective of the current locale. 
++ * ++ * Returns in integer greater than, equal to, or less than 0, ++ * according to whether @a str1 is considered greater than, equal to, ++ * or less than @a str2. ++ * ++ * @note Same code as apr_cstr_casecmpn, which arrives in APR 1.6 ++ */ ++AP_DECLARE(int) ap_cstr_casecmpn(const char *s1, const char *s2, apr_size_t n); ++ + #ifdef __cplusplus + } + #endif +diff --git a/server/protocol.c b/server/protocol.c +index 24355c7..868c3e3 100644 +--- a/server/protocol.c ++++ b/server/protocol.c +@@ -1567,6 +1567,7 @@ AP_DECLARE(int) ap_get_basic_auth_pw(request_rec *r, const char **pw) + + t = ap_pbase64decode(r->pool, auth_line); + r->user = ap_getword_nulls (r->pool, &t, ':'); ++ apr_table_setn(r->notes, AP_GET_BASIC_AUTH_PW_NOTE, "1"); + r->ap_auth_type = "Basic"; + + *pw = t; +@@ -1574,6 +1575,53 @@ AP_DECLARE(int) ap_get_basic_auth_pw(request_rec *r, const char **pw) + return OK; + } + ++AP_DECLARE(apr_status_t) ap_get_basic_auth_components(const request_rec *r, ++ const char **username, ++ const char **password) ++{ ++ const char *auth_header; ++ const char *credentials; ++ const char *decoded; ++ const char *user; ++ ++ auth_header = (PROXYREQ_PROXY == r->proxyreq) ? "Proxy-Authorization" ++ : "Authorization"; ++ credentials = apr_table_get(r->headers_in, auth_header); ++ ++ if (!credentials) { ++ /* No auth header. */ ++ return APR_EINVAL; ++ } ++ ++ if (ap_cstr_casecmp(ap_getword(r->pool, &credentials, ' '), "Basic")) { ++ /* These aren't Basic credentials. */ ++ return APR_EINVAL; ++ } ++ ++ while (*credentials == ' ' || *credentials == '\t') { ++ credentials++; ++ } ++ ++ /* XXX Our base64 decoding functions don't actually error out if the string ++ * we give it isn't base64; they'll just silently stop and hand us whatever ++ * they've parsed up to that point. ++ * ++ * Since this function is supposed to be a drop-in replacement for the ++ * deprecated ap_get_basic_auth_pw(), don't fix this for 2.4.x. ++ */ ++ decoded = ap_pbase64decode(r->pool, credentials); ++ user = ap_getword_nulls(r->pool, &decoded, ':'); ++ ++ if (username) { ++ *username = user; ++ } ++ if (password) { ++ *password = decoded; ++ } ++ ++ return APR_SUCCESS; ++} ++ + struct content_length_ctx { + int data_sent; /* true if the C-L filter has already sent at + * least one bucket on to the next output filter +diff --git a/server/request.c b/server/request.c +index 2711bed..4eef097 100644 +--- a/server/request.c ++++ b/server/request.c +@@ -124,6 +124,8 @@ static int decl_die(int status, const char *phase, request_rec *r) + AP_DECLARE(int) ap_some_authn_required(request_rec *r) + { + int access_status; ++ char *olduser = r->user; ++ int rv = FALSE; + + switch (ap_satisfies(r)) { + case SATISFY_ALL: +@@ -134,7 +136,7 @@ AP_DECLARE(int) ap_some_authn_required(request_rec *r) + + access_status = ap_run_access_checker_ex(r); + if (access_status == DECLINED) { +- return TRUE; ++ rv = TRUE; + } + + break; +@@ -145,13 +147,14 @@ AP_DECLARE(int) ap_some_authn_required(request_rec *r) + + access_status = ap_run_access_checker_ex(r); + if (access_status == DECLINED) { +- return TRUE; ++ rv = TRUE; + } + + break; + } + +- return FALSE; ++ r->user = olduser; ++ return rv; + } + + /* This is the master logic for processing requests. Do NOT duplicate +@@ -259,6 +262,14 @@ AP_DECLARE(int) ap_process_request_internal(request_rec *r) + r->ap_auth_type = r->main->ap_auth_type; + } + else { ++ /* A module using a confusing API (ap_get_basic_auth_pw) caused ++ ** r->user to be filled out prior to check_authn hook. 
We treat ++ ** it is inadvertent. ++ */ ++ if (r->user && apr_table_get(r->notes, AP_GET_BASIC_AUTH_PW_NOTE)) { ++ r->user = NULL; ++ } ++ + switch (ap_satisfies(r)) { + case SATISFY_ALL: + case SATISFY_NOSPEC: +diff --git a/server/util.c b/server/util.c +index db22b50..70fd662 100644 +--- a/server/util.c ++++ b/server/util.c +@@ -96,7 +96,6 @@ + #undef APLOG_MODULE_INDEX + #define APLOG_MODULE_INDEX AP_CORE_MODULE_INDEX + +- + /* + * Examine a field value (such as a media-/content-type) string and return + * it sans any parameters; e.g., strip off any ';charset=foo' and the like. +@@ -3036,3 +3035,128 @@ AP_DECLARE(char *) ap_get_exec_line(apr_pool_t *p, + + return apr_pstrndup(p, buf, k); + } ++ ++#if !APR_CHARSET_EBCDIC ++/* ++ * Our own known-fast translation table for casecmp by character. ++ * Only ASCII alpha characters 41-5A are folded to 61-7A, other ++ * octets (such as extended latin alphabetics) are never case-folded. ++ * NOTE: Other than Alpha A-Z/a-z, each code point is unique! ++*/ ++static const short ucharmap[] = { ++ 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, ++ 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, ++ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, ++ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, ++ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, ++ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, ++ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, ++ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, ++ 0x40, 'a', 'b', 'c', 'd', 'e', 'f', 'g', ++ 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', ++ 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', ++ 'x', 'y', 'z', 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, ++ 0x60, 'a', 'b', 'c', 'd', 'e', 'f', 'g', ++ 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', ++ 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', ++ 'x', 'y', 'z', 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, ++ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, ++ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, ++ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, ++ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, ++ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, ++ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, ++ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, ++ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, ++ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, ++ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, ++ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, ++ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, ++ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, ++ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, ++ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, ++ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff ++}; ++#else /* APR_CHARSET_EBCDIC */ ++/* ++ * Derived from apr-iconv/ccs/cp037.c for EBCDIC case comparison, ++ * provides unique identity of every char value (strict ISO-646 ++ * conformance, arbitrary election of an ISO-8859-1 ordering, and ++ * very arbitrary control code assignments into C1 to achieve ++ * identity and a reversible mapping of code points), ++ * then folding the equivalences of ASCII 41-5A into 61-7A, ++ * presenting comparison results in a somewhat ISO/IEC 10646 ++ * (ASCII-like) order, depending on the EBCDIC code page in use. ++ * ++ * NOTE: Other than Alpha A-Z/a-z, each code point is unique! 
++ */ ++static const short ucharmap[] = { ++ 0x00, 0x01, 0x02, 0x03, 0x9C, 0x09, 0x86, 0x7F, ++ 0x97, 0x8D, 0x8E, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, ++ 0x10, 0x11, 0x12, 0x13, 0x9D, 0x85, 0x08, 0x87, ++ 0x18, 0x19, 0x92, 0x8F, 0x1C, 0x1D, 0x1E, 0x1F, ++ 0x80, 0x81, 0x82, 0x83, 0x84, 0x0A, 0x17, 0x1B, ++ 0x88, 0x89, 0x8A, 0x8B, 0x8C, 0x05, 0x06, 0x07, ++ 0x90, 0x91, 0x16, 0x93, 0x94, 0x95, 0x96, 0x04, ++ 0x98, 0x99, 0x9A, 0x9B, 0x14, 0x15, 0x9E, 0x1A, ++ 0x20, 0xA0, 0xE2, 0xE4, 0xE0, 0xE1, 0xE3, 0xE5, ++ 0xE7, 0xF1, 0xA2, 0x2E, 0x3C, 0x28, 0x2B, 0x7C, ++ 0x26, 0xE9, 0xEA, 0xEB, 0xE8, 0xED, 0xEE, 0xEF, ++ 0xEC, 0xDF, 0x21, 0x24, 0x2A, 0x29, 0x3B, 0xAC, ++ 0x2D, 0x2F, 0xC2, 0xC4, 0xC0, 0xC1, 0xC3, 0xC5, ++ 0xC7, 0xD1, 0xA6, 0x2C, 0x25, 0x5F, 0x3E, 0x3F, ++ 0xF8, 0xC9, 0xCA, 0xCB, 0xC8, 0xCD, 0xCE, 0xCF, ++ 0xCC, 0x60, 0x3A, 0x23, 0x40, 0x27, 0x3D, 0x22, ++ 0xD8, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, ++ 0x68, 0x69, 0xAB, 0xBB, 0xF0, 0xFD, 0xFE, 0xB1, ++ 0xB0, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F, 0x70, ++ 0x71, 0x72, 0xAA, 0xBA, 0xE6, 0xB8, 0xC6, 0xA4, ++ 0xB5, 0x7E, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, ++ 0x79, 0x7A, 0xA1, 0xBF, 0xD0, 0xDD, 0xDE, 0xAE, ++ 0x5E, 0xA3, 0xA5, 0xB7, 0xA9, 0xA7, 0xB6, 0xBC, ++ 0xBD, 0xBE, 0x5B, 0x5D, 0xAF, 0xA8, 0xB4, 0xD7, ++ 0x7B, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, ++ 0x68, 0x69, 0xAD, 0xF4, 0xF6, 0xF2, 0xF3, 0xF5, ++ 0x7D, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F, 0x70, ++ 0x71, 0x72, 0xB9, 0xFB, 0xFC, 0xF9, 0xFA, 0xFF, ++ 0x5C, 0xF7, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, ++ 0x79, 0x7A, 0xB2, 0xD4, 0xD6, 0xD2, 0xD3, 0xD5, ++ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, ++ 0x38, 0x39, 0xB3, 0xDB, 0xDC, 0xD9, 0xDA, 0x9F ++}; ++#endif ++ ++AP_DECLARE(int) ap_cstr_casecmp(const char *s1, const char *s2) ++{ ++ const unsigned char *str1 = (const unsigned char *)s1; ++ const unsigned char *str2 = (const unsigned char *)s2; ++ for (;;) ++ { ++ const int c1 = (int)(*str1); ++ const int c2 = (int)(*str2); ++ const int cmp = ucharmap[c1] - ucharmap[c2]; ++ /* Not necessary to test for !c2, this is caught by cmp */ ++ if (cmp || !c1) ++ return cmp; ++ str1++; ++ str2++; ++ } ++} ++ ++AP_DECLARE(int) ap_cstr_casecmpn(const char *s1, const char *s2, apr_size_t n) ++{ ++ const unsigned char *str1 = (const unsigned char *)s1; ++ const unsigned char *str2 = (const unsigned char *)s2; ++ while (n--) ++ { ++ const int c1 = (int)(*str1); ++ const int c2 = (int)(*str2); ++ const int cmp = ucharmap[c1] - ucharmap[c2]; ++ /* Not necessary to test for !c2, this is caught by cmp */ ++ if (cmp || !c1) ++ return cmp; ++ str1++; ++ str2++; ++ } ++ return 0; ++} diff --git a/SOURCES/httpd-2.4.6-CVE-2017-3169.patch b/SOURCES/httpd-2.4.6-CVE-2017-3169.patch new file mode 100644 index 0000000..36e2611 --- /dev/null +++ b/SOURCES/httpd-2.4.6-CVE-2017-3169.patch @@ -0,0 +1,64 @@ +diff --git a/modules/ssl/ssl_engine_io.c b/modules/ssl/ssl_engine_io.c +index 85c6ce7..4a9fc9a 100644 +--- a/modules/ssl/ssl_engine_io.c ++++ b/modules/ssl/ssl_engine_io.c +@@ -834,19 +834,20 @@ static apr_status_t ssl_filter_write(ap_filter_t *f, + * establish an outgoing SSL connection. 
*/ + #define MODSSL_ERROR_BAD_GATEWAY (APR_OS_START_USERERR + 1) + +-static void ssl_io_filter_disable(SSLConnRec *sslconn, ap_filter_t *f) ++static void ssl_io_filter_disable(SSLConnRec *sslconn, ++ bio_filter_in_ctx_t *inctx) + { +- bio_filter_in_ctx_t *inctx = f->ctx; + SSL_free(inctx->ssl); + sslconn->ssl = NULL; + inctx->ssl = NULL; + inctx->filter_ctx->pssl = NULL; + } + +-static apr_status_t ssl_io_filter_error(ap_filter_t *f, ++static apr_status_t ssl_io_filter_error(bio_filter_in_ctx_t *inctx, + apr_bucket_brigade *bb, + apr_status_t status) + { ++ ap_filter_t *f = inctx->f; + SSLConnRec *sslconn = myConnConfig(f->c); + apr_bucket *bucket; + int send_eos = 1; +@@ -860,7 +861,7 @@ static apr_status_t ssl_io_filter_error(ap_filter_t *f, + ssl_log_ssl_error(SSLLOG_MARK, APLOG_INFO, sslconn->server); + + sslconn->non_ssl_request = NON_SSL_SEND_HDR_SEP; +- ssl_io_filter_disable(sslconn, f); ++ ssl_io_filter_disable(sslconn, inctx); + + /* fake the request line */ + bucket = HTTP_ON_HTTPS_PORT_BUCKET(f->c->bucket_alloc); +@@ -1342,7 +1343,7 @@ static apr_status_t ssl_io_filter_input(ap_filter_t *f, + * rather than have SSLEngine On configured. + */ + if ((status = ssl_io_filter_handshake(inctx->filter_ctx)) != APR_SUCCESS) { +- return ssl_io_filter_error(f, bb, status); ++ return ssl_io_filter_error(inctx, bb, status); + } + + if (is_init) { +@@ -1396,7 +1397,7 @@ static apr_status_t ssl_io_filter_input(ap_filter_t *f, + + /* Handle custom errors. */ + if (status != APR_SUCCESS) { +- return ssl_io_filter_error(f, bb, status); ++ return ssl_io_filter_error(inctx, bb, status); + } + + /* Create a transient bucket out of the decrypted data. */ +@@ -1613,7 +1614,7 @@ static apr_status_t ssl_io_filter_output(ap_filter_t *f, + inctx->block = APR_BLOCK_READ; + + if ((status = ssl_io_filter_handshake(filter_ctx)) != APR_SUCCESS) { +- return ssl_io_filter_error(f, bb, status); ++ return ssl_io_filter_error(inctx, bb, status); + } + + while (!APR_BRIGADE_EMPTY(bb)) { diff --git a/SOURCES/httpd-2.4.6-CVE-2017-7668.patch b/SOURCES/httpd-2.4.6-CVE-2017-7668.patch new file mode 100644 index 0000000..8dd73e5 --- /dev/null +++ b/SOURCES/httpd-2.4.6-CVE-2017-7668.patch @@ -0,0 +1,15 @@ +--- a/server/util.c 2017/05/30 12:27:41 1796855 ++++ b/server/util.c 2017/05/30 12:28:20 1796856 +@@ -1679,10 +1679,8 @@ + + s = (const unsigned char *)line; + for (;;) { +- /* find start of token, skip all stop characters, note NUL +- * isn't a token stop, so we don't need to test for it +- */ +- while (TEST_CHAR(*s, T_HTTP_TOKEN_STOP)) { ++ /* find start of token, skip all stop characters */ ++ while (*s && TEST_CHAR(*s, T_HTTP_TOKEN_STOP)) { + ++s; + } + if (!*s) { diff --git a/SOURCES/httpd-2.4.6-CVE-2017-7679.patch b/SOURCES/httpd-2.4.6-CVE-2017-7679.patch new file mode 100644 index 0000000..a68d3f6 --- /dev/null +++ b/SOURCES/httpd-2.4.6-CVE-2017-7679.patch @@ -0,0 +1,14 @@ +--- a/modules/http/mod_mime.c 2017/06/05 12:10:05 1797652 ++++ b/modules/http/mod_mime.c 2017/06/05 12:12:31 1797653 +@@ -528,9 +528,9 @@ + int res = -1; + int c; + +- if (((s + 1) != NULL) && (*s == '\\')) { ++ if (*s == '\\') { + c = (int) *(s + 1); +- if (apr_isascii(c)) { ++ if (c && apr_isascii(c)) { + res = 1; + } + } diff --git a/SOURCES/httpd-2.4.6-CVE-2017-9788.patch b/SOURCES/httpd-2.4.6-CVE-2017-9788.patch new file mode 100644 index 0000000..d1a3480 --- /dev/null +++ b/SOURCES/httpd-2.4.6-CVE-2017-9788.patch @@ -0,0 +1,29 @@ +diff --git a/modules/aaa/mod_auth_digest.c b/modules/aaa/mod_auth_digest.c +index 0ff47f7..cbb4434 100644 
+--- a/modules/aaa/mod_auth_digest.c ++++ b/modules/aaa/mod_auth_digest.c +@@ -956,13 +956,13 @@ static int get_digest_rec(request_rec *r, digest_header_rec *resp) + + /* find value */ + ++ vv = 0; + if (auth_line[0] == '=') { + auth_line++; + while (apr_isspace(auth_line[0])) { + auth_line++; + } + +- vv = 0; + if (auth_line[0] == '\"') { /* quoted string */ + auth_line++; + while (auth_line[0] != '\"' && auth_line[0] != '\0') { +@@ -981,8 +981,8 @@ static int get_digest_rec(request_rec *r, digest_header_rec *resp) + value[vv++] = *auth_line++; + } + } +- value[vv] = '\0'; + } ++ value[vv] = '\0'; + + while (auth_line[0] != ',' && auth_line[0] != '\0') { + auth_line++; diff --git a/SOURCES/httpd-2.4.6-CVE-2017-9798.patch b/SOURCES/httpd-2.4.6-CVE-2017-9798.patch new file mode 100644 index 0000000..b615488 --- /dev/null +++ b/SOURCES/httpd-2.4.6-CVE-2017-9798.patch @@ -0,0 +1,17 @@ +diff --git a/server/core.c b/server/core.c +index f60e8fa..245fcb6 100644 +--- a/server/core.c ++++ b/server/core.c +@@ -2061,6 +2061,12 @@ AP_CORE_DECLARE_NONSTD(const char *) ap_limit_section(cmd_parms *cmd, + /* method has not been registered yet, but resorce restriction + * is always checked before method handling, so register it. + */ ++ if (cmd->pool == cmd->temp_pool) { ++ /* In .htaccess, we can't globally register new methods. */ ++ return apr_psprintf(cmd->pool, "Could not register method '%s' " ++ "for %s from .htaccess configuration", ++ method, cmd->cmd->name); ++ } + methnum = ap_method_register(cmd->pool, + apr_pstrdup(cmd->pool, method)); + } diff --git a/SOURCES/httpd-2.4.6-CVE-2018-1301.patch b/SOURCES/httpd-2.4.6-CVE-2018-1301.patch new file mode 100644 index 0000000..e03a444 --- /dev/null +++ b/SOURCES/httpd-2.4.6-CVE-2018-1301.patch @@ -0,0 +1,198 @@ +diff --git a/server/protocol.c b/server/protocol.c +index 9e23325..8428129 100644 +--- a/server/protocol.c ++++ b/server/protocol.c +@@ -222,6 +222,12 @@ AP_DECLARE(apr_status_t) ap_rgetline_core(char **s, apr_size_t n, + int fold = flags & AP_GETLINE_FOLD; + int crlf = flags & AP_GETLINE_CRLF; + ++ if (!n) { ++ /* Needs room for NUL byte at least */ ++ *read = 0; ++ return APR_BADARG; ++ } ++ + /* + * Initialize last_char as otherwise a random value will be compared + * against APR_ASCII_LF at the end of the loop if bb only contains +@@ -235,14 +241,15 @@ AP_DECLARE(apr_status_t) ap_rgetline_core(char **s, apr_size_t n, + rv = ap_get_brigade(r->proto_input_filters, bb, AP_MODE_GETLINE, + APR_BLOCK_READ, 0); + if (rv != APR_SUCCESS) { +- return rv; ++ goto cleanup; + } + + /* Something horribly wrong happened. Someone didn't block! + * (this also happens at the end of each keepalive connection) + */ + if (APR_BRIGADE_EMPTY(bb)) { +- return APR_EGENERAL; ++ rv = APR_EGENERAL; ++ goto cleanup; + } + + for (e = APR_BRIGADE_FIRST(bb); +@@ -260,7 +267,7 @@ AP_DECLARE(apr_status_t) ap_rgetline_core(char **s, apr_size_t n, + + rv = apr_bucket_read(e, &str, &len, APR_BLOCK_READ); + if (rv != APR_SUCCESS) { +- return rv; ++ goto cleanup; + } + + if (len == 0) { +@@ -273,17 +280,8 @@ AP_DECLARE(apr_status_t) ap_rgetline_core(char **s, apr_size_t n, + + /* Would this overrun our buffer? If so, we'll die. */ + if (n < bytes_handled + len) { +- *read = bytes_handled; +- if (*s) { +- /* ensure this string is NUL terminated */ +- if (bytes_handled > 0) { +- (*s)[bytes_handled-1] = '\0'; +- } +- else { +- (*s)[0] = '\0'; +- } +- } +- return APR_ENOSPC; ++ rv = APR_ENOSPC; ++ goto cleanup; + } + + /* Do we have to handle the allocation ourselves? 
*/ +@@ -291,7 +289,7 @@ AP_DECLARE(apr_status_t) ap_rgetline_core(char **s, apr_size_t n, + /* We'll assume the common case where one bucket is enough. */ + if (!*s) { + current_alloc = len; +- *s = apr_palloc(r->pool, current_alloc); ++ *s = apr_palloc(r->pool, current_alloc + 1); + } + else if (bytes_handled + len > current_alloc) { + /* Increase the buffer size */ +@@ -302,7 +300,7 @@ AP_DECLARE(apr_status_t) ap_rgetline_core(char **s, apr_size_t n, + new_size = (bytes_handled + len) * 2; + } + +- new_buffer = apr_palloc(r->pool, new_size); ++ new_buffer = apr_palloc(r->pool, new_size + 1); + + /* Copy what we already had. */ + memcpy(new_buffer, *s, bytes_handled); +@@ -326,19 +324,15 @@ AP_DECLARE(apr_status_t) ap_rgetline_core(char **s, apr_size_t n, + } + } + +- if (crlf && (last_char <= *s || last_char[-1] != APR_ASCII_CR)) { +- *last_char = '\0'; +- bytes_handled = last_char - *s; +- *read = bytes_handled; +- return APR_EINVAL; +- } +- +- /* Now NUL-terminate the string at the end of the line; ++ /* Now terminate the string at the end of the line; + * if the last-but-one character is a CR, terminate there */ + if (last_char > *s && last_char[-1] == APR_ASCII_CR) { + last_char--; + } +- *last_char = '\0'; ++ else if (crlf) { ++ rv = APR_EINVAL; ++ goto cleanup; ++ } + bytes_handled = last_char - *s; + + /* If we're folding, we have more work to do. +@@ -358,7 +352,7 @@ AP_DECLARE(apr_status_t) ap_rgetline_core(char **s, apr_size_t n, + rv = ap_get_brigade(r->proto_input_filters, bb, AP_MODE_SPECULATIVE, + APR_BLOCK_READ, 1); + if (rv != APR_SUCCESS) { +- return rv; ++ goto cleanup; + } + + if (APR_BRIGADE_EMPTY(bb)) { +@@ -375,7 +369,7 @@ AP_DECLARE(apr_status_t) ap_rgetline_core(char **s, apr_size_t n, + rv = apr_bucket_read(e, &str, &len, APR_BLOCK_READ); + if (rv != APR_SUCCESS) { + apr_brigade_cleanup(bb); +- return rv; ++ goto cleanup; + } + + /* Found one, so call ourselves again to get the next line. +@@ -392,10 +386,8 @@ AP_DECLARE(apr_status_t) ap_rgetline_core(char **s, apr_size_t n, + if (c == APR_ASCII_BLANK || c == APR_ASCII_TAB) { + /* Do we have enough space? We may be full now. */ + if (bytes_handled >= n) { +- *read = n; +- /* ensure this string is terminated */ +- (*s)[n-1] = '\0'; +- return APR_ENOSPC; ++ rv = APR_ENOSPC; ++ goto cleanup; + } + else { + apr_size_t next_size, next_len; +@@ -408,7 +400,6 @@ AP_DECLARE(apr_status_t) ap_rgetline_core(char **s, apr_size_t n, + tmp = NULL; + } + else { +- /* We're null terminated. 
*/ + tmp = last_char; + } + +@@ -417,7 +408,7 @@ AP_DECLARE(apr_status_t) ap_rgetline_core(char **s, apr_size_t n, + rv = ap_rgetline_core(&tmp, next_size, + &next_len, r, 0, bb); + if (rv != APR_SUCCESS) { +- return rv; ++ goto cleanup; + } + + if (do_alloc && next_len > 0) { +@@ -431,7 +422,7 @@ AP_DECLARE(apr_status_t) ap_rgetline_core(char **s, apr_size_t n, + memcpy(new_buffer, *s, bytes_handled); + + /* copy the new line, including the trailing null */ +- memcpy(new_buffer + bytes_handled, tmp, next_len + 1); ++ memcpy(new_buffer + bytes_handled, tmp, next_len); + *s = new_buffer; + } + +@@ -444,8 +435,21 @@ AP_DECLARE(apr_status_t) ap_rgetline_core(char **s, apr_size_t n, + } + } + } ++ ++cleanup: ++ if (bytes_handled >= n) { ++ bytes_handled = n - 1; ++ } ++ if (*s) { ++ /* ensure the string is NUL terminated */ ++ (*s)[bytes_handled] = '\0'; ++ } + *read = bytes_handled; + ++ if (rv != APR_SUCCESS) { ++ return rv; ++ } ++ + /* PR#43039: We shouldn't accept NULL bytes within the line */ + if (strlen(*s) < bytes_handled) { + return APR_EINVAL; +@@ -484,6 +488,11 @@ AP_DECLARE(int) ap_getline(char *s, int n, request_rec *r, int flags) + apr_size_t len; + apr_bucket_brigade *tmp_bb; + ++ if (n < 1) { ++ /* Can't work since we always NUL terminate */ ++ return -1; ++ } ++ + tmp_bb = apr_brigade_create(r->pool, r->connection->bucket_alloc); + rv = ap_rgetline(&tmp_s, n, &len, r, flags, tmp_bb); + apr_brigade_destroy(tmp_bb); diff --git a/SOURCES/httpd-2.4.6-CVE-2018-1312.patch b/SOURCES/httpd-2.4.6-CVE-2018-1312.patch new file mode 100644 index 0000000..4c694a5 --- /dev/null +++ b/SOURCES/httpd-2.4.6-CVE-2018-1312.patch @@ -0,0 +1,399 @@ +diff --git a/modules/aaa/mod_auth_digest.c b/modules/aaa/mod_auth_digest.c +index cbb4434..b50bcf9 100644 +--- a/modules/aaa/mod_auth_digest.c ++++ b/modules/aaa/mod_auth_digest.c +@@ -26,20 +26,13 @@ + * reports to the Apache bug-database, or send them directly to me + * at ronald@innovation.ch. + * +- * Requires either /dev/random (or equivalent) or the truerand library, +- * available for instance from +- * ftp://research.att.com/dist/mab/librand.shar +- * + * Open Issues: + * - qop=auth-int (when streams and trailer support available) + * - nonce-format configurability + * - Proxy-Authorization-Info header is set by this module, but is + * currently ignored by mod_proxy (needs patch to mod_proxy) +- * - generating the secret takes a while (~ 8 seconds) if using the +- * truerand library + * - The source of the secret should be run-time directive (with server +- * scope: RSRC_CONF). However, that could be tricky when trying to +- * choose truerand vs. file... ++ * scope: RSRC_CONF) + * - shared-mem not completely tested yet. Seems to work ok for me, + * but... (definitely won't work on Windoze) + * - Sharing a realm among multiple servers has following problems: +@@ -52,6 +45,8 @@ + * captures a packet sent to one server and sends it to another + * one. Should we add "AuthDigestNcCheck Strict"? + * - expired nonces give amaya fits. ++ * - MD5-sess and auth-int are not yet implemented. An incomplete ++ * implementation has been removed and can be retrieved from svn history. 
+ */ + + #include "apr_sha1.h" +@@ -94,7 +89,6 @@ typedef struct digest_config_struct { + apr_array_header_t *qop_list; + apr_sha1_ctx_t nonce_ctx; + apr_time_t nonce_lifetime; +- const char *nonce_format; + int check_nc; + const char *algorithm; + char *uri_list; +@@ -112,7 +106,8 @@ typedef struct digest_config_struct { + #define NONCE_HASH_LEN (2*APR_SHA1_DIGESTSIZE) + #define NONCE_LEN (int )(NONCE_TIME_LEN + NONCE_HASH_LEN) + +-#define SECRET_LEN 20 ++#define SECRET_LEN 20 ++#define RETAINED_DATA_ID "mod_auth_digest" + + + /* client list definitions */ +@@ -121,7 +116,6 @@ typedef struct hash_entry { + unsigned long key; /* the key for this entry */ + struct hash_entry *next; /* next entry in the bucket */ + unsigned long nonce_count; /* for nonce-count checking */ +- char ha1[2*APR_MD5_DIGESTSIZE+1]; /* for algorithm=MD5-sess */ + char last_nonce[NONCE_LEN+1]; /* for one-time nonce's */ + } client_entry; + +@@ -170,7 +164,7 @@ typedef union time_union { + unsigned char arr[sizeof(apr_time_t)]; + } time_rec; + +-static unsigned char secret[SECRET_LEN]; ++static unsigned char *secret; + + /* client-list, opaque, and one-time-nonce stuff */ + +@@ -228,35 +222,11 @@ static apr_status_t cleanup_tables(void *not_used) + return APR_SUCCESS; + } + +-static apr_status_t initialize_secret(server_rec *s) +-{ +- apr_status_t status; +- +- ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, s, APLOGNO(01757) +- "generating secret for digest authentication ..."); +- +-#if APR_HAS_RANDOM +- status = apr_generate_random_bytes(secret, sizeof(secret)); +-#else +-#error APR random number support is missing; you probably need to install the truerand library. +-#endif +- +- if (status != APR_SUCCESS) { +- ap_log_error(APLOG_MARK, APLOG_CRIT, status, s, APLOGNO(01758) +- "error generating secret"); +- return status; +- } +- +- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(01759) "done"); +- +- return APR_SUCCESS; +-} +- + static void log_error_and_cleanup(char *msg, apr_status_t sts, server_rec *s) + { + ap_log_error(APLOG_MARK, APLOG_ERR, sts, s, APLOGNO(01760) +- "%s - all nonce-count checking, one-time nonces, and " +- "MD5-sess algorithm disabled", msg); ++ "%s - all nonce-count checking and one-time nonces" ++ "disabled", msg); + + cleanup_tables(NULL); + } +@@ -377,16 +347,32 @@ static int initialize_tables(server_rec *s, apr_pool_t *ctx) + static int pre_init(apr_pool_t *pconf, apr_pool_t *plog, apr_pool_t *ptemp) + { + apr_status_t rv; ++ void *retained; + + rv = ap_mutex_register(pconf, client_mutex_type, NULL, APR_LOCK_DEFAULT, 0); +- if (rv == APR_SUCCESS) { +- rv = ap_mutex_register(pconf, opaque_mutex_type, NULL, APR_LOCK_DEFAULT, +- 0); +- } +- if (rv != APR_SUCCESS) { +- return rv; +- } ++ if (rv != APR_SUCCESS) ++ return !OK; ++ rv = ap_mutex_register(pconf, opaque_mutex_type, NULL, APR_LOCK_DEFAULT, 0); ++ if (rv != APR_SUCCESS) ++ return !OK; + ++ retained = ap_retained_data_get(RETAINED_DATA_ID); ++ if (retained == NULL) { ++ retained = ap_retained_data_create(RETAINED_DATA_ID, SECRET_LEN); ++ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, NULL, APLOGNO(01757) ++ "generating secret for digest authentication"); ++#if APR_HAS_RANDOM ++ rv = apr_generate_random_bytes(retained, SECRET_LEN); ++#else ++#error APR random number support is missing ++#endif ++ if (rv != APR_SUCCESS) { ++ ap_log_error(APLOG_MARK, APLOG_CRIT, rv, NULL, APLOGNO(01758) ++ "error generating secret"); ++ return !OK; ++ } ++ } ++ secret = retained; + return OK; + } + +@@ -399,10 +385,6 @@ static int initialize_module(apr_pool_t 
*p, apr_pool_t *plog, + if (ap_state_query(AP_SQ_MAIN_STATE) == AP_SQ_MS_CREATE_PRE_CONFIG) + return OK; + +- if (initialize_secret(s) != APR_SUCCESS) { +- return !OK; +- } +- + #if APR_HAS_SHARED_MEMORY + /* Note: this stuff is currently fixed for the lifetime of the server, + * i.e. even across restarts. This means that A) any shmem-size +@@ -483,6 +465,16 @@ static void *create_digest_dir_config(apr_pool_t *p, char *dir) + static const char *set_realm(cmd_parms *cmd, void *config, const char *realm) + { + digest_config_rec *conf = (digest_config_rec *) config; ++#ifdef AP_DEBUG ++ int i; ++ ++ /* check that we got random numbers */ ++ for (i = 0; i < SECRET_LEN; i++) { ++ if (secret[i] != 0) ++ break; ++ } ++ ap_assert(i < SECRET_LEN); ++#endif + + /* The core already handles the realm, but it's just too convenient to + * grab it ourselves too and cache some setups. However, we need to +@@ -496,7 +488,7 @@ static const char *set_realm(cmd_parms *cmd, void *config, const char *realm) + * and directives outside a virtual host section) + */ + apr_sha1_init(&conf->nonce_ctx); +- apr_sha1_update_binary(&conf->nonce_ctx, secret, sizeof(secret)); ++ apr_sha1_update_binary(&conf->nonce_ctx, secret, SECRET_LEN); + apr_sha1_update_binary(&conf->nonce_ctx, (const unsigned char *) realm, + strlen(realm)); + +@@ -590,8 +582,7 @@ static const char *set_nonce_lifetime(cmd_parms *cmd, void *config, + static const char *set_nonce_format(cmd_parms *cmd, void *config, + const char *fmt) + { +- ((digest_config_rec *) config)->nonce_format = fmt; +- return "AuthDigestNonceFormat is not implemented (yet)"; ++ return "AuthDigestNonceFormat is not implemented"; + } + + static const char *set_nc_check(cmd_parms *cmd, void *config, int flag) +@@ -612,7 +603,7 @@ static const char *set_algorithm(cmd_parms *cmd, void *config, const char *alg) + { + if (!strcasecmp(alg, "MD5-sess")) { + return "AuthDigestAlgorithm: ERROR: algorithm `MD5-sess' " +- "is not fully implemented"; ++ "is not implemented"; + } + else if (strcasecmp(alg, "MD5")) { + return apr_pstrcat(cmd->pool, "Invalid algorithm in AuthDigestAlgorithm: ", alg, NULL); +@@ -1138,7 +1129,7 @@ static const char *gen_nonce(apr_pool_t *p, apr_time_t now, const char *opaque, + static client_entry *gen_client(const request_rec *r) + { + unsigned long op; +- client_entry new_entry = { 0, NULL, 0, "", "" }, *entry; ++ client_entry new_entry = { 0, NULL, 0, "" }, *entry; + + if (!opaque_cntr) { + return NULL; +@@ -1158,92 +1149,6 @@ static client_entry *gen_client(const request_rec *r) + } + + +-/* +- * MD5-sess code. +- * +- * If you want to use algorithm=MD5-sess you must write get_userpw_hash() +- * yourself (see below). The dummy provided here just uses the hash from +- * the auth-file, i.e. it is only useful for testing client implementations +- * of MD5-sess . +- */ +- +-/* +- * get_userpw_hash() will be called each time a new session needs to be +- * generated and is expected to return the equivalent of +- * +- * h_urp = ap_md5(r->pool, +- * apr_pstrcat(r->pool, username, ":", ap_auth_name(r), ":", passwd)) +- * ap_md5(r->pool, +- * (unsigned char *) apr_pstrcat(r->pool, h_urp, ":", resp->nonce, ":", +- * resp->cnonce, NULL)); +- * +- * or put differently, it must return +- * +- * MD5(MD5(username ":" realm ":" password) ":" nonce ":" cnonce) +- * +- * If something goes wrong, the failure must be logged and NULL returned. 
+- * +- * You must implement this yourself, which will probably consist of code +- * contacting the password server with the necessary information (typically +- * the username, realm, nonce, and cnonce) and receiving the hash from it. +- * +- * TBD: This function should probably be in a separate source file so that +- * people need not modify mod_auth_digest.c each time they install a new +- * version of apache. +- */ +-static const char *get_userpw_hash(const request_rec *r, +- const digest_header_rec *resp, +- const digest_config_rec *conf) +-{ +- return ap_md5(r->pool, +- (unsigned char *) apr_pstrcat(r->pool, conf->ha1, ":", resp->nonce, +- ":", resp->cnonce, NULL)); +-} +- +- +-/* Retrieve current session H(A1). If there is none and "generate" is +- * true then a new session for MD5-sess is generated and stored in the +- * client struct; if generate is false, or a new session could not be +- * generated then NULL is returned (in case of failure to generate the +- * failure reason will have been logged already). +- */ +-static const char *get_session_HA1(const request_rec *r, +- digest_header_rec *resp, +- const digest_config_rec *conf, +- int generate) +-{ +- const char *ha1 = NULL; +- +- /* return the current sessions if there is one */ +- if (resp->opaque && resp->client && resp->client->ha1[0]) { +- return resp->client->ha1; +- } +- else if (!generate) { +- return NULL; +- } +- +- /* generate a new session */ +- if (!resp->client) { +- resp->client = gen_client(r); +- } +- if (resp->client) { +- ha1 = get_userpw_hash(r, resp, conf); +- if (ha1) { +- memcpy(resp->client->ha1, ha1, sizeof(resp->client->ha1)); +- } +- } +- +- return ha1; +-} +- +- +-static void clear_session(const digest_header_rec *resp) +-{ +- if (resp->client) { +- resp->client->ha1[0] = '\0'; +- } +-} +- + /* + * Authorization challenge generation code (for WWW-Authenticate) + */ +@@ -1282,8 +1187,7 @@ static void note_digest_auth_failure(request_rec *r, + + if (resp->opaque == NULL) { + /* new client */ +- if ((conf->check_nc || conf->nonce_lifetime == 0 +- || !strcasecmp(conf->algorithm, "MD5-sess")) ++ if ((conf->check_nc || conf->nonce_lifetime == 0) + && (resp->client = gen_client(r)) != NULL) { + opaque = ltox(r->pool, resp->client->key); + } +@@ -1323,15 +1227,6 @@ static void note_digest_auth_failure(request_rec *r, + memcpy(resp->client->last_nonce, nonce, NONCE_LEN+1); + } + +- /* Setup MD5-sess stuff. Note that we just clear out the session +- * info here, since we can't generate a new session until the request +- * from the client comes in with the cnonce. +- */ +- +- if (!strcasecmp(conf->algorithm, "MD5-sess")) { +- clear_session(resp); +- } +- + /* setup domain attribute. We want to send this attribute wherever + * possible so that the client won't send the Authorization header + * unnecessarily (it's usually > 200 bytes!). 
+@@ -1597,24 +1492,9 @@ static const char *new_digest(const request_rec *r, + { + const char *ha1, *ha2, *a2; + +- if (resp->algorithm && !strcasecmp(resp->algorithm, "MD5-sess")) { +- ha1 = get_session_HA1(r, resp, conf, 1); +- if (!ha1) { +- return NULL; +- } +- } +- else { +- ha1 = conf->ha1; +- } ++ ha1 = conf->ha1; + +- if (resp->message_qop && !strcasecmp(resp->message_qop, "auth-int")) { +- a2 = apr_pstrcat(r->pool, resp->method, ":", resp->uri, ":", +- ap_md5(r->pool, (const unsigned char*) ""), NULL); +- /* TBD */ +- } +- else { +- a2 = apr_pstrcat(r->pool, resp->method, ":", resp->uri, NULL); +- } ++ a2 = apr_pstrcat(r->pool, resp->method, ":", resp->uri, NULL); + ha2 = ap_md5(r->pool, (const unsigned char *)a2); + + return ap_md5(r->pool, +@@ -1854,8 +1734,7 @@ static int authenticate_digest_user(request_rec *r) + } + + if (resp->algorithm != NULL +- && strcasecmp(resp->algorithm, "MD5") +- && strcasecmp(resp->algorithm, "MD5-sess")) { ++ && strcasecmp(resp->algorithm, "MD5")) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01789) + "unknown algorithm `%s' received: %s", + resp->algorithm, r->uri); +@@ -2007,27 +1886,9 @@ static int add_auth_info(request_rec *r) + + /* calculate rspauth attribute + */ +- if (resp->algorithm && !strcasecmp(resp->algorithm, "MD5-sess")) { +- ha1 = get_session_HA1(r, resp, conf, 0); +- if (!ha1) { +- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01795) +- "internal error: couldn't find session " +- "info for user %s", resp->username); +- return !OK; +- } +- } +- else { +- ha1 = conf->ha1; +- } ++ ha1 = conf->ha1; + +- if (resp->message_qop && !strcasecmp(resp->message_qop, "auth-int")) { +- a2 = apr_pstrcat(r->pool, ":", resp->uri, ":", +- ap_md5(r->pool,(const unsigned char *) ""), NULL); +- /* TBD */ +- } +- else { +- a2 = apr_pstrcat(r->pool, ":", resp->uri, NULL); +- } ++ a2 = apr_pstrcat(r->pool, ":", resp->uri, NULL); + ha2 = ap_md5(r->pool, (const unsigned char *)a2); + + resp_dig = ap_md5(r->pool, diff --git a/SOURCES/httpd-2.4.6-CVE-2019-0217.patch b/SOURCES/httpd-2.4.6-CVE-2019-0217.patch new file mode 100644 index 0000000..69702a1 --- /dev/null +++ b/SOURCES/httpd-2.4.6-CVE-2019-0217.patch @@ -0,0 +1,113 @@ +diff --git a/modules/aaa/mod_auth_digest.c b/modules/aaa/mod_auth_digest.c +index b50bcf9..5bfec82 100644 +--- a/modules/aaa/mod_auth_digest.c ++++ b/modules/aaa/mod_auth_digest.c +@@ -92,7 +92,6 @@ typedef struct digest_config_struct { + int check_nc; + const char *algorithm; + char *uri_list; +- const char *ha1; + } digest_config_rec; + + +@@ -153,6 +152,7 @@ typedef struct digest_header_struct { + apr_time_t nonce_time; + enum hdr_sts auth_hdr_sts; + int needed_auth; ++ const char *ha1; + client_entry *client; + } digest_header_rec; + +@@ -1295,7 +1295,7 @@ static int hook_note_digest_auth_failure(request_rec *r, const char *auth_type) + */ + + static authn_status get_hash(request_rec *r, const char *user, +- digest_config_rec *conf) ++ digest_config_rec *conf, const char **rethash) + { + authn_status auth_result; + char *password; +@@ -1347,7 +1347,7 @@ static authn_status get_hash(request_rec *r, const char *user, + } while (current_provider); + + if (auth_result == AUTH_USER_FOUND) { +- conf->ha1 = password; ++ *rethash = password; + } + + return auth_result; +@@ -1474,25 +1474,24 @@ static int check_nonce(request_rec *r, digest_header_rec *resp, + + /* RFC-2069 */ + static const char *old_digest(const request_rec *r, +- const digest_header_rec *resp, const char *ha1) ++ const digest_header_rec *resp) + { + const 
char *ha2; + + ha2 = ap_md5(r->pool, (unsigned char *)apr_pstrcat(r->pool, resp->method, ":", + resp->uri, NULL)); + return ap_md5(r->pool, +- (unsigned char *)apr_pstrcat(r->pool, ha1, ":", resp->nonce, +- ":", ha2, NULL)); ++ (unsigned char *)apr_pstrcat(r->pool, resp->ha1, ":", ++ resp->nonce, ":", ha2, NULL)); + } + + /* RFC-2617 */ + static const char *new_digest(const request_rec *r, +- digest_header_rec *resp, +- const digest_config_rec *conf) ++ digest_header_rec *resp) + { + const char *ha1, *ha2, *a2; + +- ha1 = conf->ha1; ++ ha1 = resp->ha1; + + a2 = apr_pstrcat(r->pool, resp->method, ":", resp->uri, NULL); + ha2 = ap_md5(r->pool, (const unsigned char *)a2); +@@ -1505,7 +1504,6 @@ static const char *new_digest(const request_rec *r, + NULL)); + } + +- + static void copy_uri_components(apr_uri_t *dst, + apr_uri_t *src, request_rec *r) { + if (src->scheme && src->scheme[0] != '\0') { +@@ -1742,7 +1740,7 @@ static int authenticate_digest_user(request_rec *r) + return HTTP_UNAUTHORIZED; + } + +- return_code = get_hash(r, r->user, conf); ++ return_code = get_hash(r, r->user, conf, &resp->ha1); + + if (return_code == AUTH_USER_NOT_FOUND) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01790) +@@ -1772,7 +1770,7 @@ static int authenticate_digest_user(request_rec *r) + + if (resp->message_qop == NULL) { + /* old (rfc-2069) style digest */ +- if (strcmp(resp->digest, old_digest(r, resp, conf->ha1))) { ++ if (strcmp(resp->digest, old_digest(r, resp))) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01792) + "user %s: password mismatch: %s", r->user, + r->uri); +@@ -1802,7 +1800,7 @@ static int authenticate_digest_user(request_rec *r) + return HTTP_UNAUTHORIZED; + } + +- exp_digest = new_digest(r, resp, conf); ++ exp_digest = new_digest(r, resp); + if (!exp_digest) { + /* we failed to allocate a client struct */ + return HTTP_INTERNAL_SERVER_ERROR; +@@ -1886,7 +1884,7 @@ static int add_auth_info(request_rec *r) + + /* calculate rspauth attribute + */ +- ha1 = conf->ha1; ++ ha1 = resp->ha1; + + a2 = apr_pstrcat(r->pool, ":", resp->uri, NULL); + ha2 = ap_md5(r->pool, (const unsigned char *)a2); diff --git a/SOURCES/httpd-2.4.6-CVE-2019-0220.patch b/SOURCES/httpd-2.4.6-CVE-2019-0220.patch new file mode 100644 index 0000000..94a76ee --- /dev/null +++ b/SOURCES/httpd-2.4.6-CVE-2019-0220.patch @@ -0,0 +1,244 @@ +diff --git a/docs/manual/mod/core.html.en b/docs/manual/mod/core.html.en +index 86d9bee..e08034b 100644 +--- a/docs/manual/mod/core.html.en ++++ b/docs/manual/mod/core.html.en +@@ -90,6 +90,7 @@ available
    [docs/manual/mod/core.html.en -- MergeSlashes directive documentation added by the patch]

    MergeSlashes Directive
        Description:   Controls whether the server merges consecutive slashes in URLs.
        Syntax:        MergeSlashes ON | OFF
        Default:       MergeSlashes ON
        Context:       server config, virtual host
        Status:        Core
        Module:        core
        Compatibility: Available in Apache HTTP Server 2.4.6 in Red Hat Enterprise Linux 7

    By default, the server merges (or collapses) multiple consecutive slash
    ('/') characters in the path component of the request URL.

    When mapping URL's to the filesystem, these multiple slashes are not
    significant. However, URL's handled other ways, such as by CGI or proxy,
    might prefer to retain the significance of multiple consecutive slashes.
    In these cases MergeSlashes can be set to OFF to retain the multiple
    consecutive slashes. In these configurations, regular expressions used in
    the configuration file that match the path component of the URL
    (LocationMatch, RewriteRule, ...) need to take into account multiple
    consecutive slashes.

    [LDAP authorization documentation -- expression support in the Require ldap-* directives]

    ... ldap-filter. Other authorization types may also be used but may
    require that additional authorization modules be loaded.

    Since v2.5.0, expressions are supported within the LDAP require
    directives.

    Require ldap-user

    The Require ldap-user directive specifies what ...

    Examples added after "Require ldap-group cn=Administrators, o=Example"
    (hunk @@ -576,6 +579,16 @@):

    Grant access to anybody in the group whose name matches the hostname of
    the virtual host. In this example an expression is used to build the
    filter (a combined configuration sketch is given below):

        AuthLDAPURL ldap://ldap.example.com/o=Example?uid
        Require ldap-group cn=%{SERVER_NAME}, o=Example
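    As a rough, non-authoritative sketch, the two documented features could be
    combined in a virtual host as follows. The vhost name, the Location path,
    and the Basic-auth boilerplate are illustrative assumptions; only the
    AuthLDAPURL / Require ldap-group lines and the MergeSlashes ON|OFF syntax
    come from the documentation recovered above.

        <VirtualHost *:80>
            ServerName www.example.com
            # Keep consecutive slashes significant so that LocationMatch and
            # RewriteRule patterns see the path exactly as the client sent it
            # (the documented default is MergeSlashes ON).
            MergeSlashes OFF

            <Location "/private">
                # Illustrative authentication setup; assumes mod_ldap and
                # mod_authnz_ldap are loaded.
                AuthType Basic
                AuthName "LDAP protected area"
                AuthBasicProvider ldap
                AuthLDAPURL ldap://ldap.example.com/o=Example?uid
                # With the backported expression support, the required group
                # name is derived from the virtual host name at request time.
                Require ldap-group cn=%{SERVER_NAME}, o=Example
            </Location>
        </VirtualHost>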
  • + The next example assumes that everyone at Example who + carries an alphanumeric pager will have an LDAP attribute + of qpagePagerID. The example will grant access +diff --git a/modules/aaa/mod_authnz_ldap.c b/modules/aaa/mod_authnz_ldap.c +index 2c25dbc..063debe 100644 +--- a/modules/aaa/mod_authnz_ldap.c ++++ b/modules/aaa/mod_authnz_ldap.c +@@ -607,6 +607,10 @@ static authz_status ldapuser_check_authorization(request_rec *r, + + util_ldap_connection_t *ldc = NULL; + ++ const char *err = NULL; ++ const ap_expr_info_t *expr = parsed_require_args; ++ const char *require; ++ + const char *t; + char *w; + +@@ -680,11 +684,19 @@ static authz_status ldapuser_check_authorization(request_rec *r, + return AUTHZ_DENIED; + } + ++ require = ap_expr_str_exec(r, expr, &err); ++ if (err) { ++ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02585) ++ "auth_ldap authorize: require user: Can't evaluate expression: %s", ++ err); ++ return AUTHZ_DENIED; ++ } ++ + /* + * First do a whole-line compare, in case it's something like + * require user Babs Jensen + */ +- result = util_ldap_cache_compare(r, ldc, sec->url, req->dn, sec->attribute, require_args); ++ result = util_ldap_cache_compare(r, ldc, sec->url, req->dn, sec->attribute, require); + switch(result) { + case LDAP_COMPARE_TRUE: { + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01703) +@@ -704,7 +716,7 @@ static authz_status ldapuser_check_authorization(request_rec *r, + /* + * Now break apart the line and compare each word on it + */ +- t = require_args; ++ t = require; + while ((w = ap_getword_conf(r->pool, &t)) && w[0]) { + result = util_ldap_cache_compare(r, ldc, sec->url, req->dn, sec->attribute, w); + switch(result) { +@@ -744,6 +756,10 @@ static authz_status ldapgroup_check_authorization(request_rec *r, + + util_ldap_connection_t *ldc = NULL; + ++ const char *err = NULL; ++ const ap_expr_info_t *expr = parsed_require_args; ++ const char *require; ++ + const char *t; + + char filtbuf[FILTER_LENGTH]; +@@ -863,7 +879,15 @@ static authz_status ldapgroup_check_authorization(request_rec *r, + } + } + +- t = require_args; ++ require = ap_expr_str_exec(r, expr, &err); ++ if (err) { ++ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02586) ++ "auth_ldap authorize: require group: Can't evaluate expression: %s", ++ err); ++ return AUTHZ_DENIED; ++ } ++ ++ t = require; + + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01713) + "auth_ldap authorize: require group: testing for group " +@@ -959,6 +983,10 @@ static authz_status ldapdn_check_authorization(request_rec *r, + + util_ldap_connection_t *ldc = NULL; + ++ const char *err = NULL; ++ const ap_expr_info_t *expr = parsed_require_args; ++ const char *require; ++ + const char *t; + + char filtbuf[FILTER_LENGTH]; +@@ -1021,7 +1049,15 @@ static authz_status ldapdn_check_authorization(request_rec *r, + req->user = r->user; + } + +- t = require_args; ++ require = ap_expr_str_exec(r, expr, &err); ++ if (err) { ++ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02587) ++ "auth_ldap authorize: require dn: Can't evaluate expression: %s", ++ err); ++ return AUTHZ_DENIED; ++ } ++ ++ t = require; + + if (req->dn == NULL || strlen(req->dn) == 0) { + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01725) +@@ -1068,6 +1104,10 @@ static authz_status ldapattribute_check_authorization(request_rec *r, + + util_ldap_connection_t *ldc = NULL; + ++ const char *err = NULL; ++ const ap_expr_info_t *expr = parsed_require_args; ++ const char *require; ++ + const char *t; + char *w, *value; + +@@ 
-1138,7 +1178,16 @@ static authz_status ldapattribute_check_authorization(request_rec *r, + return AUTHZ_DENIED; + } + +- t = require_args; ++ require = ap_expr_str_exec(r, expr, &err); ++ if (err) { ++ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02588) ++ "auth_ldap authorize: require ldap-attribute: Can't " ++ "evaluate expression: %s", err); ++ return AUTHZ_DENIED; ++ } ++ ++ t = require; ++ + while (t[0]) { + w = ap_getword(r->pool, &t, '='); + value = ap_getword_conf(r->pool, &t); +@@ -1183,6 +1232,11 @@ static authz_status ldapfilter_check_authorization(request_rec *r, + (authn_ldap_config_t *)ap_get_module_config(r->per_dir_config, &authnz_ldap_module); + + util_ldap_connection_t *ldc = NULL; ++ ++ const char *err = NULL; ++ const ap_expr_info_t *expr = parsed_require_args; ++ const char *require; ++ + const char *t; + + char filtbuf[FILTER_LENGTH]; +@@ -1252,7 +1306,15 @@ static authz_status ldapfilter_check_authorization(request_rec *r, + return AUTHZ_DENIED; + } + +- t = require_args; ++ require = ap_expr_str_exec(r, expr, &err); ++ if (err) { ++ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02589) ++ "auth_ldap authorize: require ldap-filter: Can't " ++ "evaluate require expression: %s", err); ++ return AUTHZ_DENIED; ++ } ++ ++ t = require; + + if (t[0]) { + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01743) +@@ -1311,6 +1373,25 @@ static authz_status ldapfilter_check_authorization(request_rec *r, + return AUTHZ_DENIED; + } + ++static const char *ldap_parse_config(cmd_parms *cmd, const char *require_line, ++ const void **parsed_require_line) ++{ ++ const char *expr_err = NULL; ++ ap_expr_info_t *expr; ++ ++ expr = ap_expr_parse_cmd(cmd, require_line, AP_EXPR_FLAG_STRING_RESULT, ++ &expr_err, NULL); ++ ++ if (expr_err) ++ return apr_pstrcat(cmd->temp_pool, ++ "Cannot parse expression in require line: ", ++ expr_err, NULL); ++ ++ *parsed_require_line = expr; ++ ++ return NULL; ++} ++ + + /* + * Use the ldap url parsing routines to break up the ldap url into +@@ -1769,30 +1850,30 @@ static const authn_provider authn_ldap_provider = + static const authz_provider authz_ldapuser_provider = + { + &ldapuser_check_authorization, +- NULL, ++ &ldap_parse_config, + }; + static const authz_provider authz_ldapgroup_provider = + { + &ldapgroup_check_authorization, +- NULL, ++ &ldap_parse_config, + }; + + static const authz_provider authz_ldapdn_provider = + { + &ldapdn_check_authorization, +- NULL, ++ &ldap_parse_config, + }; + + static const authz_provider authz_ldapattribute_provider = + { + &ldapattribute_check_authorization, +- NULL, ++ &ldap_parse_config, + }; + + static const authz_provider authz_ldapfilter_provider = + { + &ldapfilter_check_authorization, +- NULL, ++ &ldap_parse_config, + }; + + static void ImportULDAPOptFn(void) diff --git a/SOURCES/httpd-2.4.6-r1556473.patch b/SOURCES/httpd-2.4.6-r1556473.patch new file mode 100644 index 0000000..009baa7 --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1556473.patch @@ -0,0 +1,31 @@ +# ./pullrev.sh 1556473 + +https://bugzilla.redhat.com/show_bug.cgi?id=1036666 + +http://svn.apache.org/viewvc?view=revision&revision=1556473 + +--- httpd-2.4.6/modules/ssl/ssl_engine_config.c ++++ httpd-2.4.6/modules/ssl/ssl_engine_config.c +@@ -699,9 +699,20 @@ + #ifndef SSL_OP_NO_COMPRESSION + const char *err = ap_check_cmd_context(cmd, GLOBAL_ONLY); + if (err) +- return "This version of openssl does not support configuring " +- "compression within sections."; ++ return "This version of OpenSSL does not support enabling " ++ "SSLCompression 
within sections."; + #endif ++ if (flag) { ++ /* Some (packaged) versions of OpenSSL do not support ++ * compression by default. Enabling this directive would not ++ * have the desired effect, so fail with an error. */ ++ STACK_OF(SSL_COMP) *meths = SSL_COMP_get_compression_methods(); ++ ++ if (sk_SSL_COMP_num(meths) == 0) { ++ return "This version of OpenSSL does not have any compression methods " ++ "available, cannot enable SSLCompression."; ++ } ++ } + sc->compression = flag ? TRUE : FALSE; + return NULL; + #else diff --git a/SOURCES/httpd-2.4.6-r1556818.patch b/SOURCES/httpd-2.4.6-r1556818.patch new file mode 100644 index 0000000..93195e1 --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1556818.patch @@ -0,0 +1,26 @@ +# ./pullrev.sh r1556818 +http://svn.apache.org/viewvc?view=revision&revision=r1556818 +--- httpd-2.4.6/modules/aaa/mod_authn_core.c 2014/01/09 14:30:23 1556817 ++++ httpd-2.4.6/modules/aaa/mod_authn_core.c 2014/01/09 14:32:47 1556818 +@@ -179,6 +179,12 @@ + return (void *) authcfg; + } + ++/* Only per-server directive we have is GLOBAL_ONLY */ ++static void *merge_authn_alias_svr_config(apr_pool_t *p, void *basev, void *overridesv) ++{ ++ return basev; ++} ++ + static const authn_provider authn_alias_provider = + { + &authn_alias_check_password, +@@ -373,7 +379,7 @@ + create_authn_core_dir_config, /* dir config creater */ + merge_authn_core_dir_config, /* dir merger --- default is to override */ + create_authn_alias_svr_config, /* server config */ +- NULL, /* merge server config */ ++ merge_authn_alias_svr_config, /* merge server config */ + authn_cmds, + register_hooks /* register hooks */ + }; diff --git a/SOURCES/httpd-2.4.6-r1560093.patch b/SOURCES/httpd-2.4.6-r1560093.patch new file mode 100644 index 0000000..664699a --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1560093.patch @@ -0,0 +1,16 @@ +# ./pullrev.sh 1560093 + +https://bugzilla.redhat.com/show_bug.cgi?id=1331341 + +http://svn.apache.org/viewvc?view=revision&revision=1560093 + +--- httpd-2.4.6/modules/cache/mod_cache.c ++++ httpd-2.4.6/modules/cache/mod_cache.c +@@ -1130,7 +1130,6 @@ + "Content-Range"))) + || ((reason = cache_header_cmp(r->pool, left, right, + "Content-Type"))) +- || ((reason = cache_header_cmp(r->pool, left, right, "Expires"))) + || ((reason = cache_header_cmp(r->pool, left, right, "ETag"))) + || ((reason = cache_header_cmp(r->pool, left, right, + "Last-Modified")))) { diff --git a/SOURCES/httpd-2.4.6-r1569006.patch b/SOURCES/httpd-2.4.6-r1569006.patch new file mode 100644 index 0000000..32dd900 --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1569006.patch @@ -0,0 +1,19 @@ +--- a/modules/metadata/mod_remoteip.c 2014/02/17 14:11:38 1569005 ++++ b/modules/metadata/mod_remoteip.c 2014/02/17 14:12:30 1569006 +@@ -246,14 +246,14 @@ + + while (remote) { + +- /* verify c->client_addr is trusted if there is a trusted proxy list ++ /* verify user agent IP against the trusted proxy list + */ + if (config->proxymatch_ip) { + int i; + remoteip_proxymatch_t *match; + match = (remoteip_proxymatch_t *)config->proxymatch_ip->elts; + for (i = 0; i < config->proxymatch_ip->nelts; ++i) { +- if (apr_ipsubnet_test(match[i].ip, c->client_addr)) { ++ if (apr_ipsubnet_test(match[i].ip, temp_sa)) { + internal = match[i].internal; + break; + } diff --git a/SOURCES/httpd-2.4.6-r1570327.patch b/SOURCES/httpd-2.4.6-r1570327.patch new file mode 100644 index 0000000..35cdce8 --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1570327.patch @@ -0,0 +1,20 @@ +# ./pullrev.sh 1570327 +http://svn.apache.org/viewvc?view=revision&revision=1570327 + 
+https://bugzilla.redhat.com/show_bug.cgi?id=1327624 + +--- httpd-2.4.6/server/mpm_unix.c ++++ httpd-2.4.6/server/mpm_unix.c +@@ -742,7 +742,12 @@ + * readers stranded (a number of them could be tied up for + * a while serving time-consuming requests) + */ ++ /* Recall: we only worry about IDLE child processes here */ + for (i = 0; i < num && rv == APR_SUCCESS; i++) { ++ if (ap_scoreboard_image->servers[i][0].status != SERVER_READY || ++ ap_scoreboard_image->servers[i][0].pid == 0) { ++ continue; ++ } + rv = dummy_connection(pod); + } + } diff --git a/SOURCES/httpd-2.4.6-r1573626.patch b/SOURCES/httpd-2.4.6-r1573626.patch new file mode 100644 index 0000000..58d116b --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1573626.patch @@ -0,0 +1,29 @@ +--- a/modules/proxy/mod_proxy.c 2014/03/03 17:28:10 1573625 ++++ b/modules/proxy/mod_proxy.c 2014/03/03 17:28:17 1573626 +@@ -927,8 +927,25 @@ + struct dirconn_entry *list = (struct dirconn_entry *)conf->dirconn->elts; + + /* is this for us? */ +- if (!r->proxyreq || !r->filename || strncmp(r->filename, "proxy:", 6) != 0) ++ if (!r->filename) { + return DECLINED; ++ } ++ ++ if (!r->proxyreq) { ++ /* We may have forced the proxy handler via config or .htaccess */ ++ if (r->handler && ++ strncmp(r->handler, "proxy:", 6) == 0 && ++ strncmp(r->filename, "proxy:", 6) != 0) { ++ r->proxyreq = PROXYREQ_REVERSE; ++ r->filename = apr_pstrcat(r->pool, r->handler, r->filename, NULL); ++ apr_table_setn(r->notes, "rewrite-proxy", "1"); ++ } ++ else { ++ return DECLINED; ++ } ++ } else if (strncmp(r->filename, "proxy:", 6) != 0) { ++ return DECLINED; ++ } + + /* handle max-forwards / OPTIONS / TRACE */ + if ((str = apr_table_get(r->headers_in, "Max-Forwards"))) { diff --git a/SOURCES/httpd-2.4.6-r1583175.patch b/SOURCES/httpd-2.4.6-r1583175.patch new file mode 100644 index 0000000..4270a4c --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1583175.patch @@ -0,0 +1,19 @@ +--- a/modules/mappers/mod_alias.c 2014/03/30 18:15:25 1583174 ++++ b/modules/mappers/mod_alias.c 2014/03/30 18:20:09 1583175 +@@ -371,15 +371,11 @@ + } + } + else { +- int pathlen = strlen(found) - +- (strlen(r->uri + regm[0].rm_eo)); +- AP_DEBUG_ASSERT(pathlen >= 0); +- AP_DEBUG_ASSERT(pathlen <= strlen(found)); + ap_set_context_info(r, + apr_pstrmemdup(r->pool, r->uri, + regm[0].rm_eo), + apr_pstrmemdup(r->pool, found, +- pathlen)); ++ strlen(found))); + } + } + else { diff --git a/SOURCES/httpd-2.4.6-r1587053.patch b/SOURCES/httpd-2.4.6-r1587053.patch new file mode 100644 index 0000000..5bee0b1 --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1587053.patch @@ -0,0 +1,92 @@ +diff --git a/modules/proxy/mod_proxy_wstunnel.c b/modules/proxy/mod_proxy_wstunnel.c +index 525109a..eb34eee 100644 +--- a/modules/proxy/mod_proxy_wstunnel.c ++++ b/modules/proxy/mod_proxy_wstunnel.c +@@ -103,10 +103,12 @@ static int proxy_wstunnel_transfer(request_rec *r, conn_rec *c_i, conn_rec *c_o, + rv = ap_get_brigade(c_i->input_filters, bb, AP_MODE_READBYTES, + APR_NONBLOCK_READ, AP_IOBUFSIZE); + if (rv == APR_SUCCESS) { +- if (c_o->aborted) ++ if (c_o->aborted) { + return APR_EPIPE; +- if (APR_BRIGADE_EMPTY(bb)) ++ } ++ if (APR_BRIGADE_EMPTY(bb)){ + break; ++ } + #ifdef DEBUGGING + len = -1; + apr_brigade_length(bb, 0, &len); +@@ -178,7 +180,6 @@ static int ap_proxy_wstunnel_request(apr_pool_t *p, request_rec *r, + conn_rec *c = r->connection; + apr_socket_t *sock = conn->sock; + conn_rec *backconn = conn->connection; +- int client_error = 0; + char *buf; + apr_bucket_brigade *header_brigade; + apr_bucket *e; +@@ -224,7 +225,7 @@ static int 
ap_proxy_wstunnel_request(apr_pool_t *p, request_rec *r, + + pollfd.p = p; + pollfd.desc_type = APR_POLL_SOCKET; +- pollfd.reqevents = APR_POLLIN; ++ pollfd.reqevents = APR_POLLIN | APR_POLLHUP; + pollfd.desc.s = sock; + pollfd.client_data = NULL; + apr_pollset_add(pollset, &pollfd); +@@ -237,6 +238,9 @@ static int ap_proxy_wstunnel_request(apr_pool_t *p, request_rec *r, + r->proto_output_filters = c->output_filters; + r->input_filters = c->input_filters; + r->proto_input_filters = c->input_filters; ++ /* This handler should take care of the entire connection; make it so that ++ * nothing else is attempted on the connection after returning. */ ++ c->keepalive = AP_CONN_CLOSE; + + remove_reqtimeout(r->input_filters); + +@@ -257,26 +261,28 @@ static int ap_proxy_wstunnel_request(apr_pool_t *p, request_rec *r, + + if (cur->desc.s == sock) { + pollevent = cur->rtnevents; +- if (pollevent & APR_POLLIN) { ++ if (pollevent & (APR_POLLIN | APR_POLLHUP)) { + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02446) + "sock was readable"); + rv = proxy_wstunnel_transfer(r, backconn, c, bb, "sock"); + } +- else if ((pollevent & APR_POLLERR) +- || (pollevent & APR_POLLHUP)) { ++ else if (pollevent & APR_POLLERR) { + rv = APR_EPIPE; + ap_log_rerror(APLOG_MARK, APLOG_NOTICE, 0, r, APLOGNO(02447) +- "err/hup on backconn"); ++ "err on backconn"); + } +- if (rv != APR_SUCCESS) +- client_error = 1; + } + else if (cur->desc.s == client_socket) { + pollevent = cur->rtnevents; +- if (pollevent & APR_POLLIN) { ++ if (pollevent & (APR_POLLIN | APR_POLLHUP)) { + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02448) + "client was readable"); + rv = proxy_wstunnel_transfer(r, c, backconn, bb, "client"); ++ } else if (pollevent & APR_POLLERR) { ++ rv = APR_EPIPE; ++ c->aborted = 1; ++ ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, APLOGNO(02607) ++ "error on client conn"); + } + } + else { +@@ -294,9 +300,6 @@ static int ap_proxy_wstunnel_request(apr_pool_t *p, request_rec *r, + ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, + "finished with poll() - cleaning up"); + +- if (client_error) { +- return HTTP_INTERNAL_SERVER_ERROR; +- } + return OK; + } + diff --git a/SOURCES/httpd-2.4.6-r1593002.patch b/SOURCES/httpd-2.4.6-r1593002.patch new file mode 100644 index 0000000..6aa0688 --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1593002.patch @@ -0,0 +1,35 @@ +--- a/modules/ssl/ssl_util_stapling.c 2014/05/07 12:51:38 1593001 ++++ b/modules/ssl/ssl_util_stapling.c 2014/05/07 12:52:13 1593002 +@@ -145,14 +145,15 @@ + X509_digest(x, EVP_sha1(), cinf->idx, NULL); + + aia = X509_get1_ocsp(x); +- if (aia) ++ if (aia) { + cinf->uri = sk_OPENSSL_STRING_pop(aia); ++ X509_email_free(aia); ++ } + if (!cinf->uri && !mctx->stapling_force_url) { + ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(02218) + "ssl_stapling_init_cert: no responder URL"); ++ return 0; + } +- if (aia) +- X509_email_free(aia); + return 1; + } + +@@ -403,6 +404,13 @@ + else + ocspuri = cinf->uri; + ++ if (!ocspuri) { ++ ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(02621) ++ "stapling_renew_response: no uri for responder"); ++ rv = FALSE; ++ goto done; ++ } ++ + /* Create a temporary pool to constrain memory use */ + apr_pool_create(&vpool, conn->pool); + diff --git a/SOURCES/httpd-2.4.6-r1594625.patch b/SOURCES/httpd-2.4.6-r1594625.patch new file mode 100644 index 0000000..487b2ad --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1594625.patch @@ -0,0 +1,25 @@ +Index: modules/proxy/mod_proxy_wstunnel.c 
+=================================================================== +--- a/modules/proxy/mod_proxy_wstunnel.c (revision 1593857) ++++ b/modules/proxy/mod_proxy_wstunnel.c (revision 1594625) +@@ -477,9 +477,11 @@ + conn_rec *c = r->connection; + apr_pool_t *p = r->pool; + apr_uri_t *uri; ++ int is_ssl = 0; + + if (strncasecmp(url, "wss:", 4) == 0) { + scheme = "WSS"; ++ is_ssl = 1; + } + else if (strncasecmp(url, "ws:", 3) == 0) { + scheme = "WS"; +@@ -503,7 +505,7 @@ + return status; + } + +- backend->is_ssl = 0; ++ backend->is_ssl = is_ssl; + backend->close = 0; + + retry = 0; diff --git a/SOURCES/httpd-2.4.6-r1604460.patch b/SOURCES/httpd-2.4.6-r1604460.patch new file mode 100644 index 0000000..465005d --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1604460.patch @@ -0,0 +1,22 @@ +--- a/modules/filters/mod_deflate.c 2014/02/26 15:24:07 1572091 ++++ b/modules/filters/mod_deflate.c 2014/02/26 15:30:25 1572092 +@@ -1125,7 +1125,8 @@ + } + ctx->stream.next_in += 4; + compLen = getLong(ctx->stream.next_in); +- if (ctx->stream.total_out != compLen) { ++ /* gzip stores original size only as 4 byte value */ ++ if ((ctx->stream.total_out & 0xFFFFFFFF) != compLen) { + inflateEnd(&ctx->stream); + ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(01395) + "Zlib: Length %ld of inflated data does " +@@ -1322,7 +1323,8 @@ + } + ctx->validation_buffer += VALIDATION_SIZE / 2; + compLen = getLong(ctx->validation_buffer); +- if (ctx->stream.total_out != compLen) { ++ /* gzip stores original size only as 4 byte value */ ++ if ((ctx->stream.total_out & 0xFFFFFFFF) != compLen) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01400) + "Zlib: Length of inflated stream invalid"); + return APR_EGENERAL; diff --git a/SOURCES/httpd-2.4.6-r1610013.patch b/SOURCES/httpd-2.4.6-r1610013.patch new file mode 100644 index 0000000..b53ae60 --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1610013.patch @@ -0,0 +1,141 @@ +Index: modules/dav/main/mod_dav.c +=================================================================== +--- a/modules/dav/main/mod_dav.c (revision 1610012) ++++ b/modules/dav/main/mod_dav.c (revision 1610013) +@@ -396,9 +396,11 @@ + */ + static const char *dav_xml_escape_uri(apr_pool_t *p, const char *uri) + { ++ const char *e_uri = ap_escape_uri(p, uri); ++ + /* check the easy case... */ +- if (ap_strchr_c(uri, '&') == NULL) +- return uri; ++ if (ap_strchr_c(e_uri, '&') == NULL) ++ return e_uri; + + /* there was a '&', so more work is needed... sigh. */ + +@@ -406,7 +408,7 @@ + * Note: this is a teeny bit of overkill since we know there are no + * '<' or '>' characters, but who cares. 
+ */ +- return apr_xml_quote_string(p, uri, 0); ++ return apr_xml_quote_string(p, e_uri, 0); + } + + +Index: modules/dav/main/mod_dav.h +=================================================================== +--- a/modules/dav/main/mod_dav.h (revision 1610012) ++++ b/modules/dav/main/mod_dav.h (revision 1610013) +@@ -386,7 +386,9 @@ + * REGULAR and WORKSPACE resources, + * and is always 1 for WORKING */ + +- const char *uri; /* the escaped URI for this resource */ ++ const char *uri; /* the URI for this resource; ++ * currently has an ABI flaw where sometimes it is ++ * assumed to be encoded and sometimes not */ + + dav_resource_private *info; /* the provider's private info */ + +Index: modules/dav/main/props.c +=================================================================== +--- a/modules/dav/main/props.c (revision 1610012) ++++ b/modules/dav/main/props.c (revision 1610013) +@@ -321,10 +321,14 @@ + /* do a sub-request to fetch properties for the target resource's URI. */ + static void dav_do_prop_subreq(dav_propdb *propdb) + { ++ /* need to escape the uri that's in the resource struct because during ++ * the property walker it's not encoded. */ ++ const char *e_uri = ap_escape_uri(propdb->resource->pool, ++ propdb->resource->uri); ++ + /* perform a "GET" on the resource's URI (note that the resource + may not correspond to the current request!). */ +- propdb->subreq = ap_sub_req_lookup_uri(propdb->resource->uri, propdb->r, +- NULL); ++ propdb->subreq = ap_sub_req_lookup_uri(e_uri, propdb->r, NULL); + } + + static dav_error * dav_insert_coreprop(dav_propdb *propdb, +Index: modules/dav/fs/repos.c +=================================================================== +--- a/modules/dav/fs/repos.c (revision 1610012) ++++ b/modules/dav/fs/repos.c (revision 1610013) +@@ -717,13 +717,13 @@ + resource->pool = r->pool; + + /* make sure the URI does not have a trailing "/" */ +- len = strlen(r->unparsed_uri); +- if (len > 1 && r->unparsed_uri[len - 1] == '/') { +- s = apr_pstrmemdup(r->pool, r->unparsed_uri, len-1); ++ len = strlen(r->uri); ++ if (len > 1 && r->uri[len - 1] == '/') { ++ s = apr_pstrmemdup(r->pool, r->uri, len-1); + resource->uri = s; + } + else { +- resource->uri = r->unparsed_uri; ++ resource->uri = r->uri; + } + + if (r->finfo.filetype != APR_NOFILE) { +@@ -1482,18 +1482,6 @@ + return dav_fs_deleteset(info->pool, resource); + } + +-/* Take an unescaped path component and escape it and append it onto a +- * dav_buffer for a URI */ +-static apr_size_t dav_fs_append_uri(apr_pool_t *p, dav_buffer *pbuf, +- const char *path, apr_size_t pad) +-{ +- const char *epath = ap_escape_uri(p, path); +- apr_size_t epath_len = strlen(epath); +- +- dav_buffer_place_mem(p, pbuf, epath, epath_len + 1, pad); +- return epath_len; +-} +- + /* ### move this to dav_util? */ + /* Walk recursively down through directories, * + * including lock-null resources as we go. */ +@@ -1549,7 +1537,6 @@ + } + while ((apr_dir_read(&dirent, APR_FINFO_DIRENT, dirp)) == APR_SUCCESS) { + apr_size_t len; +- apr_size_t escaped_len; + + len = strlen(dirent.name); + +@@ -1592,7 +1579,7 @@ + + /* copy the file to the URI, too. NOTE: we will pad an extra byte + for the trailing slash later. 
*/ +- escaped_len = dav_fs_append_uri(pool, &fsctx->uri_buf, dirent.name, 1); ++ dav_buffer_place_mem(pool, &fsctx->uri_buf, dirent.name, len + 1, 1); + + /* if there is a secondary path, then do that, too */ + if (fsctx->path2.buf != NULL) { +@@ -1625,7 +1612,7 @@ + fsctx->path2.cur_len += len; + + /* adjust URI length to incorporate subdir and a slash */ +- fsctx->uri_buf.cur_len += escaped_len + 1; ++ fsctx->uri_buf.cur_len += len + 1; + fsctx->uri_buf.buf[fsctx->uri_buf.cur_len - 1] = '/'; + fsctx->uri_buf.buf[fsctx->uri_buf.cur_len] = '\0'; + +@@ -1691,8 +1678,8 @@ + */ + dav_buffer_place_mem(pool, &fsctx->path1, + fsctx->locknull_buf.buf + offset, len + 1, 0); +- dav_fs_append_uri(pool, &fsctx->uri_buf, +- fsctx->locknull_buf.buf + offset, 0); ++ dav_buffer_place_mem(pool, &fsctx->uri_buf, ++ fsctx->locknull_buf.buf + offset, len + 1, 0); + if (fsctx->path2.buf != NULL) { + dav_buffer_place_mem(pool, &fsctx->path2, + fsctx->locknull_buf.buf + offset, diff --git a/SOURCES/httpd-2.4.6-r1610396.patch b/SOURCES/httpd-2.4.6-r1610396.patch new file mode 100644 index 0000000..39edd6b --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1610396.patch @@ -0,0 +1,189 @@ +Index: modules/ldap/util_ldap.c +=================================================================== +--- a/modules/ldap/util_ldap.c (revision 1610395) ++++ b/modules/ldap/util_ldap.c (revision 1610396) +@@ -157,10 +157,12 @@ + */ + if (!ldc->keep) { + uldap_connection_unbind(ldc); ++ ldc->r = NULL; + } + else { + /* mark our connection as available for reuse */ + ldc->freed = apr_time_now(); ++ ldc->r = NULL; + #if APR_HAS_THREADS + apr_thread_mutex_unlock(ldc->lock); + #endif +@@ -179,6 +181,9 @@ + + if (ldc) { + if (ldc->ldap) { ++ if (ldc->r) { ++ ap_log_rerror(APLOG_MARK, APLOG_TRACE5, 0, ldc->r, "LDC %pp unbind", ldc); ++ } + ldap_unbind_s(ldc->ldap); + ldc->ldap = NULL; + } +@@ -319,6 +324,8 @@ + return(result->rc); + } + ++ ap_log_rerror(APLOG_MARK, APLOG_TRACE5, 0, r, "LDC %pp init", ldc); ++ + if (ldc->ChaseReferrals == AP_LDAP_CHASEREFERRALS_ON) { + /* Now that we have an ldap struct, add it to the referral list for rebinds. 
*/ + rc = apr_ldap_rebind_add(ldc->rebind_pool, ldc->ldap, ldc->binddn, ldc->bindpw); +@@ -516,6 +523,10 @@ + ldc->reason = "LDAP: ldap_simple_bind() parse result failed"; + return uldap_ld_errno(ldc); + } ++ else { ++ ldc->last_backend_conn = ldc->r->request_time; ++ ap_log_rerror(APLOG_MARK, APLOG_TRACE5, 0, ldc->r, "LDC %pp bind", ldc); ++ } + return rc; + } + +@@ -540,7 +551,7 @@ + + /* If the connection is already bound, return + */ +- if (ldc->bound) ++ if (ldc->bound && !ldc->must_rebind) + { + ldc->reason = "LDAP: connection open successful (already bound)"; + return LDAP_SUCCESS; +@@ -621,6 +632,7 @@ + } + else { + ldc->bound = 1; ++ ldc->must_rebind = 0; + ldc->reason = "LDAP: connection open successful"; + } + +@@ -718,13 +730,17 @@ + && !compare_client_certs(dc->client_certs, l->client_certs)) + { + if (st->connection_pool_ttl > 0) { +- if (l->bound && (now - l->freed) > st->connection_pool_ttl) { ++ if (l->bound && (now - l->last_backend_conn) > st->connection_pool_ttl) { + ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, + "Removing LDAP connection last used %" APR_TIME_T_FMT " seconds ago", +- (now - l->freed) / APR_USEC_PER_SEC); ++ (now - l->last_backend_conn) / APR_USEC_PER_SEC); ++ l->r = r; + uldap_connection_unbind(l); + /* Go ahead (by falling through) and use it, so we don't create more just to unbind some other old ones */ + } ++ ap_log_rerror(APLOG_MARK, APLOG_TRACE5, 0, r, ++ "Reuse %s LDC %pp", ++ l->bound ? "bound" : "unbound", l); + } + break; + } +@@ -751,12 +767,25 @@ + (l->deref == deref) && (l->secure == secureflag) && + !compare_client_certs(dc->client_certs, l->client_certs)) + { ++ if (st->connection_pool_ttl > 0) { ++ if (l->bound && (now - l->last_backend_conn) > st->connection_pool_ttl) { ++ ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, ++ "Removing LDAP connection last used %" APR_TIME_T_FMT " seconds ago", ++ (now - l->last_backend_conn) / APR_USEC_PER_SEC); ++ l->r = r; ++ uldap_connection_unbind(l); ++ /* Go ahead (by falling through) and use it, so we don't create more just to unbind some other old ones */ ++ } ++ ap_log_rerror(APLOG_MARK, APLOG_TRACE5, 0, r, ++ "Reuse %s LDC %pp (will rebind)", ++ l->bound ? "bound" : "unbound", l); ++ } ++ + /* the bind credentials have changed */ +- /* no check for connection_pool_ttl, since we are unbinding any way */ +- uldap_connection_unbind(l); +- ++ l->must_rebind = 1; + util_ldap_strdup((char**)&(l->binddn), binddn); + util_ldap_strdup((char**)&(l->bindpw), bindpw); ++ + break; + } + #if APR_HAS_THREADS +@@ -846,6 +875,7 @@ + #if APR_HAS_THREADS + apr_thread_mutex_unlock(st->mutex); + #endif ++ l->r = r; + return l; + } + +@@ -965,6 +995,7 @@ + return result; + } + ++ ldc->last_backend_conn = r->request_time; + entry = ldap_first_entry(ldc->ldap, res); + searchdn = ldap_get_dn(ldc->ldap, entry); + +@@ -1116,6 +1147,7 @@ + goto start_over; + } + ++ ldc->last_backend_conn = r->request_time; + ldc->reason = "Comparison complete"; + if ((LDAP_COMPARE_TRUE == result) || + (LDAP_COMPARE_FALSE == result) || +@@ -1241,6 +1273,7 @@ + return res; + } + ++ ldc->last_backend_conn = r->request_time; + entry = ldap_first_entry(ldc->ldap, sga_res); + + /* +@@ -1723,6 +1756,7 @@ + * We should have found exactly one entry; to find a different + * number is an error. 
+ */ ++ ldc->last_backend_conn = r->request_time; + count = ldap_count_entries(ldc->ldap, res); + if (count != 1) + { +@@ -1788,10 +1822,10 @@ + /* + * We have just bound the connection to a different user and password + * combination, which might be reused unintentionally next time this +- * connection is used from the connection pool. To ensure no confusion, +- * we mark the connection as unbound. ++ * connection is used from the connection pool. + */ +- ldc->bound = 0; ++ ldc->must_rebind = 0; ++ ap_log_rerror(APLOG_MARK, APLOG_TRACE5, 0, r, "LDC %pp used for authn, must be rebound", ldc); + } + + /* +@@ -1983,6 +2017,7 @@ + * We should have found exactly one entry; to find a different + * number is an error. + */ ++ ldc->last_backend_conn = r->request_time; + count = ldap_count_entries(ldc->ldap, res); + if (count != 1) + { +Index: include/util_ldap.h +=================================================================== +--- a/include/util_ldap.h (revision 1610395) ++++ b/include/util_ldap.h (revision 1610396) +@@ -133,6 +133,9 @@ + int ReferralHopLimit; /* # of referral hops to follow (default = AP_LDAP_DEFAULT_HOPLIMIT) */ + apr_time_t freed; /* the time this conn was placed back in the pool */ + apr_pool_t *rebind_pool; /* frequently cleared pool for rebind data */ ++ int must_rebind; /* The connection was last bound with other then binddn/bindpw */ ++ request_rec *r; /* request_rec used to find this util_ldap_connection_t */ ++ apr_time_t last_backend_conn; /* the approximate time of the last backend LDAP requst */ + } util_ldap_connection_t; + + typedef struct util_ldap_config_t { diff --git a/SOURCES/httpd-2.4.6-r1618851.patch b/SOURCES/httpd-2.4.6-r1618851.patch new file mode 100644 index 0000000..fa84b0c --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1618851.patch @@ -0,0 +1,28 @@ +# ./pullrev.sh r1618851 +http://svn.apache.org/viewvc?view=revision&revision=r1618851 + +--- httpd-2.4.6/modules/aaa/mod_authz_core.c ++++ httpd-2.4.6/modules/aaa/mod_authz_core.c +@@ -168,6 +168,13 @@ + return (void*)conf; + } + ++/* Only per-server directive we have is GLOBAL_ONLY */ ++static void *merge_authz_core_svr_config(apr_pool_t *p, ++ void *basev, void *newv) ++{ ++ return basev; ++} ++ + static void *create_authz_core_svr_config(apr_pool_t *p, server_rec *s) + { + authz_core_srv_conf *authcfg; +@@ -1140,7 +1148,7 @@ AP_DECLARE_MODULE(authz_core) = + create_authz_core_dir_config, /* dir config creater */ + merge_authz_core_dir_config, /* dir merger */ + create_authz_core_svr_config, /* server config */ +- NULL, /* merge server config */ ++ merge_authz_core_svr_config , /* merge server config */ + authz_cmds, + register_hooks /* register hooks */ + }; diff --git a/SOURCES/httpd-2.4.6-r1621601.patch b/SOURCES/httpd-2.4.6-r1621601.patch new file mode 100644 index 0000000..4296804 --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1621601.patch @@ -0,0 +1,21 @@ +--- a/modules/proxy/mod_proxy_http.c 2014/08/31 16:06:36 1621600 ++++ b/modules/proxy/mod_proxy_http.c 2014/08/31 16:07:45 1621601 +@@ -1652,6 +1652,18 @@ + if (!r->header_only && /* not HEAD request */ + (proxy_status != HTTP_NO_CONTENT) && /* not 204 */ + (proxy_status != HTTP_NOT_MODIFIED)) { /* not 304 */ ++ const char *tmp; ++ /* Add minimal headers needed to allow http_in filter ++ * detecting end of body without waiting for a timeout. 
*/ ++ if ((tmp = apr_table_get(r->headers_out, "Transfer-Encoding"))) { ++ apr_table_set(backend->r->headers_in, "Transfer-Encoding", tmp); ++ } ++ else if ((tmp = apr_table_get(r->headers_out, "Content-Length"))) { ++ apr_table_set(backend->r->headers_in, "Content-Length", tmp); ++ } ++ else if (te) { ++ apr_table_set(backend->r->headers_in, "Transfer-Encoding", te); ++ } + ap_discard_request_body(backend->r); + } + return proxy_status; diff --git a/SOURCES/httpd-2.4.6-r1624349.patch b/SOURCES/httpd-2.4.6-r1624349.patch new file mode 100644 index 0000000..c2e93f8 --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1624349.patch @@ -0,0 +1,21 @@ +diff --git a/server/scoreboard.c b/server/scoreboard.c +index bef2b90..c8ef6a3 100644 +--- a/server/scoreboard.c ++++ b/server/scoreboard.c +@@ -484,8 +484,14 @@ static int update_child_status_internal(int child_num, + ws->conn_bytes = 0; + } + if (r) { +- apr_cpystrn(ws->client, ap_get_remote_host(c, r->per_dir_config, +- REMOTE_NOLOOKUP, NULL), sizeof(ws->client)); ++ const char *client = ap_get_remote_host(c, r->per_dir_config, ++ REMOTE_NOLOOKUP, NULL); ++ if (!client || !strcmp(client, c->client_ip)) { ++ apr_cpystrn(ws->client, r->useragent_ip, sizeof(ws->client)); ++ } ++ else { ++ apr_cpystrn(ws->client, client, sizeof(ws->client)); ++ } + copy_request(ws->request, sizeof(ws->request), r); + if (r->server) { + apr_snprintf(ws->vhost, sizeof(ws->vhost), "%s:%d", diff --git a/SOURCES/httpd-2.4.6-r1631119.patch b/SOURCES/httpd-2.4.6-r1631119.patch new file mode 100644 index 0000000..f0f48e4 --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1631119.patch @@ -0,0 +1,16 @@ +# ./pullrev.sh 1631119 +http://svn.apache.org/viewvc?view=revision&revision=1631119 + +https://bugzilla.redhat.com/show_bug.cgi?id=1415257 + +--- httpd-2.4.6/modules/ldap/util_ldap.c ++++ httpd-2.4.6/modules/ldap/util_ldap.c +@@ -1824,7 +1824,7 @@ + * combination, which might be reused unintentionally next time this + * connection is used from the connection pool. + */ +- ldc->must_rebind = 0; ++ ldc->must_rebind = 1; + ap_log_rerror(APLOG_MARK, APLOG_TRACE5, 0, r, "LDC %pp used for authn, must be rebound", ldc); + } + diff --git a/SOURCES/httpd-2.4.6-r1633085.patch b/SOURCES/httpd-2.4.6-r1633085.patch new file mode 100644 index 0000000..ec10a6a --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1633085.patch @@ -0,0 +1,14 @@ +--- a/modules/ssl/ssl_engine_io.c 2014/10/20 09:11:19 1633084 ++++ b/modules/ssl/ssl_engine_io.c 2014/10/20 09:18:22 1633085 +@@ -1322,6 +1322,11 @@ + "\"SSLVerifyClient optional_no_ca\" " + "configuration"); + ssl_log_ssl_error(SSLLOG_MARK, APLOG_INFO, server); ++ ++ /* on session resumption ssl_callback_SSLVerify() ++ * will not be called, therefore we have to set it here ++ */ ++ sslconn->verify_info = "GENEROUS"; + } + else { + const char *error = sslconn->verify_error ? 
diff --git a/SOURCES/httpd-2.4.6-r1634529.patch b/SOURCES/httpd-2.4.6-r1634529.patch new file mode 100644 index 0000000..9b831c6 --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1634529.patch @@ -0,0 +1,275 @@ +diff --git a/modules/ssl/ssl_engine_init.c b/modules/ssl/ssl_engine_init.c +index 9811af8..568627f 100644 +--- a/modules/ssl/ssl_engine_init.c ++++ b/modules/ssl/ssl_engine_init.c +@@ -276,7 +276,7 @@ int ssl_init_Module(apr_pool_t *p, apr_pool_t *plog, + return HTTP_INTERNAL_SERVER_ERROR; + } + #ifdef HAVE_OCSP_STAPLING +- ssl_stapling_ex_init(); ++ ssl_stapling_certinfo_hash_init(p); + #endif + + /* +@@ -899,6 +899,8 @@ static void ssl_init_ctx(server_rec *s, + } + + static int ssl_server_import_cert(server_rec *s, ++ apr_pool_t *p, ++ apr_pool_t *ptemp, + modssl_ctx_t *mctx, + const char *id, + int idx) +@@ -933,7 +935,7 @@ static int ssl_server_import_cert(server_rec *s, + + #ifdef HAVE_OCSP_STAPLING + if ((mctx->pkp == FALSE) && (mctx->stapling_enabled == TRUE)) { +- if (!ssl_stapling_init_cert(s, mctx, cert)) { ++ if (!ssl_stapling_init_cert(s, p, ptemp, mctx, cert)) { + ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(02235) + "Unable to configure server certificate for stapling"); + } +@@ -1081,10 +1083,10 @@ static void ssl_init_server_certs(server_rec *s, + ecc_id = ssl_asn1_table_keyfmt(ptemp, vhost_id, SSL_AIDX_ECC); + #endif + +- have_rsa = ssl_server_import_cert(s, mctx, rsa_id, SSL_AIDX_RSA); +- have_dsa = ssl_server_import_cert(s, mctx, dsa_id, SSL_AIDX_DSA); ++ have_rsa = ssl_server_import_cert(s, p, ptemp, mctx, rsa_id, SSL_AIDX_RSA); ++ have_dsa = ssl_server_import_cert(s, p, ptemp, mctx, dsa_id, SSL_AIDX_DSA); + #ifndef OPENSSL_NO_EC +- have_ecc = ssl_server_import_cert(s, mctx, ecc_id, SSL_AIDX_ECC); ++ have_ecc = ssl_server_import_cert(s, p, ptemp, mctx, ecc_id, SSL_AIDX_ECC); + #endif + + if (!(have_rsa || have_dsa +diff --git a/modules/ssl/ssl_private.h b/modules/ssl/ssl_private.h +index 80e1e8e..0cc6d3f 100644 +--- a/modules/ssl/ssl_private.h ++++ b/modules/ssl/ssl_private.h +@@ -132,6 +132,13 @@ + #if OPENSSL_VERSION_NUMBER >= 0x00908080 && !defined(OPENSSL_NO_OCSP) \ + && !defined(OPENSSL_NO_TLSEXT) + #define HAVE_OCSP_STAPLING ++/* backward compatibility with OpenSSL < 1.0 */ ++#ifndef sk_OPENSSL_STRING_num ++#define sk_OPENSSL_STRING_num sk_num ++#endif ++#ifndef sk_OPENSSL_STRING_value ++#define sk_OPENSSL_STRING_value sk_value ++#endif + #if (OPENSSL_VERSION_NUMBER < 0x10000000) + #define sk_OPENSSL_STRING_pop sk_pop + #endif +@@ -862,10 +869,10 @@ const char *ssl_cmd_SSLStaplingErrorCacheTimeout(cmd_parms *, void *, const char + const char *ssl_cmd_SSLStaplingReturnResponderErrors(cmd_parms *, void *, int); + const char *ssl_cmd_SSLStaplingFakeTryLater(cmd_parms *, void *, int); + const char *ssl_cmd_SSLStaplingResponderTimeout(cmd_parms *, void *, const char *); +-const char *ssl_cmd_SSLStaplingForceURL(cmd_parms *, void *, const char *); ++const char *ssl_cmd_SSLStaplingForceURL(cmd_parms *, void *, const char *); + void modssl_init_stapling(server_rec *, apr_pool_t *, apr_pool_t *, modssl_ctx_t *); +-void ssl_stapling_ex_init(void); +-int ssl_stapling_init_cert(server_rec *s, modssl_ctx_t *mctx, X509 *x); ++void ssl_stapling_certinfo_hash_init(apr_pool_t *); ++int ssl_stapling_init_cert(server_rec *, apr_pool_t *, apr_pool_t *, modssl_ctx_t *, X509 *); + #endif + #ifndef OPENSSL_NO_SRP + int ssl_callback_SRPServerParams(SSL *, int *, void *); +diff --git a/modules/ssl/ssl_util_stapling.c b/modules/ssl/ssl_util_stapling.c +index 2be2c36..2387ae1 100644 
+--- a/modules/ssl/ssl_util_stapling.c ++++ b/modules/ssl/ssl_util_stapling.c +@@ -43,36 +43,32 @@ + + #define MAX_STAPLING_DER 10240 + +-/* Cached info stored in certificate ex_info. */ ++/* Cached info stored in the global stapling_certinfo hash. */ + typedef struct { +- /* Index in session cache SHA1 hash of certificate */ +- UCHAR idx[20]; +- /* Certificate ID for OCSP requests or NULL if ID cannot be determined */ ++ /* Index in session cache (SHA-1 digest of DER encoded certificate) */ ++ UCHAR idx[SHA_DIGEST_LENGTH]; ++ /* Certificate ID for OCSP request */ + OCSP_CERTID *cid; +- /* Responder details */ ++ /* URI of the OCSP responder */ + char *uri; + } certinfo; + +-static void certinfo_free(void *parent, void *ptr, CRYPTO_EX_DATA *ad, +- int idx, long argl, void *argp) ++static apr_status_t ssl_stapling_certid_free(void *data) + { +- certinfo *cinf = ptr; ++ OCSP_CERTID *cid = data; + +- if (!cinf) +- return; +- if (cinf->uri) +- OPENSSL_free(cinf->uri); +- OPENSSL_free(cinf); ++ if (cid) { ++ OCSP_CERTID_free(cid); ++ } ++ ++ return APR_SUCCESS; + } + +-static int stapling_ex_idx = -1; ++static apr_hash_t *stapling_certinfo; + +-void ssl_stapling_ex_init(void) ++void ssl_stapling_certinfo_hash_init(apr_pool_t *p) + { +- if (stapling_ex_idx != -1) +- return; +- stapling_ex_idx = X509_get_ex_new_index(0, "X509 cached OCSP info", 0, 0, +- certinfo_free); ++ stapling_certinfo = apr_hash_make(p); + } + + static X509 *stapling_get_issuer(modssl_ctx_t *mctx, X509 *x) +@@ -106,70 +102,97 @@ static X509 *stapling_get_issuer(modssl_ctx_t *mctx, X509 *x) + + } + +-int ssl_stapling_init_cert(server_rec *s, modssl_ctx_t *mctx, X509 *x) ++int ssl_stapling_init_cert(server_rec *s, apr_pool_t *p, apr_pool_t *ptemp, ++ modssl_ctx_t *mctx, X509 *x) + { +- certinfo *cinf; ++ UCHAR idx[SHA_DIGEST_LENGTH]; ++ certinfo *cinf = NULL; + X509 *issuer = NULL; ++ OCSP_CERTID *cid = NULL; + STACK_OF(OPENSSL_STRING) *aia = NULL; + +- if (x == NULL) ++ if ((x == NULL) || (X509_digest(x, EVP_sha1(), idx, NULL) != 1)) + return 0; +- cinf = X509_get_ex_data(x, stapling_ex_idx); ++ ++ cinf = apr_hash_get(stapling_certinfo, idx, sizeof(idx)); + if (cinf) { +- ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(02215) +- "ssl_stapling_init_cert: certificate already initialized!"); +- return 0; +- } +- cinf = OPENSSL_malloc(sizeof(certinfo)); +- if (!cinf) { +- ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(02216) +- "ssl_stapling_init_cert: error allocating memory!"); +- return 0; ++ /* ++ * We already parsed the certificate, and no OCSP URI was found. ++ * The certificate might be used for multiple vhosts, though, ++ * so we check for a ForceURL for this vhost. 
++ */ ++ if (!cinf->uri && !mctx->stapling_force_url) { ++ ssl_log_xerror(SSLLOG_MARK, APLOG_ERR, 0, ptemp, s, x, ++ APLOGNO(02814) "ssl_stapling_init_cert: no OCSP URI " ++ "in certificate and no SSLStaplingForceURL " ++ "configured for server %s", mctx->sc->vhost_id); ++ return 0; ++ } ++ return 1; + } +- cinf->cid = NULL; +- cinf->uri = NULL; +- X509_set_ex_data(x, stapling_ex_idx, cinf); +- +- issuer = stapling_get_issuer(mctx, x); + +- if (issuer == NULL) { +- ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(02217) +- "ssl_stapling_init_cert: Can't retrieve issuer certificate!"); ++ if (!(issuer = stapling_get_issuer(mctx, x))) { ++ ssl_log_xerror(SSLLOG_MARK, APLOG_ERR, 0, ptemp, s, x, APLOGNO(02217) ++ "ssl_stapling_init_cert: can't retrieve issuer " ++ "certificate!"); + return 0; + } + +- cinf->cid = OCSP_cert_to_id(NULL, x, issuer); ++ cid = OCSP_cert_to_id(NULL, x, issuer); + X509_free(issuer); +- if (!cinf->cid) ++ if (!cid) { ++ ssl_log_xerror(SSLLOG_MARK, APLOG_ERR, 0, ptemp, s, x, APLOGNO(02815) ++ "ssl_stapling_init_cert: can't create CertID " ++ "for OCSP request"); + return 0; +- X509_digest(x, EVP_sha1(), cinf->idx, NULL); ++ } + + aia = X509_get1_ocsp(x); +- if (aia) { +- cinf->uri = sk_OPENSSL_STRING_pop(aia); +- X509_email_free(aia); +- } +- if (!cinf->uri && !mctx->stapling_force_url) { +- ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(02218) +- "ssl_stapling_init_cert: no responder URL"); ++ ++ if (!aia && !mctx->stapling_force_url) { ++ OCSP_CERTID_free(cid); ++ ssl_log_xerror(SSLLOG_MARK, APLOG_ERR, 0, ptemp, s, x, ++ APLOGNO(02218) "ssl_stapling_init_cert: no OCSP URI " ++ "in certificate and no SSLStaplingForceURL set"); + return 0; + } ++ ++ /* At this point, we have determined that there's something to store */ ++ cinf = apr_pcalloc(p, sizeof(certinfo)); ++ memcpy (cinf->idx, idx, sizeof(idx)); ++ cinf->cid = cid; ++ /* make sure cid is also freed at pool cleanup */ ++ apr_pool_cleanup_register(p, cid, ssl_stapling_certid_free, ++ apr_pool_cleanup_null); ++ if (aia) { ++ /* allocate uri from the pconf pool */ ++ cinf->uri = apr_pstrdup(p, sk_OPENSSL_STRING_value(aia, 0)); ++ X509_email_free(aia); ++ } ++ ++ ssl_log_xerror(SSLLOG_MARK, APLOG_TRACE1, 0, ptemp, s, x, ++ "ssl_stapling_init_cert: storing certinfo for server %s", ++ mctx->sc->vhost_id); ++ ++ apr_hash_set(stapling_certinfo, cinf->idx, sizeof(cinf->idx), cinf); ++ + return 1; + } + +-static certinfo *stapling_get_cert_info(server_rec *s, modssl_ctx_t *mctx, ++static certinfo *stapling_get_certinfo(server_rec *s, modssl_ctx_t *mctx, + SSL *ssl) + { + certinfo *cinf; + X509 *x; ++ UCHAR idx[SHA_DIGEST_LENGTH]; + x = SSL_get_certificate(ssl); +- if (x == NULL) ++ if ((x == NULL) || (X509_digest(x, EVP_sha1(), idx, NULL) != 1)) + return NULL; +- cinf = X509_get_ex_data(x, stapling_ex_idx); ++ cinf = apr_hash_get(stapling_certinfo, idx, sizeof(idx)); + if (cinf && cinf->cid) + return cinf; + ap_log_error(APLOG_MARK, APLOG_INFO, 0, s, APLOGNO(01926) +- "stapling_get_cert_info: stapling not supported for certificate"); ++ "stapling_get_certinfo: stapling not supported for certificate"); + return NULL; + } + +@@ -585,7 +608,7 @@ static int stapling_cb(SSL *ssl, void *arg) + ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(01951) + "stapling_cb: OCSP Stapling callback called"); + +- cinf = stapling_get_cert_info(s, mctx, ssl); ++ cinf = stapling_get_certinfo(s, mctx, ssl); + if (cinf == NULL) { + return SSL_TLSEXT_ERR_NOACK; + } diff --git a/SOURCES/httpd-2.4.6-r1650310.patch 
b/SOURCES/httpd-2.4.6-r1650310.patch new file mode 100644 index 0000000..1561282 --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1650310.patch @@ -0,0 +1,132 @@ +diff --git a/docs/manual/mod/mod_ssl.html.en b/docs/manual/mod/mod_ssl.html.en +index ca178ab..4580f1c 100644 +--- a/docs/manual/mod/mod_ssl.html.en ++++ b/docs/manual/mod/mod_ssl.html.en +@@ -57,6 +57,7 @@ to provide the cryptography engine.

    +
+[rendered HTML, markup lost: this hunk adds an "SSLSessionTickets" entry to the mod_ssl
+ directive index, between the SSLCompression and SSLCryptoDevice entries]
+@@ -797,6 +798,26 @@ CRIME attack).
+[rendered HTML, markup lost: this hunk inserts the new SSLSessionTickets directive-description
+ block after the SSLCompression ("CRIME attack") section]
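Beyond this documentation change, r1650310 makes request processing merge duplicate slashes in r->uri (governed by the new MergeSlashes directive) and, to that end, splits ap_no2slash() into ap_no2slash_ex(name, is_fs_path), which keeps a leading UNC "//" only for filesystem paths. A rough standalone sketch of that merge loop follows; the function name is hypothetical and the #ifdef HAVE_UNC_PATHS guard of the real code is written as a plain runtime check for brevity.

#include <stdio.h>

/* Collapse runs of '/' into a single '/', keeping a leading "//"
 * (UNC prefix) only when the string names a filesystem path. */
static void merge_slashes(char *name, int is_fs_path)
{
    char *d, *s;

    if (!*name) {
        return;
    }
    s = d = name;
    if (is_fs_path && s[0] == '/' && s[1] == '/') {
        *d++ = *s++;                 /* pass the first slash through so the leading pair survives */
    }
    while (*s) {
        if ((*d++ = *s) == '/') {
            do {
                ++s;                 /* swallow the duplicates */
            } while (*s == '/');
        }
        else {
            ++s;
        }
    }
    *d = '\0';
}

int main(void)
{
    char uri[] = "/a//b///c";
    char unc[] = "//host/share//file";

    merge_slashes(uri, 0);           /* URI path: "/a/b/c" */
    merge_slashes(unc, 1);           /* fs path:  "//host/share/file" */
    printf("%s\n%s\n", uri, unc);
    return 0;
}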
    ++
    Description:Configures mutex mechanism and lock file directory for all +diff --git a/include/http_core.h b/include/http_core.h +index c05d06e..76bf5a4 100644 +--- a/include/http_core.h ++++ b/include/http_core.h +@@ -465,6 +465,17 @@ typedef unsigned long etag_components_t; + /* This is the default value used */ + #define ETAG_BACKWARD (ETAG_MTIME | ETAG_SIZE) + ++/* Generic ON/OFF/UNSET for unsigned int foo :2 */ ++#define AP_CORE_CONFIG_OFF (0) ++#define AP_CORE_CONFIG_ON (1) ++#define AP_CORE_CONFIG_UNSET (2) ++ ++/* Generic merge of flag */ ++#define AP_CORE_MERGE_FLAG(field, to, base, over) to->field = \ ++ over->field != AP_CORE_CONFIG_UNSET \ ++ ? over->field \ ++ : base->field ++ + /** + * @brief Server Signature Enumeration + */ +@@ -682,7 +693,7 @@ typedef struct { + #define AP_HTTP_METHODS_LENIENT 1 + #define AP_HTTP_METHODS_REGISTERED 2 + char http_methods; +- ++ unsigned int merge_slashes; + } core_server_config; + + /* for AddOutputFiltersByType in core.c */ +diff --git a/include/httpd.h b/include/httpd.h +index 176ef5e..a552358 100644 +--- a/include/httpd.h ++++ b/include/httpd.h +@@ -1622,11 +1622,21 @@ AP_DECLARE(int) ap_unescape_url_keep2f(char *url, int decode_slashes); + AP_DECLARE(int) ap_unescape_urlencoded(char *query); + + /** +- * Convert all double slashes to single slashes +- * @param name The string to convert ++ * Convert all double slashes to single slashes, except where significant ++ * to the filesystem on the current platform. ++ * @param name The string to convert, assumed to be a filesystem path + */ + AP_DECLARE(void) ap_no2slash(char *name); + ++/** ++ * Convert all double slashes to single slashes, except where significant ++ * to the filesystem on the current platform. ++ * @param name The string to convert ++ * @param is_fs_path if set to 0, the significance of any double-slashes is ++ * ignored. ++ */ ++AP_DECLARE(void) ap_no2slash_ex(char *name, int is_fs_path); ++ + /** + * Remove all ./ and xx/../ substrings from a file name. Also remove + * any leading ../ or /../ substrings. +diff --git a/server/core.c b/server/core.c +index 0e69f8c..67efd7e 100644 +--- a/server/core.c ++++ b/server/core.c +@@ -476,6 +476,7 @@ static void *create_core_server_config(apr_pool_t *a, server_rec *s) + */ + + conf->trace_enable = AP_TRACE_UNSET; ++ conf->merge_slashes = AP_CORE_CONFIG_UNSET; + + return (void *)conf; + } +@@ -536,6 +537,8 @@ static void *merge_core_server_configs(apr_pool_t *p, void *basev, void *virtv) + ? 
virt->merge_trailers + : base->merge_trailers; + ++ AP_CORE_MERGE_FLAG(merge_slashes, conf, base, virt); ++ + return conf; + } + +@@ -1673,6 +1676,13 @@ static const char *set_override(cmd_parms *cmd, void *d_, const char *l) + return NULL; + } + ++static const char *set_core_server_flag(cmd_parms *cmd, void *s_, int flag) ++{ ++ core_server_config *conf = ++ ap_get_core_module_config(cmd->server->module_config); ++ return ap_set_flag_slot(cmd, conf, flag); ++} ++ + static const char *set_override_list(cmd_parms *cmd, void *d_, int argc, char *const argv[]) + { + core_dir_config *d = d_; +@@ -4216,6 +4226,10 @@ AP_INIT_ITERATE("HttpProtocolOptions", set_http_protocol_options, NULL, RSRC_CON + , + AP_INIT_ITERATE("RegisterHttpMethod", set_http_method, NULL, RSRC_CONF, + "Registers non-standard HTTP methods"), ++AP_INIT_FLAG("MergeSlashes", set_core_server_flag, ++ (void *)APR_OFFSETOF(core_server_config, merge_slashes), ++ RSRC_CONF, ++ "Controls whether consecutive slashes in the URI path are merged"), + { NULL } + }; + +diff --git a/server/request.c b/server/request.c +index 4eef097..cba3891 100644 +--- a/server/request.c ++++ b/server/request.c +@@ -167,6 +167,8 @@ AP_DECLARE(int) ap_process_request_internal(request_rec *r) + int file_req = (r->main && r->filename); + int access_status; + core_dir_config *d; ++ core_server_config *sconf = ++ ap_get_core_module_config(r->server->module_config); + + /* Ignore embedded %2F's in path for proxy requests */ + if (!r->proxyreq && r->parsed_uri.path) { +@@ -191,6 +193,12 @@ AP_DECLARE(int) ap_process_request_internal(request_rec *r) + } + + ap_getparents(r->uri); /* OK --- shrinking transformations... */ ++ if (sconf->merge_slashes != AP_CORE_CONFIG_OFF) { ++ ap_no2slash(r->uri); ++ if (r->parsed_uri.path) { ++ ap_no2slash(r->parsed_uri.path); ++ } ++ } + + /* All file subrequests are a huge pain... they cannot bubble through the + * next several steps. Only file subrequests are allowed an empty uri, +@@ -1383,20 +1391,7 @@ AP_DECLARE(int) ap_location_walk(request_rec *r) + + cache = prep_walk_cache(AP_NOTE_LOCATION_WALK, r); + cached = (cache->cached != NULL); +- +- /* Location and LocationMatch differ on their behaviour w.r.t. multiple +- * slashes. Location matches multiple slashes with a single slash, +- * LocationMatch doesn't. An exception, for backwards brokenness is +- * absoluteURIs... in which case neither match multiple slashes. +- */ +- if (r->uri[0] != '/') { +- entry_uri = r->uri; +- } +- else { +- char *uri = apr_pstrdup(r->pool, r->uri); +- ap_no2slash(uri); +- entry_uri = uri; +- } ++ entry_uri = r->uri; + + /* If we have an cache->cached location that matches r->uri, + * and the vhost's list of locations hasn't changed, we can skip +@@ -1449,7 +1444,7 @@ AP_DECLARE(int) ap_location_walk(request_rec *r) + * terminated (or at the end of the string) to match. + */ + if (entry_core->r +- ? ap_regexec(entry_core->r, r->uri, 0, NULL, 0) ++ ? ap_regexec(entry_core->r, entry_uri, 0, NULL, 0) + : (entry_core->d_is_fnmatch + ? 
apr_fnmatch(entry_core->d, cache->cached, APR_FNM_PATHNAME) + : (strncmp(entry_core->d, cache->cached, len) +diff --git a/server/util.c b/server/util.c +index f9e3b51..4eac462 100644 +--- a/server/util.c ++++ b/server/util.c +@@ -561,16 +561,20 @@ AP_DECLARE(void) ap_getparents(char *name) + name[l] = '\0'; + } + } +- +-AP_DECLARE(void) ap_no2slash(char *name) ++AP_DECLARE(void) ap_no2slash_ex(char *name, int is_fs_path) + { ++ + char *d, *s; + ++ if (!*name) { ++ return; ++ } ++ + s = d = name; + + #ifdef HAVE_UNC_PATHS + /* Check for UNC names. Leave leading two slashes. */ +- if (s[0] == '/' && s[1] == '/') ++ if (is_fs_path && s[0] == '/' && s[1] == '/') + *d++ = *s++; + #endif + +@@ -587,6 +591,10 @@ AP_DECLARE(void) ap_no2slash(char *name) + *d = '\0'; + } + ++AP_DECLARE(void) ap_no2slash(char *name) ++{ ++ ap_no2slash_ex(name, 1); ++} + + /* + * copy at most n leading directories of s into d diff --git a/SOURCES/httpd-2.4.6-ab-overflow.patch b/SOURCES/httpd-2.4.6-ab-overflow.patch new file mode 100644 index 0000000..91a76b2 --- /dev/null +++ b/SOURCES/httpd-2.4.6-ab-overflow.patch @@ -0,0 +1,20 @@ +--- a/support/ab.c 2014/08/14 12:12:38 1617912 ++++ b/support/ab.c 2014/08/14 12:15:31 1617913 +@@ -1029,7 +1029,7 @@ + ap_round_ms(stats[done - 1].time)); + else + printf(" %d%% %5" APR_TIME_T_FMT "\n", percs[i], +- ap_round_ms(stats[(int) (done * percs[i] / 100)].time)); ++ ap_round_ms(stats[(unsigned long)done * percs[i] / 100].time)); + } + } + if (csvperc) { +@@ -1046,7 +1046,7 @@ + else if (i == 100) + t = ap_double_ms(stats[done - 1].time); + else +- t = ap_double_ms(stats[(int) (0.5 + done * i / 100.0)].time); ++ t = ap_double_ms(stats[(unsigned long) (0.5 + (double)done * i / 100.0)].time); + fprintf(out, "%d,%.3f\n", i, t); + } + fclose(out); diff --git a/SOURCES/httpd-2.4.6-ab-ssl-error.patch b/SOURCES/httpd-2.4.6-ab-ssl-error.patch new file mode 100644 index 0000000..8f169f7 --- /dev/null +++ b/SOURCES/httpd-2.4.6-ab-ssl-error.patch @@ -0,0 +1,27 @@ +diff --git a/support/ab.c b/support/ab.c +index bf76406..80c1b74 100644 +--- a/support/ab.c ++++ b/support/ab.c +@@ -1346,11 +1346,21 @@ static void read_connection(struct connection * c) + && good == 0) { + return; + } ++ else if (scode == SSL_ERROR_SYSCALL ++ && status == 0 ++ && c->read != 0) { ++ /* connection closed, but in violation of the protocol, after ++ * some data has already been read; this commonly happens, so ++ * let the length check catch any response errors ++ */ ++ good++; ++ close_connection(c); ++ } + else if (scode != SSL_ERROR_WANT_WRITE + && scode != SSL_ERROR_WANT_READ) { + /* some fatal error: */ + c->read = 0; +- BIO_printf(bio_err, "SSL read failed (%d) - closing connection\n", scode); ++ BIO_printf(bio_err, "SSL read failed (%d) - closing connection\n", scode); + ERR_print_errors(bio_err); + close_connection(c); + } diff --git a/SOURCES/httpd-2.4.6-ap-ipv6.patch b/SOURCES/httpd-2.4.6-ap-ipv6.patch new file mode 100644 index 0000000..e1eba14 --- /dev/null +++ b/SOURCES/httpd-2.4.6-ap-ipv6.patch @@ -0,0 +1,139 @@ +diff --git a/support/ab.c b/support/ab.c +index f54c402..93c9066 100644 +--- a/support/ab.c ++++ b/support/ab.c +@@ -344,6 +344,7 @@ apr_time_t start, lasttime, stoptime; + char _request[2048]; + char *request = _request; + apr_size_t reqlen; ++int requests_initialized = 0; + + /* one global throw-away buffer to read stuff into */ + char buffer[8192]; +@@ -1253,12 +1254,18 @@ static void start_connect(struct connection * c) + else { + set_conn_state(c, STATE_UNCONNECTED); + 
apr_socket_close(c->aprsock); +- err_conn++; +- if (bad++ > 10) { ++ if (good == 0 && destsa->next) { ++ destsa = destsa->next; ++ err_conn = 0; ++ } ++ else if (bad++ > 10) { + fprintf(stderr, + "\nTest aborted after 10 failures\n\n"); + apr_err("apr_socket_connect()", rv); + } ++ else { ++ err_conn++; ++ } + + start_connect(c); + return; +@@ -1339,6 +1346,7 @@ static void read_connection(struct connection * c) + apr_status_t status; + char *part; + char respcode[4]; /* 3 digits and null */ ++ int i; + + r = sizeof(buffer); + #ifdef USE_SSL +@@ -1362,6 +1370,13 @@ static void read_connection(struct connection * c) + good++; + close_connection(c); + } ++ else if (scode == SSL_ERROR_SYSCALL ++ && c->read == 0 ++ && destsa->next ++ && c->state == STATE_CONNECTING ++ && good == 0) { ++ return; ++ } + else if (scode != SSL_ERROR_WANT_WRITE + && scode != SSL_ERROR_WANT_READ) { + /* some fatal error: */ +@@ -1387,8 +1402,8 @@ static void read_connection(struct connection * c) + } + /* catch legitimate fatal apr_socket_recv errors */ + else if (status != APR_SUCCESS) { +- err_recv++; + if (recverrok) { ++ err_recv++; + bad++; + close_connection(c); + if (verbosity >= 1) { +@@ -1396,7 +1411,12 @@ static void read_connection(struct connection * c) + fprintf(stderr,"%s: %s (%d)\n", "apr_socket_recv", apr_strerror(status, buf, sizeof buf), status); + } + return; +- } else { ++ } else if (destsa->next && c->state == STATE_CONNECTING ++ && c->read == 0 && good == 0) { ++ return; ++ } ++ else { ++ err_recv++; + apr_err("apr_socket_recv", status); + } + } +@@ -1523,6 +1543,16 @@ static void read_connection(struct connection * c) + } + c->bread += c->cbx - (s + l - c->cbuff) + r - tocopy; + totalbread += c->bread; ++ ++ /* We have received the header, so we know this destination socket ++ * address is working, so initialize all remaining requests. */ ++ if (!requests_initialized) { ++ for (i = 1; i < concurrency; i++) { ++ con[i].socknum = i; ++ start_connect(&con[i]); ++ } ++ requests_initialized = 1; ++ } + } + } + else { +@@ -1734,11 +1764,10 @@ static void test(void) + apr_signal(SIGINT, output_results); + #endif + +- /* initialise lots of requests */ +- for (i = 0; i < concurrency; i++) { +- con[i].socknum = i; +- start_connect(&con[i]); +- } ++ /* initialise first connection to determine destination socket address ++ * which should be used for next connections. 
*/ ++ con[0].socknum = 0; ++ start_connect(&con[0]); + + do { + apr_int32_t n; +@@ -1786,14 +1815,20 @@ static void test(void) + if ((rtnev & APR_POLLIN) || (rtnev & APR_POLLPRI) || (rtnev & APR_POLLHUP)) + read_connection(c); + if ((rtnev & APR_POLLERR) || (rtnev & APR_POLLNVAL)) { +- bad++; +- err_except++; +- /* avoid apr_poll/EINPROGRESS loop on HP-UX, let recv discover ECONNREFUSED */ +- if (c->state == STATE_CONNECTING) { +- read_connection(c); ++ if (destsa->next && c->state == STATE_CONNECTING && good == 0) { ++ destsa = destsa->next; ++ start_connect(c); + } + else { +- start_connect(c); ++ bad++; ++ err_except++; ++ /* avoid apr_poll/EINPROGRESS loop on HP-UX, let recv discover ECONNREFUSED */ ++ if (c->state == STATE_CONNECTING) { ++ read_connection(c); ++ } ++ else { ++ start_connect(c); ++ } + } + continue; + } diff --git a/SOURCES/httpd-2.4.6-apachectl-httpd-env.patch b/SOURCES/httpd-2.4.6-apachectl-httpd-env.patch new file mode 100644 index 0000000..3e884b9 --- /dev/null +++ b/SOURCES/httpd-2.4.6-apachectl-httpd-env.patch @@ -0,0 +1,38 @@ +diff --git a/docs/man/apachectl.8 b/docs/man/apachectl.8 +index 054550f..4bfc7cb 100644 +--- a/docs/man/apachectl.8 ++++ b/docs/man/apachectl.8 +@@ -77,7 +77,7 @@ status + Displays a brief status report\&. Similar to the fullstatus option, except that the list of requests currently being served is omitted\&. + .TP + graceful +-Gracefully restarts the Apache httpd daemon\&. If the daemon is not running, it is started\&. This differs from a normal restart in that currently open connections are not aborted\&. A side effect is that old log files will not be closed immediately\&. This means that if used in a log rotation script, a substantial delay may be necessary to ensure that the old log files are closed before processing them\&. This command automatically checks the configuration files as in configtest before initiating the restart to make sure Apache doesn't die\&. This is equivalent to apachectl -k graceful\&. ++Gracefully restarts the Apache httpd daemon\&. If the daemon is not running, it is not started\&. This differs from a normal restart in that currently open connections are not aborted\&. A side effect is that old log files will not be closed immediately\&. This means that if used in a log rotation script, a substantial delay may be necessary to ensure that the old log files are closed before processing them\&. This command automatically checks the configuration files as in configtest before initiating the restart to make sure Apache doesn't die\&. This is equivalent to apachectl -k graceful\&. + .TP + graceful-stop + Gracefully stops the Apache httpd daemon\&. This differs from a normal stop in that currently open connections are not aborted\&. A side effect is that old log files will not be closed immediately\&. This is equivalent to apachectl -k graceful-stop\&. +diff --git a/support/apachectl.in b/support/apachectl.in +index 2d59623..10fc280 100644 +--- a/support/apachectl.in ++++ b/support/apachectl.in +@@ -93,9 +93,9 @@ function testconfig() { + # httpd is denied terminal access in SELinux, so run in the + # current context to get stdout from $HTTPD -t. + if test -x /usr/sbin/selinuxenabled && /usr/sbin/selinuxenabled; then +- runcon -- `id -Z` $HTTPD $OPTIONS -t ++ runcon -- `id -Z` /usr/sbin/httpd $OPTIONS -t + else +- $HTTPD $OPTIONS -t ++ /usr/sbin/httpd $OPTIONS -t + fi + ERROR=$? + } +@@ -134,7 +134,7 @@ fullstatus) + $LYNX $STATUSURL + ;; + *) +- $HTTPD $OPTIONS "$@" ++ /usr/sbin/httpd $OPTIONS "$@" + ERROR=$? 
+ esac + diff --git a/SOURCES/httpd-2.4.6-apachectl-status.patch b/SOURCES/httpd-2.4.6-apachectl-status.patch new file mode 100644 index 0000000..779a9ab --- /dev/null +++ b/SOURCES/httpd-2.4.6-apachectl-status.patch @@ -0,0 +1,13 @@ +diff --git a/docs/man/apachectl.8 b/docs/man/apachectl.8 +index 4bfc7cb..372c08e 100644 +--- a/docs/man/apachectl.8 ++++ b/docs/man/apachectl.8 +@@ -74,7 +74,7 @@ fullstatus + Displays a full status report from mod_status\&. For this to work, you need to have mod_status enabled on your server and a text-based browser such as lynx available on your system\&. The URL used to access the status report can be set by editing the STATUSURL variable in the script\&. + .TP + status +-Displays a brief status report\&. Similar to the fullstatus option, except that the list of requests currently being served is omitted\&. ++Displays a brief status report using systemd\&. + .TP + graceful + Gracefully restarts the Apache httpd daemon\&. If the daemon is not running, it is not started\&. This differs from a normal restart in that currently open connections are not aborted\&. A side effect is that old log files will not be closed immediately\&. This means that if used in a log rotation script, a substantial delay may be necessary to ensure that the old log files are closed before processing them\&. This command automatically checks the configuration files as in configtest before initiating the restart to make sure Apache doesn't die\&. This is equivalent to apachectl -k graceful\&. diff --git a/SOURCES/httpd-2.4.6-bomb.patch b/SOURCES/httpd-2.4.6-bomb.patch new file mode 100644 index 0000000..42dbb93 --- /dev/null +++ b/SOURCES/httpd-2.4.6-bomb.patch @@ -0,0 +1,14 @@ +diff --git a/docs/conf/extra/httpd-autoindex.conf.in b/docs/conf/extra/httpd-autoindex.conf.in +index 0e8b626..dd6f2c6 100644 +--- a/docs/conf/extra/httpd-autoindex.conf.in ++++ b/docs/conf/extra/httpd-autoindex.conf.in +@@ -53,7 +53,8 @@ AddIcon /icons/dvi.gif .dvi + AddIcon /icons/uuencoded.gif .uu + AddIcon /icons/script.gif .conf .sh .shar .csh .ksh .tcl + AddIcon /icons/tex.gif .tex +-AddIcon /icons/bomb.gif core. ++AddIcon /icons/bomb.gif /core ++AddIcon /icons/bomb.gif */core.* + + AddIcon /icons/back.gif .. 
+ AddIcon /icons/hand.right.gif README diff --git a/SOURCES/httpd-2.4.6-default-port-worker.patch b/SOURCES/httpd-2.4.6-default-port-worker.patch new file mode 100644 index 0000000..25315e2 --- /dev/null +++ b/SOURCES/httpd-2.4.6-default-port-worker.patch @@ -0,0 +1,13 @@ +diff --git a/modules/proxy/proxy_util.c b/modules/proxy/proxy_util.c +index e672e4a..8be833a 100644 +--- a/modules/proxy/proxy_util.c ++++ b/modules/proxy/proxy_util.c +@@ -3557,6 +3557,8 @@ static proxy_schemes_t pschemes[] = + {"fcgi", 8000}, + {"ajp", AJP13_DEF_PORT}, + {"scgi", 4000}, ++ {"ws", 80}, ++ {"wss", 443}, + { NULL, 0xFFFF } /* unknown port */ + }; + diff --git a/SOURCES/httpd-2.4.6-dhparams-free.patch b/SOURCES/httpd-2.4.6-dhparams-free.patch new file mode 100644 index 0000000..2319ff8 --- /dev/null +++ b/SOURCES/httpd-2.4.6-dhparams-free.patch @@ -0,0 +1,31 @@ +diff --git a/modules/ssl/ssl_engine_init.c b/modules/ssl/ssl_engine_init.c +index ee46db4..c560422 100644 +--- a/modules/ssl/ssl_engine_init.c ++++ b/modules/ssl/ssl_engine_init.c +@@ -915,7 +915,7 @@ static void ssl_init_server_certs(server_rec *s, + const char *rsa_id, *dsa_id; + #ifndef OPENSSL_NO_EC + const char *ecc_id; +- EC_GROUP *ecparams; ++ EC_GROUP *ecparams = NULL; + int nid; + EC_KEY *eckey = NULL; + #endif +@@ -988,6 +988,7 @@ static void ssl_init_server_certs(server_rec *s, + "Custom DH parameters (%d bits) for %s loaded from %s", + BN_num_bits(dhparams->p), vhost_id, + mctx->pks->cert_files[0]); ++ DH_free(dhparams); + } + + #ifndef OPENSSL_NO_EC +@@ -1012,6 +1013,9 @@ static void ssl_init_server_certs(server_rec *s, + SSL_CTX_set_tmp_ecdh(mctx->ssl_ctx, eckey); + #endif + } ++ if (ecparams) { ++ EC_GROUP_free(ecparams); ++ } + EC_KEY_free(eckey); + #endif + } diff --git a/SOURCES/httpd-2.4.6-full-release.patch b/SOURCES/httpd-2.4.6-full-release.patch new file mode 100644 index 0000000..118c57c --- /dev/null +++ b/SOURCES/httpd-2.4.6-full-release.patch @@ -0,0 +1,48 @@ +diff --git a/server/core.c b/server/core.c +index fb5e34a..e71f716 100644 +--- a/server/core.c ++++ b/server/core.c +@@ -3115,7 +3115,8 @@ enum server_token_type { + SrvTk_MINIMAL, /* eg: Apache/2.0.41 */ + SrvTk_OS, /* eg: Apache/2.0.41 (UNIX) */ + SrvTk_FULL, /* eg: Apache/2.0.41 (UNIX) PHP/4.2.2 FooBar/1.2b */ +- SrvTk_PRODUCT_ONLY /* eg: Apache */ ++ SrvTk_FULL_RELEASE, /* eg: Apache/2.0.41 (UNIX) (Release 32.el7) PHP/4.2.2 FooBar/1.2b */ ++ SrvTk_PRODUCT_ONLY /* eg: Apache */ + }; + static enum server_token_type ap_server_tokens = SrvTk_FULL; + +@@ -3191,7 +3192,10 @@ static void set_banner(apr_pool_t *pconf) + else if (ap_server_tokens == SrvTk_MAJOR) { + ap_add_version_component(pconf, AP_SERVER_BASEPRODUCT "/" AP_SERVER_MAJORVERSION); + } +- else { ++ else if (ap_server_tokens == SrvTk_FULL_RELEASE) { ++ ap_add_version_component(pconf, AP_SERVER_BASEVERSION " (" PLATFORM ") (Release @RELEASE@)"); ++ } ++ else { + ap_add_version_component(pconf, AP_SERVER_BASEVERSION " (" PLATFORM ")"); + } + +@@ -3199,7 +3203,7 @@ static void set_banner(apr_pool_t *pconf) + * Lock the server_banner string if we're not displaying + * the full set of tokens + */ +- if (ap_server_tokens != SrvTk_FULL) { ++ if (ap_server_tokens != SrvTk_FULL && ap_server_tokens != SrvTk_FULL_RELEASE) { + banner_locked++; + } + server_description = AP_SERVER_BASEVERSION " (" PLATFORM ")"; +@@ -3232,8 +3236,11 @@ static const char *set_serv_tokens(cmd_parms *cmd, void *dummy, + else if (!strcasecmp(arg1, "Full")) { + ap_server_tokens = SrvTk_FULL; + } ++ else if (!strcasecmp(arg1, "Full-Release")) 
{ ++ ap_server_tokens = SrvTk_FULL_RELEASE; ++ } + else { +- return "ServerTokens takes 1 argument, 'Prod', 'Major', 'Minor', 'Min', 'OS', or 'Full'"; ++ return "ServerTokens takes 1 argument, 'Prod', 'Major', 'Minor', 'Min', 'OS', 'Full' or 'Full-Release'"; + } + + return NULL; diff --git a/SOURCES/httpd-2.4.6-http-protocol-options-define.patch b/SOURCES/httpd-2.4.6-http-protocol-options-define.patch new file mode 100644 index 0000000..eb6e5e5 --- /dev/null +++ b/SOURCES/httpd-2.4.6-http-protocol-options-define.patch @@ -0,0 +1,17 @@ +diff --git a/server/main.c b/server/main.c +index 28d1872..544882d 100644 +--- a/server/main.c ++++ b/server/main.c +@@ -478,6 +478,12 @@ int main(int argc, const char * const argv[]) + ap_server_post_read_config = apr_array_make(pcommands, 1, sizeof(char *)); + ap_server_config_defines = apr_array_make(pcommands, 1, sizeof(char *)); + ++ { ++ char **new = (char **)apr_array_push(ap_server_config_defines); ++ ++ *new = "_RH_HAS_HTTPPROTOCOLOPTIONS"; ++ } ++ + error = ap_setup_prelinked_modules(process); + if (error) { + ap_log_error(APLOG_MARK, APLOG_STARTUP|APLOG_EMERG, 0, NULL, APLOGNO(00012) diff --git a/SOURCES/httpd-2.4.6-ldaprefer.patch b/SOURCES/httpd-2.4.6-ldaprefer.patch new file mode 100644 index 0000000..29b79c9 --- /dev/null +++ b/SOURCES/httpd-2.4.6-ldaprefer.patch @@ -0,0 +1,73 @@ +http://svn.apache.org/viewvc/httpd/httpd/trunk/modules/ldap/util_ldap.c?r1=1517388&r2=1517387&pathrev=1517388&view=patch + +--- trunk/modules/ldap/util_ldap.c 2013/08/25 21:46:27 1517387 ++++ trunk/modules/ldap/util_ldap.c 2013/08/25 22:42:29 1517388 +@@ -60,6 +60,7 @@ + #endif + + #define AP_LDAP_HOPLIMIT_UNSET -1 ++#define AP_LDAP_CHASEREFERRALS_SDKDEFAULT -1 + #define AP_LDAP_CHASEREFERRALS_OFF 0 + #define AP_LDAP_CHASEREFERRALS_ON 1 + +@@ -371,7 +372,7 @@ + ldap_option = ldc->deref; + ldap_set_option(ldc->ldap, LDAP_OPT_DEREF, &ldap_option); + +- if (ldc->ChaseReferrals == AP_LDAP_CHASEREFERRALS_ON) { ++ if (ldc->ChaseReferrals != AP_LDAP_CHASEREFERRALS_SDKDEFAULT) { + /* Set options for rebind and referrals. */ + ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server, APLOGNO(01278) + "LDAP: Setting referrals to %s.", +@@ -391,7 +392,9 @@ + uldap_connection_unbind(ldc); + return(result->rc); + } ++ } + ++ if (ldc->ChaseReferrals == AP_LDAP_CHASEREFERRALS_ON) { + if ((ldc->ReferralHopLimit != AP_LDAP_HOPLIMIT_UNSET) && ldc->ChaseReferrals == AP_LDAP_CHASEREFERRALS_ON) { + /* Referral hop limit - only if referrals are enabled and a hop limit is explicitly requested */ + ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server, APLOGNO(01280) +@@ -2584,15 +2587,25 @@ + + static const char *util_ldap_set_chase_referrals(cmd_parms *cmd, + void *config, +- int mode) ++ const char *arg) + { + util_ldap_config_t *dc = config; + + ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, cmd->server, APLOGNO(01311) +- "LDAP: Setting referral chasing %s", +- (mode == AP_LDAP_CHASEREFERRALS_ON) ? 
"ON" : "OFF"); ++ "LDAP: Setting referral chasing %s", arg); + +- dc->ChaseReferrals = mode; ++ if (0 == strcasecmp(arg, "on")) { ++ dc->ChaseReferrals = AP_LDAP_CHASEREFERRALS_ON; ++ } ++ else if (0 == strcasecmp(arg, "off")) { ++ dc->ChaseReferrals = AP_LDAP_CHASEREFERRALS_OFF; ++ } ++ else if (0 == strcasecmp(arg, "default")) { ++ dc->ChaseReferrals = AP_LDAP_CHASEREFERRALS_SDKDEFAULT; ++ } ++ else { ++ return "LDAPReferrals must be 'on', 'off', or 'default'"; ++ } + + return(NULL); + } +@@ -3116,9 +3129,9 @@ + "Specify the LDAP socket connection timeout in seconds " + "(default: 10)"), + +- AP_INIT_FLAG("LDAPReferrals", util_ldap_set_chase_referrals, ++ AP_INIT_TAKE1("LDAPReferrals", util_ldap_set_chase_referrals, + NULL, OR_AUTHCFG, +- "Choose whether referrals are chased ['ON'|'OFF']. Default 'ON'"), ++ "Choose whether referrals are chased ['ON'|'OFF'|'DEFAULT']. Default 'ON'"), + + AP_INIT_TAKE1("LDAPReferralHopLimit", util_ldap_set_referral_hop_limit, + NULL, OR_AUTHCFG, diff --git a/SOURCES/httpd-2.4.6-mod_authz_dbd-missing-query.patch b/SOURCES/httpd-2.4.6-mod_authz_dbd-missing-query.patch new file mode 100644 index 0000000..a763f89 --- /dev/null +++ b/SOURCES/httpd-2.4.6-mod_authz_dbd-missing-query.patch @@ -0,0 +1,55 @@ +diff --git a/modules/aaa/mod_authz_dbd.c b/modules/aaa/mod_authz_dbd.c +index 1a456fe..6a0f705 100644 +--- a/modules/aaa/mod_authz_dbd.c ++++ b/modules/aaa/mod_authz_dbd.c +@@ -116,7 +116,7 @@ static int authz_dbd_login(request_rec *r, authz_dbd_cfg *cfg, + const char *newuri = NULL; + int nrows; + const char *message; +- ap_dbd_t *dbd = dbd_handle(r); ++ ap_dbd_t *dbd; + apr_dbd_prepared_t *query; + apr_dbd_results_t *res = NULL; + apr_dbd_row_t *row = NULL; +@@ -126,6 +126,16 @@ static int authz_dbd_login(request_rec *r, authz_dbd_cfg *cfg, + "No query configured for %s!", action); + return HTTP_INTERNAL_SERVER_ERROR; + } ++ ++ dbd = dbd_handle(r); ++ if (dbd == NULL) { ++ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02902) ++ "No db handle available for %s! " ++ "Check your database access", ++ action); ++ return HTTP_INTERNAL_SERVER_ERROR; ++ } ++ + query = apr_hash_get(dbd->prepared, cfg->query, APR_HASH_KEY_STRING); + if (query == NULL) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01643) +@@ -202,7 +212,7 @@ static int authz_dbd_group_query(request_rec *r, authz_dbd_cfg *cfg, + /* SELECT group FROM authz WHERE user = %s */ + int rv; + const char *message; +- ap_dbd_t *dbd = dbd_handle(r); ++ ap_dbd_t *dbd; + apr_dbd_prepared_t *query; + apr_dbd_results_t *res = NULL; + apr_dbd_row_t *row = NULL; +@@ -212,6 +222,15 @@ static int authz_dbd_group_query(request_rec *r, authz_dbd_cfg *cfg, + "No query configured for dbd-group!"); + return HTTP_INTERNAL_SERVER_ERROR; + } ++ ++ dbd = dbd_handle(r); ++ if (dbd == NULL) { ++ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02903) ++ "No db handle available for dbd-query! 
" ++ "Check your database access"); ++ return HTTP_INTERNAL_SERVER_ERROR; ++ } ++ + query = apr_hash_get(dbd->prepared, cfg->query, APR_HASH_KEY_STRING); + if (query == NULL) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01650) diff --git a/SOURCES/httpd-2.4.6-mpm-segfault.patch b/SOURCES/httpd-2.4.6-mpm-segfault.patch new file mode 100644 index 0000000..55a6d08 --- /dev/null +++ b/SOURCES/httpd-2.4.6-mpm-segfault.patch @@ -0,0 +1,21 @@ +--- a/server/mpm/event/event.c ++++ a/server/mpm/event/event.c +@@ -2735,6 +2735,7 @@ static int event_run(apr_pool_t * _pconf, apr_pool_t * plog, server_rec * s) + + /* we've been told to restart */ + apr_signal(SIGHUP, SIG_IGN); ++ apr_signal(AP_SIG_GRACEFUL, SIG_IGN); + + if (one_process) { + /* not worth thinking about */ + +--- a/server/mpm/worker/worker.c ++++ b/server/mpm/worker/worker.c +@@ -1902,6 +1902,7 @@ static int worker_run(apr_pool_t *_pconf, apr_pool_t *plog, server_rec *s) + + /* we've been told to restart */ + apr_signal(SIGHUP, SIG_IGN); ++ apr_signal(AP_SIG_GRACEFUL, SIG_IGN); + + if (one_process) { + /* not worth thinking about */ diff --git a/SOURCES/httpd-2.4.6-pre_htaccess.patch b/SOURCES/httpd-2.4.6-pre_htaccess.patch new file mode 100644 index 0000000..ff7bd5a --- /dev/null +++ b/SOURCES/httpd-2.4.6-pre_htaccess.patch @@ -0,0 +1,140 @@ +diff --git a/include/ap_mmn.h b/include/ap_mmn.h +index 89c4140..82a0acb 100644 +--- a/include/ap_mmn.h ++++ b/include/ap_mmn.h +@@ -418,6 +418,7 @@ + * ap_proxy_pass_brigade() + * 20120211.22 (2.4.5-dev) No longer prevent usage of strtoul() + * 20120211.23 (2.4.5-dev) Add ap_proxy_clear_connection() ++ * 20120211.24 (2.4.7-dev) add open_htaccess hook. + */ + + #define MODULE_MAGIC_COOKIE 0x41503234UL /* "AP24" */ +@@ -425,7 +426,7 @@ + #ifndef MODULE_MAGIC_NUMBER_MAJOR + #define MODULE_MAGIC_NUMBER_MAJOR 20120211 + #endif +-#define MODULE_MAGIC_NUMBER_MINOR 23 /* 0...n */ ++#define MODULE_MAGIC_NUMBER_MINOR 24 /* 0...n */ + + /** + * Determine if the server's current MODULE_MAGIC_NUMBER is at least a +diff --git a/include/http_config.h b/include/http_config.h +index 7ee3760..c93c3b2 100644 +--- a/include/http_config.h ++++ b/include/http_config.h +@@ -1322,6 +1322,31 @@ AP_DECLARE_HOOK(int,quick_handler,(request_rec *r, int lookup_uri)) + AP_DECLARE_HOOK(void,optional_fn_retrieve,(void)) + + /** ++ * Allow modules to open htaccess files or perform operations before doing so ++ * @param r The current request ++ * @param dir_name The directory for which the htaccess file should be opened ++ * @param access_name The filename for which the htaccess file should be opened ++ * @param conffile Where the pointer to the opened ap_configfile_t must be ++ * stored ++ * @param full_name Where the full file name of the htaccess file must be ++ * stored. ++ * @return APR_SUCCESS on success, ++ * APR_ENOENT or APR_ENOTDIR if no htaccess file exists, ++ * AP_DECLINED to let later modules do the opening, ++ * any other error code on error. ++ */ ++AP_DECLARE_HOOK(apr_status_t,open_htaccess, ++ (request_rec *r, const char *dir_name, const char *access_name, ++ ap_configfile_t **conffile, const char **full_name)) ++ ++/** ++ * Core internal function, use ap_run_open_htaccess() instead. ++ */ ++apr_status_t ap_open_htaccess(request_rec *r, const char *dir_name, ++ const char *access_name, ap_configfile_t **conffile, ++ const char **full_name); ++ ++/** + * A generic pool cleanup that will reset a pointer to NULL. For use with + * apr_pool_cleanup_register. 
+ * @param data The address of the pointer +diff --git a/server/config.c b/server/config.c +index c1aae17..265744e 100644 +--- a/server/config.c ++++ b/server/config.c +@@ -80,6 +80,7 @@ APR_HOOK_STRUCT( + APR_HOOK_LINK(quick_handler) + APR_HOOK_LINK(optional_fn_retrieve) + APR_HOOK_LINK(test_config) ++ APR_HOOK_LINK(open_htaccess) + ) + + AP_IMPLEMENT_HOOK_RUN_ALL(int, header_parser, +@@ -171,6 +172,12 @@ AP_IMPLEMENT_HOOK_RUN_FIRST(int, handler, (request_rec *r), + AP_IMPLEMENT_HOOK_RUN_FIRST(int, quick_handler, (request_rec *r, int lookup), + (r, lookup), DECLINED) + ++AP_IMPLEMENT_HOOK_RUN_FIRST(apr_status_t, open_htaccess, ++ (request_rec *r, const char *dir_name, const char *access_name, ++ ap_configfile_t **conffile, const char **full_name), ++ (r, dir_name, access_name, conffile, full_name), ++ AP_DECLINED) ++ + /* hooks with no args are implemented last, after disabling APR hook probes */ + #if defined(APR_HOOK_PROBES_ENABLED) + #undef APR_HOOK_PROBES_ENABLED +@@ -2073,14 +2080,23 @@ AP_DECLARE(int) ap_process_config_tree(server_rec *s, + return OK; + } + ++apr_status_t ap_open_htaccess(request_rec *r, const char *dir_name, ++ const char *access_name, ++ ap_configfile_t **conffile, ++ const char **full_name) ++{ ++ *full_name = ap_make_full_path(r->pool, dir_name, access_name); ++ return ap_pcfg_openfile(conffile, r->pool, *full_name); ++} ++ + AP_CORE_DECLARE(int) ap_parse_htaccess(ap_conf_vector_t **result, + request_rec *r, int override, + int override_opts, apr_table_t *override_list, +- const char *d, const char *access_name) ++ const char *d, const char *access_names) + { + ap_configfile_t *f = NULL; + cmd_parms parms; +- char *filename = NULL; ++ const char *filename; + const struct htaccess_result *cache; + struct htaccess_result *new; + ap_conf_vector_t *dc = NULL; +@@ -2104,15 +2120,11 @@ AP_CORE_DECLARE(int) ap_parse_htaccess(ap_conf_vector_t **result, + parms.path = apr_pstrdup(r->pool, d); + + /* loop through the access names and find the first one */ +- while (access_name[0]) { +- /* AFAICT; there is no use of the actual 'filename' against +- * any canonicalization, so we will simply take the given +- * name, ignoring case sensitivity and aliases +- */ +- filename = ap_make_full_path(r->pool, d, +- ap_getword_conf(r->pool, &access_name)); +- status = ap_pcfg_openfile(&f, r->pool, filename); ++ while (access_names[0]) { ++ const char *access_name = ap_getword_conf(r->pool, &access_names); + ++ filename = NULL; ++ status = ap_run_open_htaccess(r, d, access_name, &f, &filename); + if (status == APR_SUCCESS) { + const char *errmsg; + ap_directive_t *temptree = NULL; +diff --git a/server/core.c b/server/core.c +index f3965ca..85f876b 100644 +--- a/server/core.c ++++ b/server/core.c +@@ -4930,6 +4930,7 @@ static void register_hooks(apr_pool_t *p) + ap_hook_insert_network_bucket(core_insert_network_bucket, NULL, NULL, + APR_HOOK_REALLY_LAST); + ap_hook_dirwalk_stat(core_dirwalk_stat, NULL, NULL, APR_HOOK_REALLY_LAST); ++ ap_hook_open_htaccess(ap_open_htaccess, NULL, NULL, APR_HOOK_REALLY_LAST); + + /* register the core's insert_filter hook and register core-provided + * filters diff --git a/SOURCES/httpd-2.4.6-r1332643+.patch b/SOURCES/httpd-2.4.6-r1332643+.patch new file mode 100644 index 0000000..cfe6d7b --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1332643+.patch @@ -0,0 +1,313 @@ +# ./pullrev.sh 1332643 1345599 1487772 + +https://bugzilla.redhat.com//show_bug.cgi?id=809599 + +http://svn.apache.org/viewvc?view=revision&revision=1332643 
+http://svn.apache.org/viewvc?view=revision&revision=1345599 +http://svn.apache.org/viewvc?view=revision&revision=1487772 + +--- httpd-2.4.6/modules/ssl/mod_ssl.c.r1332643+ ++++ httpd-2.4.6/modules/ssl/mod_ssl.c +@@ -413,6 +413,37 @@ int ssl_engine_disable(conn_rec *c) + return 1; + } + ++static int modssl_register_npn(conn_rec *c, ++ ssl_npn_advertise_protos advertisefn, ++ ssl_npn_proto_negotiated negotiatedfn) ++{ ++#ifdef HAVE_TLS_NPN ++ SSLConnRec *sslconn = myConnConfig(c); ++ ++ if (!sslconn) { ++ return DECLINED; ++ } ++ ++ if (!sslconn->npn_advertfns) { ++ sslconn->npn_advertfns = ++ apr_array_make(c->pool, 5, sizeof(ssl_npn_advertise_protos)); ++ sslconn->npn_negofns = ++ apr_array_make(c->pool, 5, sizeof(ssl_npn_proto_negotiated)); ++ } ++ ++ if (advertisefn) ++ APR_ARRAY_PUSH(sslconn->npn_advertfns, ssl_npn_advertise_protos) = ++ advertisefn; ++ if (negotiatedfn) ++ APR_ARRAY_PUSH(sslconn->npn_negofns, ssl_npn_proto_negotiated) = ++ negotiatedfn; ++ ++ return OK; ++#else ++ return DECLINED; ++#endif ++} ++ + int ssl_init_ssl_connection(conn_rec *c, request_rec *r) + { + SSLSrvConfigRec *sc; +@@ -584,6 +615,7 @@ static void ssl_register_hooks(apr_pool_ + + APR_REGISTER_OPTIONAL_FN(ssl_proxy_enable); + APR_REGISTER_OPTIONAL_FN(ssl_engine_disable); ++ APR_REGISTER_OPTIONAL_FN(modssl_register_npn); + + ap_register_auth_provider(p, AUTHZ_PROVIDER_GROUP, "ssl", + AUTHZ_PROVIDER_VERSION, +--- httpd-2.4.6/modules/ssl/mod_ssl.h.r1332643+ ++++ httpd-2.4.6/modules/ssl/mod_ssl.h +@@ -63,5 +63,40 @@ APR_DECLARE_OPTIONAL_FN(int, ssl_proxy_e + + APR_DECLARE_OPTIONAL_FN(int, ssl_engine_disable, (conn_rec *)); + ++/** The npn_advertise_protos callback allows another modules to add ++ * entries to the list of protocol names advertised by the server ++ * during the Next Protocol Negotiation (NPN) portion of the SSL ++ * handshake. The callback is given the connection and an APR array; ++ * it should push one or more char*'s pointing to NUL-terminated ++ * strings (such as "http/1.1" or "spdy/2") onto the array and return ++ * OK. To prevent further processing of (other modules') callbacks, ++ * return DONE. */ ++typedef int (*ssl_npn_advertise_protos)(conn_rec *connection, ++ apr_array_header_t *protos); ++ ++/** The npn_proto_negotiated callback allows other modules to discover ++ * the name of the protocol that was chosen during the Next Protocol ++ * Negotiation (NPN) portion of the SSL handshake. Note that this may ++ * be the empty string (in which case modules should probably assume ++ * HTTP), or it may be a protocol that was never even advertised by ++ * the server. The callback is given the connection, a ++ * non-NUL-terminated string containing the protocol name, and the ++ * length of the string; it should do something appropriate ++ * (i.e. insert or remove filters) and return OK. To prevent further ++ * processing of (other modules') callbacks, return DONE. */ ++typedef int (*ssl_npn_proto_negotiated)(conn_rec *connection, ++ const char *proto_name, ++ apr_size_t proto_name_len); ++ ++/* An optional function which can be used to register a pair of ++ * callbacks for NPN handling. This optional function should be ++ * invoked from a pre_connection hook which runs *after* mod_ssl.c's ++ * pre_connection hook. The function returns OK if the callbacks are ++ * register, or DECLINED otherwise (for example if mod_ssl does not ++l * support NPN). 
*/ ++APR_DECLARE_OPTIONAL_FN(int, modssl_register_npn, (conn_rec *conn, ++ ssl_npn_advertise_protos advertisefn, ++ ssl_npn_proto_negotiated negotiatedfn)); ++ + #endif /* __MOD_SSL_H__ */ + /** @} */ +--- httpd-2.4.6/modules/ssl/ssl_engine_init.c.r1332643+ ++++ httpd-2.4.6/modules/ssl/ssl_engine_init.c +@@ -725,6 +725,11 @@ static void ssl_init_ctx_callbacks(serve + #endif + + SSL_CTX_set_info_callback(ctx, ssl_callback_Info); ++ ++#ifdef HAVE_TLS_NPN ++ SSL_CTX_set_next_protos_advertised_cb( ++ ctx, ssl_callback_AdvertiseNextProtos, NULL); ++#endif + } + + static void ssl_init_ctx_verify(server_rec *s, +--- httpd-2.4.6/modules/ssl/ssl_engine_io.c.r1332643+ ++++ httpd-2.4.6/modules/ssl/ssl_engine_io.c +@@ -297,6 +297,7 @@ typedef struct { + apr_pool_t *pool; + char buffer[AP_IOBUFSIZE]; + ssl_filter_ctx_t *filter_ctx; ++ int npn_finished; /* 1 if NPN has finished, 0 otherwise */ + } bio_filter_in_ctx_t; + + /* +@@ -1400,6 +1401,37 @@ static apr_status_t ssl_io_filter_input( + APR_BRIGADE_INSERT_TAIL(bb, bucket); + } + ++#ifdef HAVE_TLS_NPN ++ /* By this point, Next Protocol Negotiation (NPN) should be completed (if ++ * our version of OpenSSL supports it). If we haven't already, find out ++ * which protocol was decided upon and inform other modules by calling ++ * npn_proto_negotiated_hook. */ ++ if (!inctx->npn_finished) { ++ SSLConnRec *sslconn = myConnConfig(f->c); ++ const unsigned char *next_proto = NULL; ++ unsigned next_proto_len = 0; ++ int n; ++ ++ if (sslconn->npn_negofns) { ++ SSL_get0_next_proto_negotiated( ++ inctx->ssl, &next_proto, &next_proto_len); ++ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, APR_SUCCESS, f->c, ++ APLOGNO(02306) "SSL NPN negotiated protocol: '%*s'", ++ next_proto_len, (const char*)next_proto); ++ ++ for (n = 0; n < sslconn->npn_negofns->nelts; n++) { ++ ssl_npn_proto_negotiated fn = ++ APR_ARRAY_IDX(sslconn->npn_negofns, n, ssl_npn_proto_negotiated); ++ ++ if (fn(f->c, (const char *)next_proto, next_proto_len) == DONE) ++ break; ++ } ++ } ++ ++ inctx->npn_finished = 1; ++ } ++#endif ++ + return APR_SUCCESS; + } + +@@ -1881,6 +1913,7 @@ static void ssl_io_input_add_filter(ssl_ + inctx->block = APR_BLOCK_READ; + inctx->pool = c->pool; + inctx->filter_ctx = filter_ctx; ++ inctx->npn_finished = 0; + } + + /* The request_rec pointer is passed in here only to ensure that the +--- httpd-2.4.6/modules/ssl/ssl_engine_kernel.c.r1332643+ ++++ httpd-2.4.6/modules/ssl/ssl_engine_kernel.c +@@ -2161,6 +2161,97 @@ int ssl_callback_SessionTicket(SSL *ssl, + } + #endif /* HAVE_TLS_SESSION_TICKETS */ + ++#ifdef HAVE_TLS_NPN ++/* ++ * This callback function is executed when SSL needs to decide what protocols ++ * to advertise during Next Protocol Negotiation (NPN). It must produce a ++ * string in wire format -- a sequence of length-prefixed strings -- indicating ++ * the advertised protocols. Refer to SSL_CTX_set_next_protos_advertised_cb ++ * in OpenSSL for reference. ++ */ ++int ssl_callback_AdvertiseNextProtos(SSL *ssl, const unsigned char **data_out, ++ unsigned int *size_out, void *arg) ++{ ++ conn_rec *c = (conn_rec*)SSL_get_app_data(ssl); ++ SSLConnRec *sslconn = myConnConfig(c); ++ apr_array_header_t *protos; ++ int num_protos; ++ unsigned int size; ++ int i; ++ unsigned char *data; ++ unsigned char *start; ++ ++ *data_out = NULL; ++ *size_out = 0; ++ ++ /* If the connection object is not available, or there are no NPN ++ * hooks registered, then there's nothing for us to do. 
*/ ++ if (c == NULL || sslconn->npn_advertfns == NULL) { ++ return SSL_TLSEXT_ERR_NOACK; ++ } ++ ++ /* Invoke our npn_advertise_protos hook, giving other modules a chance to ++ * add alternate protocol names to advertise. */ ++ protos = apr_array_make(c->pool, 0, sizeof(char *)); ++ for (i = 0; i < sslconn->npn_advertfns->nelts; i++) { ++ ssl_npn_advertise_protos fn = ++ APR_ARRAY_IDX(sslconn->npn_advertfns, i, ssl_npn_advertise_protos); ++ ++ if (fn(c, protos) == DONE) ++ break; ++ } ++ num_protos = protos->nelts; ++ ++ /* We now have a list of null-terminated strings; we need to concatenate ++ * them together into a single string, where each protocol name is prefixed ++ * by its length. First, calculate how long that string will be. */ ++ size = 0; ++ for (i = 0; i < num_protos; ++i) { ++ const char *string = APR_ARRAY_IDX(protos, i, const char*); ++ unsigned int length = strlen(string); ++ /* If the protocol name is too long (the length must fit in one byte), ++ * then log an error and skip it. */ ++ if (length > 255) { ++ ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, c, APLOGNO(02307) ++ "SSL NPN protocol name too long (length=%u): %s", ++ length, string); ++ continue; ++ } ++ /* Leave room for the length prefix (one byte) plus the protocol name ++ * itself. */ ++ size += 1 + length; ++ } ++ ++ /* If there is nothing to advertise (either because no modules added ++ * anything to the protos array, or because all strings added to the array ++ * were skipped), then we're done. */ ++ if (size == 0) { ++ return SSL_TLSEXT_ERR_NOACK; ++ } ++ ++ /* Now we can build the string. Copy each protocol name string into the ++ * larger string, prefixed by its length. */ ++ data = apr_palloc(c->pool, size * sizeof(unsigned char)); ++ start = data; ++ for (i = 0; i < num_protos; ++i) { ++ const char *string = APR_ARRAY_IDX(protos, i, const char*); ++ apr_size_t length = strlen(string); ++ if (length > 255) ++ continue; ++ *start = (unsigned char)length; ++ ++start; ++ memcpy(start, string, length * sizeof(unsigned char)); ++ start += length; ++ } ++ ++ /* Success. */ ++ *data_out = data; ++ *size_out = size; ++ return SSL_TLSEXT_ERR_OK; ++} ++ ++#endif /* HAVE_TLS_NPN */ ++ + #ifndef OPENSSL_NO_SRP + + int ssl_callback_SRPServerParams(SSL *ssl, int *ad, void *arg) +--- httpd-2.4.6/modules/ssl/ssl_private.h.r1332643+ ++++ httpd-2.4.6/modules/ssl/ssl_private.h +@@ -98,6 +98,8 @@ + #include + #include + ++#include "mod_ssl.h" ++ + /* Avoid tripping over an engine build installed globally and detected + * when the user points at an explicit non-engine flavor of OpenSSL + */ +@@ -139,6 +141,11 @@ + #define HAVE_FIPS + #endif + ++#if OPENSSL_VERSION_NUMBER >= 0x10001000L && !defined(OPENSSL_NO_NEXTPROTONEG) \ ++ && !defined(OPENSSL_NO_TLSEXT) ++#define HAVE_TLS_NPN ++#endif ++ + #if (OPENSSL_VERSION_NUMBER >= 0x10000000) + #define MODSSL_SSL_CIPHER_CONST const + #define MODSSL_SSL_METHOD_CONST const +@@ -487,6 +494,12 @@ typedef struct { + * connection */ + } reneg_state; + ++#ifdef HAVE_TLS_NPN ++ /* Poor man's inter-module optional hooks for NPN. */ ++ apr_array_header_t *npn_advertfns; /* list of ssl_npn_advertise_protos callbacks */ ++ apr_array_header_t *npn_negofns; /* list of ssl_npn_proto_negotiated callbacks. 
*/ ++#endif ++ + server_rec *server; + } SSLConnRec; + +@@ -842,6 +855,7 @@ int ssl_callback_ServerNameIndi + int ssl_callback_SessionTicket(SSL *, unsigned char *, unsigned char *, + EVP_CIPHER_CTX *, HMAC_CTX *, int); + #endif ++int ssl_callback_AdvertiseNextProtos(SSL *ssl, const unsigned char **data, unsigned int *len, void *arg); + + /** Session Cache Support */ + void ssl_scache_init(server_rec *, apr_pool_t *); diff --git a/SOURCES/httpd-2.4.6-r1348019.patch b/SOURCES/httpd-2.4.6-r1348019.patch new file mode 100644 index 0000000..b8cca1c --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1348019.patch @@ -0,0 +1,77 @@ +diff --git a/modules/proxy/proxy_util.c b/modules/proxy/proxy_util.c +index 2121892..6f904b2 100644 +--- a/modules/proxy/proxy_util.c ++++ b/modules/proxy/proxy_util.c +@@ -2838,33 +2838,48 @@ PROXY_DECLARE(int) ap_proxy_connect_backend(const char *proxy_function, + + connected = 1; + } +- /* +- * Put the entire worker to error state if +- * the PROXY_WORKER_IGNORE_ERRORS flag is not set. +- * Altrough some connections may be alive +- * no further connections to the worker could be made +- */ +- if (!connected && PROXY_WORKER_IS_USABLE(worker) && +- !(worker->s->status & PROXY_WORKER_IGNORE_ERRORS)) { +- worker->s->error_time = apr_time_now(); +- worker->s->status |= PROXY_WORKER_IN_ERROR; +- ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(00959) +- "ap_proxy_connect_backend disabling worker for (%s) for %" +- APR_TIME_T_FMT "s", +- worker->s->hostname, apr_time_sec(worker->s->retry)); ++ ++ if (PROXY_WORKER_IS_USABLE(worker)) { ++ /* ++ * Put the entire worker to error state if ++ * the PROXY_WORKER_IGNORE_ERRORS flag is not set. ++ * Although some connections may be alive ++ * no further connections to the worker could be made ++ */ ++ if (!connected) { ++ if (!(worker->s->status & PROXY_WORKER_IGNORE_ERRORS)) { ++ worker->s->error_time = apr_time_now(); ++ worker->s->status |= PROXY_WORKER_IN_ERROR; ++ ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(00959) ++ "ap_proxy_connect_backend disabling worker for (%s) for %" ++ APR_TIME_T_FMT "s", ++ worker->s->hostname, apr_time_sec(worker->s->retry)); ++ } ++ } ++ else { ++ if (worker->s->retries) { ++ /* ++ * A worker came back. So here is where we need to ++ * either reset all params to initial conditions or ++ * apply some sort of aging ++ */ ++ } ++ worker->s->error_time = 0; ++ worker->s->retries = 0; ++ } ++ return connected ? OK : DECLINED; + } + else { +- if (worker->s->retries) { +- /* +- * A worker came back. So here is where we need to +- * either reset all params to initial conditions or +- * apply some sort of aging +- */ +- } +- worker->s->error_time = 0; +- worker->s->retries = 0; ++ /* ++ * The worker is in error likely done by a different thread / process ++ * e.g. for a timeout or bad status. We should respect this and should ++ * not continue with a connection via this worker even if we got one. ++ */ ++ if (connected) { ++ socket_cleanup(conn); ++ } ++ return DECLINED; + } +- return connected ? 
OK : DECLINED; + } + + PROXY_DECLARE(int) ap_proxy_connection_create(const char *proxy_function, diff --git a/SOURCES/httpd-2.4.6-r1420184.patch b/SOURCES/httpd-2.4.6-r1420184.patch new file mode 100644 index 0000000..7de3031 --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1420184.patch @@ -0,0 +1,154 @@ +Index: modules/aaa/mod_authz_owner.c +=================================================================== +--- a/modules/aaa/mod_authz_owner.c (revision 1420183) ++++ b/modules/aaa/mod_authz_owner.c (revision 1420184) +@@ -28,9 +28,8 @@ + #include "http_request.h" + + #include "mod_auth.h" ++#include "mod_authz_owner.h" + +-APR_DECLARE_OPTIONAL_FN(char*, authz_owner_get_file_group, (request_rec *r)); +- + static const command_rec authz_owner_cmds[] = + { + {NULL} +Index: modules/aaa/mod_authz_owner.h +=================================================================== +--- a/modules/aaa/mod_authz_owner.h (revision 0) ++++ b/modules/aaa/mod_authz_owner.h (revision 1420184) +@@ -0,0 +1,27 @@ ++/* Licensed to the Apache Software Foundation (ASF) under one or more ++ * contributor license agreements. See the NOTICE file distributed with ++ * this work for additional information regarding copyright ownership. ++ * The ASF licenses this file to You under the Apache License, Version 2.0 ++ * (the "License"); you may not use this file except in compliance with ++ * the License. You may obtain a copy of the License at ++ * ++ * http://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, ++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ * See the License for the specific language governing permissions and ++ * limitations under the License. ++ */ ++ ++#ifndef MOD_AUTHZ_OWNER_H ++#define MOD_AUTHZ_OWNER_H ++ ++#include "http_request.h" ++ ++/* mod_authz_owner exports an optional function which retrieves the ++ * group name of the file identified by r->filename, if available, or ++ * else returns NULL. 
*/ ++APR_DECLARE_OPTIONAL_FN(char*, authz_owner_get_file_group, (request_rec *r)); ++ ++#endif /* MOD_AUTHZ_OWNER_H */ +Index: modules/aaa/mod_authz_groupfile.c +=================================================================== +--- a/modules/aaa/mod_authz_groupfile.c (revision 1420183) ++++ b/modules/aaa/mod_authz_groupfile.c (revision 1420184) +@@ -55,13 +55,12 @@ + #include "util_varbuf.h" + + #include "mod_auth.h" ++#include "mod_authz_owner.h" + + typedef struct { + char *groupfile; + } authz_groupfile_config_rec; + +-APR_DECLARE_OPTIONAL_FN(char*, authz_owner_get_file_group, (request_rec *r)); +- + static void *create_authz_groupfile_dir_config(apr_pool_t *p, char *d) + { + authz_groupfile_config_rec *conf = apr_palloc(p, sizeof(*conf)); +@@ -200,7 +199,7 @@ + return AUTHZ_DENIED; + } + +-APR_OPTIONAL_FN_TYPE(authz_owner_get_file_group) *authz_owner_get_file_group; ++static APR_OPTIONAL_FN_TYPE(authz_owner_get_file_group) *authz_owner_get_file_group; + + static authz_status filegroup_check_authorization(request_rec *r, + const char *require_args, +@@ -279,10 +278,14 @@ + NULL, + }; + +-static void register_hooks(apr_pool_t *p) ++ ++static void authz_groupfile_getfns(void) + { + authz_owner_get_file_group = APR_RETRIEVE_OPTIONAL_FN(authz_owner_get_file_group); ++} + ++static void register_hooks(apr_pool_t *p) ++{ + ap_register_auth_provider(p, AUTHZ_PROVIDER_GROUP, "group", + AUTHZ_PROVIDER_VERSION, + &authz_group_provider, +@@ -291,6 +294,7 @@ + AUTHZ_PROVIDER_VERSION, + &authz_filegroup_provider, + AP_AUTH_INTERNAL_PER_CONF); ++ ap_hook_optional_fn_retrieve(authz_groupfile_getfns, NULL, NULL, APR_HOOK_MIDDLE); + } + + AP_DECLARE_MODULE(authz_groupfile) = +Index: modules/aaa/mod_authz_dbm.c +=================================================================== +--- a/modules/aaa/mod_authz_dbm.c (revision 1420183) ++++ b/modules/aaa/mod_authz_dbm.c (revision 1420184) +@@ -29,6 +29,7 @@ + #include "http_request.h" /* for ap_hook_(check_user_id | auth_checker)*/ + + #include "mod_auth.h" ++#include "mod_authz_owner.h" + + typedef struct { + const char *grpfile; +@@ -35,9 +36,7 @@ + const char *dbmtype; + } authz_dbm_config_rec; + +-APR_DECLARE_OPTIONAL_FN(char*, authz_owner_get_file_group, (request_rec *r)); + +- + /* This should go into APR; perhaps with some nice + * caching/locking/flocking of the open dbm file. 
+ */ +@@ -199,7 +198,7 @@ + return AUTHZ_DENIED; + } + +-APR_OPTIONAL_FN_TYPE(authz_owner_get_file_group) *authz_owner_get_file_group; ++static APR_OPTIONAL_FN_TYPE(authz_owner_get_file_group) *authz_owner_get_file_group; + + static authz_status dbmfilegroup_check_authorization(request_rec *r, + const char *require_args, +@@ -279,11 +278,13 @@ + NULL, + }; + ++static void authz_dbm_getfns(void) ++{ ++ authz_owner_get_file_group = APR_RETRIEVE_OPTIONAL_FN(authz_owner_get_file_group); ++} + + static void register_hooks(apr_pool_t *p) + { +- authz_owner_get_file_group = APR_RETRIEVE_OPTIONAL_FN(authz_owner_get_file_group); +- + ap_register_auth_provider(p, AUTHZ_PROVIDER_GROUP, "dbm-group", + AUTHZ_PROVIDER_VERSION, + &authz_dbmgroup_provider, +@@ -292,6 +293,7 @@ + AUTHZ_PROVIDER_VERSION, + &authz_dbmfilegroup_provider, + AP_AUTH_INTERNAL_PER_CONF); ++ ap_hook_optional_fn_retrieve(authz_dbm_getfns, NULL, NULL, APR_HOOK_MIDDLE); + } + + AP_DECLARE_MODULE(authz_dbm) = diff --git a/SOURCES/httpd-2.4.6-r1506474.patch b/SOURCES/httpd-2.4.6-r1506474.patch new file mode 100644 index 0000000..182bc04 --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1506474.patch @@ -0,0 +1,13 @@ +--- a/server/core.c 2013/07/24 09:49:38 1506473 ++++ b/server/core.c 2013/07/24 09:51:14 1506474 +@@ -1481,7 +1481,9 @@ + conf->ap_document_root = arg; + } + else { +- return "DocumentRoot must be a directory"; ++ return apr_psprintf(cmd->pool, ++ "DocumentRoot '%s' is not a directory, or is not readable", ++ arg); + } + } + return NULL; diff --git a/SOURCES/httpd-2.4.6-r1507681+.patch b/SOURCES/httpd-2.4.6-r1507681+.patch new file mode 100644 index 0000000..82a3b41 --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1507681+.patch @@ -0,0 +1,62 @@ +# ./pullrev.sh 1507681 1533447 + +https://bugzilla.redhat.com/show_bug.cgi?id=1004046 + +http://svn.apache.org/viewvc?view=revision&revision=1507681 +http://svn.apache.org/viewvc?view=revision&revision=1533447 + +--- httpd-2.4.6/modules/dav/main/mod_dav.c ++++ httpd-2.4.6/modules/dav/main/mod_dav.c +@@ -2756,7 +2756,7 @@ + * The multistatus responses will contain the information about any + * resource that fails the validation. + * +- * We check the parent resource, too, since this is a MOVE. Moving the ++ * We check the parent resource, too, if this is a MOVE. Moving the + * resource effectively removes it from the parent collection, so we + * must ensure that we have met the appropriate conditions. + * +@@ -2765,7 +2765,9 @@ + */ + if ((err = dav_validate_request(r, resource, depth, NULL, + &multi_response, +- DAV_VALIDATE_PARENT ++ (is_move ? DAV_VALIDATE_PARENT ++ : DAV_VALIDATE_RESOURCE ++ | DAV_VALIDATE_NO_MODIFY) + | DAV_VALIDATE_USE_424, + NULL)) != NULL) { + err = dav_push_error(r->pool, err->status, 0, +--- httpd-2.4.6/modules/dav/main/util.c ++++ httpd-2.4.6/modules/dav/main/util.c +@@ -954,13 +954,16 @@ + /* + ** For methods other than LOCK: + ** +- ** If we have no locks, then can be set to true -- ++ ** If we have no locks or if the resource is not being modified ++ ** (per RFC 4918 the lock token is not required on resources ++ ** we are not changing), then can be set to true -- + ** pretending that we've already met the requirement of seeing one + ** of the resource's locks in the If: header. + ** + ** Otherwise, it must be cleared and we'll look for one. 
+ */ +- seen_locktoken = (lock_list == NULL); ++ seen_locktoken = (lock_list == NULL ++ || flags & DAV_VALIDATE_NO_MODIFY); + } + + /* +--- httpd-2.4.6/modules/dav/main/mod_dav.h ++++ httpd-2.4.6/modules/dav/main/mod_dav.h +@@ -1297,6 +1297,9 @@ + the 424 DAV:response */ + #define DAV_VALIDATE_USE_424 0x0080 /* return 424 status, not 207 */ + #define DAV_VALIDATE_IS_PARENT 0x0100 /* for internal use */ ++#define DAV_VALIDATE_NO_MODIFY 0x0200 /* resource is not being modified ++ so allow even if lock token ++ is not provided */ + + /* Lock-null related public lock functions */ + DAV_DECLARE(int) dav_get_resource_state(request_rec *r, diff --git a/SOURCES/httpd-2.4.6-r1515372.patch b/SOURCES/httpd-2.4.6-r1515372.patch new file mode 100644 index 0000000..dc782ac --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1515372.patch @@ -0,0 +1,11 @@ +--- a/modules/ldap/util_ldap_cache.c 2013/08/19 11:41:29 1515371 ++++ b/modules/ldap/util_ldap_cache.c 2013/08/19 11:45:19 1515372 +@@ -52,7 +52,7 @@ + + if (node) { + if (!(node->url = util_ald_strdup(cache, n->url))) { +- util_ald_free(cache, node->url); ++ util_ald_free(cache, node); + return NULL; + } + node->search_cache = n->search_cache; diff --git a/SOURCES/httpd-2.4.6-r1524368.patch b/SOURCES/httpd-2.4.6-r1524368.patch new file mode 100644 index 0000000..235d977 --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1524368.patch @@ -0,0 +1,20 @@ +--- a/modules/proxy/mod_proxy_fcgi.c 2013/09/18 11:17:28 1524367 ++++ b/modules/proxy/mod_proxy_fcgi.c 2013/09/18 11:18:02 1524368 +@@ -429,15 +429,13 @@ + ob = apr_brigade_create(r->pool, c->bucket_alloc); + + while (! done) { +- apr_interval_time_t timeout = conn->worker->s->timeout; ++ apr_interval_time_t timeout; + apr_size_t len; + int n; + + /* We need SOME kind of timeout here, or virtually anything will + * cause timeout errors. */ +- if (! conn->worker->s->timeout_set) { +- timeout = apr_time_from_sec(30); +- } ++ apr_socket_timeout_get(conn->sock, &timeout); + + rv = apr_poll(&pfd, 1, &n, timeout); + if (rv != APR_SUCCESS) { diff --git a/SOURCES/httpd-2.4.6-r1526189.patch b/SOURCES/httpd-2.4.6-r1526189.patch new file mode 100644 index 0000000..f1a7333 --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1526189.patch @@ -0,0 +1,62 @@ +diff --git a/modules/proxy/mod_proxy.h b/modules/proxy/mod_proxy.h +index 81fd14c..cd1710f 100644 +--- a/modules/proxy/mod_proxy.h ++++ b/modules/proxy/mod_proxy.h +@@ -856,6 +856,17 @@ PROXY_DECLARE(int) ap_proxy_connect_backend(const char *proxy_function, + PROXY_DECLARE(int) ap_proxy_connection_create(const char *proxy_function, + proxy_conn_rec *conn, + conn_rec *c, server_rec *s); ++ ++/** ++ * Determine if proxy connection can potentially be reused at the ++ * end of this request. ++ * @param conn proxy connection ++ * @return non-zero if reusable, 0 otherwise ++ * @note Even if this function returns non-zero, the connection may ++ * be subsequently marked for closure. ++ */ ++PROXY_DECLARE(int) ap_proxy_connection_reusable(proxy_conn_rec *conn); ++ + /** + * Signal the upstream chain that the connection to the backend broke in the + * middle of the response. 
This is done by sending an error bucket with +diff --git a/modules/proxy/mod_proxy_fcgi.c b/modules/proxy/mod_proxy_fcgi.c +index 0f84416..c57696a 100644 +--- a/modules/proxy/mod_proxy_fcgi.c ++++ b/modules/proxy/mod_proxy_fcgi.c +@@ -247,7 +247,7 @@ static apr_status_t send_begin_request(proxy_conn_rec *conn, int request_id) + + brb.roleB1 = ((FCGI_RESPONDER >> 8) & 0xff); + brb.roleB0 = ((FCGI_RESPONDER) & 0xff); +- brb.flags = FCGI_KEEP_CONN; ++ brb.flags = ap_proxy_connection_reusable(conn) ? FCGI_KEEP_CONN : 0; + brb.reserved[0] = 0; + brb.reserved[1] = 0; + brb.reserved[2] = 0; +diff --git a/modules/proxy/proxy_util.c b/modules/proxy/proxy_util.c +index 8bc9fab..ca70ae4 100644 +--- a/modules/proxy/proxy_util.c ++++ b/modules/proxy/proxy_util.c +@@ -1333,6 +1333,13 @@ static void init_conn_pool(apr_pool_t *p, proxy_worker *worker) + worker->cp = cp; + } + ++PROXY_DECLARE(int) ap_proxy_connection_reusable(proxy_conn_rec *conn) ++{ ++ proxy_worker *worker = conn->worker; ++ ++ return ! (conn->close || !worker->s->is_address_reusable || worker->s->disablereuse); ++} ++ + static apr_status_t connection_cleanup(void *theconn) + { + proxy_conn_rec *conn = (proxy_conn_rec *)theconn; +@@ -1361,7 +1368,7 @@ static apr_status_t connection_cleanup(void *theconn) + } + + /* determine if the connection need to be closed */ +- if (conn->close || !worker->s->is_address_reusable || worker->s->disablereuse) { ++ if (!ap_proxy_connection_reusable(conn)) { + apr_pool_t *p = conn->pool; + apr_pool_clear(p); + conn = apr_pcalloc(p, sizeof(proxy_conn_rec)); diff --git a/SOURCES/httpd-2.4.6-r1527509.patch b/SOURCES/httpd-2.4.6-r1527509.patch new file mode 100644 index 0000000..103a460 --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1527509.patch @@ -0,0 +1,21 @@ +diff -Npru httpd-2.4.6.orig/modules/dav/main/mod_dav.c httpd-2.4.6/modules/dav/main/mod_dav.c +--- httpd-2.4.6.orig/modules/dav/main/mod_dav.c 2015-06-24 12:24:47.920000000 -0400 ++++ httpd-2.4.6/modules/dav/main/mod_dav.c 2015-06-24 12:27:19.706000000 -0400 +@@ -316,6 +316,8 @@ static int dav_error_response(request_re + { + r->status = status; + ++ r->status_line = ap_get_status_line(status); ++ + ap_set_content_type(r, "text/html; charset=ISO-8859-1"); + + /* begin the response now... 
*/ +@@ -347,6 +349,8 @@ static int dav_error_response_tag(reques + { + r->status = err->status; + ++ r->status_line = ap_get_status_line(err->status); ++ + ap_set_content_type(r, DAV_XML_CONTENT_TYPE); + + ap_rputs(DAV_XML_HEADER DEBUG_CR diff --git a/SOURCES/httpd-2.4.6-r1528556.patch b/SOURCES/httpd-2.4.6-r1528556.patch new file mode 100644 index 0000000..bab51aa --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1528556.patch @@ -0,0 +1,23 @@ +Index: modules/mappers/mod_rewrite.c +=================================================================== +--- a/modules/mappers/mod_rewrite.c (revision 1499025) ++++ b/modules/mappers/mod_rewrite.c (revision 1528556) +@@ -589,6 +589,18 @@ + return 7; + } + break; ++ ++ case 'w': ++ case 'W': ++ if (!strncasecmp(uri, "s://", 4)) { /* ws:// */ ++ *sqs = 1; ++ return 5; ++ } ++ else if (!strncasecmp(uri, "ss://", 5)) { /* wss:// */ ++ *sqs = 1; ++ return 6; ++ } ++ break; + } + + return 0; diff --git a/SOURCES/httpd-2.4.6-r1528958.patch b/SOURCES/httpd-2.4.6-r1528958.patch new file mode 100644 index 0000000..82bfd6e --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1528958.patch @@ -0,0 +1,46 @@ +--- a/server/vhost.c 2013/07/15 11:50:50 1503188 ++++ b/server/vhost.c 2013/10/03 18:31:22 1528958 +@@ -577,14 +577,22 @@ + */ + + for (s = main_s->next; s; s = s->next) { ++ server_addr_rec *sar_prev = NULL; + has_default_vhost_addr = 0; + for (sar = s->addrs; sar; sar = sar->next) { + ipaddr_chain *ic; + char inaddr_any[16] = {0}; /* big enough to handle IPv4 or IPv6 */ +- ++ /* XXX: this treats 0.0.0.0 as a "default" server which matches no-exact-match for IPv6 */ + if (!memcmp(sar->host_addr->ipaddr_ptr, inaddr_any, sar->host_addr->ipaddr_len)) { + ic = find_default_server(sar->host_port); +- if (!ic || sar->host_port != ic->sar->host_port) { ++ ++ if (ic && sar->host_port == ic->sar->host_port) { /* we're a match for an existing "default server" */ ++ if (!sar_prev || memcmp(sar_prev->host_addr->ipaddr_ptr, inaddr_any, sar_prev->host_addr->ipaddr_len) ++ || sar_prev->host_port != sar->host_port) { ++ add_name_vhost_config(p, main_s, s, sar, ic); ++ } ++ } ++ else { + /* No default server, or we found a default server but + ** exactly one of us is a wildcard port, which means we want + ** two ip-based vhosts not an NVH with two names +@@ -592,6 +600,7 @@ + ic = new_ipaddr_chain(p, s, sar); + ic->next = default_list; + default_list = ic; ++ add_name_vhost_config(p, main_s, s, sar, ic); + } + has_default_vhost_addr = 1; + } +@@ -609,8 +618,9 @@ + ic->next = *iphash_table_tail[bucket]; + *iphash_table_tail[bucket] = ic; + } ++ add_name_vhost_config(p, main_s, s, sar, ic); + } +- add_name_vhost_config(p, main_s, s, sar, ic); ++ sar_prev = sar; + } + + /* Ok now we want to set up a server_hostname if the user was diff --git a/SOURCES/httpd-2.4.6-r1530280.patch b/SOURCES/httpd-2.4.6-r1530280.patch new file mode 100644 index 0000000..ea748e1 --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1530280.patch @@ -0,0 +1,59 @@ +--- a/modules/http/http_filters.c 2013/10/08 14:17:33 1530279 ++++ b/modules/http/http_filters.c 2013/10/08 14:18:44 1530280 +@@ -825,7 +825,7 @@ + * handler. + * Zap r->status_line if bad. + */ +-static void validate_status_line(request_rec *r) ++static apr_status_t validate_status_line(request_rec *r) + { + char *end; + +@@ -836,15 +836,19 @@ + || (end - 3) != r->status_line + || (len >= 4 && ! 
apr_isspace(r->status_line[3]))) { + r->status_line = NULL; ++ return APR_EGENERAL; + } + /* Since we passed the above check, we know that length three + * is equivalent to only a 3 digit numeric http status. + * RFC2616 mandates a trailing space, let's add it. + */ +- else if (len == 3) { ++ if (len == 3) { + r->status_line = apr_pstrcat(r->pool, r->status_line, " ", NULL); ++ return APR_EGENERAL; + } ++ return APR_SUCCESS; + } ++ return APR_EGENERAL; + } + + /* +@@ -856,15 +860,25 @@ + static void basic_http_header_check(request_rec *r, + const char **protocol) + { ++ apr_status_t rv; ++ + if (r->assbackwards) { + /* no such thing as a response protocol */ + return; + } + +- validate_status_line(r); ++ rv = validate_status_line(r); + + if (!r->status_line) { + r->status_line = ap_get_status_line(r->status); ++ } else if (rv != APR_SUCCESS) { ++ /* Status line is OK but our own reason phrase ++ * would be preferred if defined ++ */ ++ const char *tmp = ap_get_status_line(r->status); ++ if (!strncmp(tmp, r->status_line, 3)) { ++ r->status_line = tmp; ++ } + } + + /* Note that we must downgrade before checking for force responses. */ diff --git a/SOURCES/httpd-2.4.6-r1530999.patch b/SOURCES/httpd-2.4.6-r1530999.patch new file mode 100644 index 0000000..2c13959 --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1530999.patch @@ -0,0 +1,19 @@ +# ./pullrev.sh 1530999 + +http://svn.apache.org/viewvc?view=revision&revision=1530999 + +https://bugzilla.redhat.com/show_bug.cgi?id=1557785 + +--- httpd-2.4.6/server/core_filters.c ++++ httpd-2.4.6/server/core_filters.c +@@ -745,7 +745,9 @@ + pollset.reqevents = APR_POLLOUT; + pollset.desc.s = s; + apr_socket_timeout_get(s, &timeout); +- rv = apr_poll(&pollset, 1, &nsds, timeout); ++ do { ++ rv = apr_poll(&pollset, 1, &nsds, timeout); ++ } while (APR_STATUS_IS_EINTR(rv)); + if (rv != APR_SUCCESS) { + break; + } diff --git a/SOURCES/httpd-2.4.6-r1533448.patch b/SOURCES/httpd-2.4.6-r1533448.patch new file mode 100644 index 0000000..3b90cb1 --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1533448.patch @@ -0,0 +1,155 @@ +Index: modules/dav/fs/repos.c +=================================================================== +--- a/modules/dav/fs/repos.c (revision 1533447) ++++ b/modules/dav/fs/repos.c (revision 1533448) +@@ -717,13 +717,13 @@ + resource->pool = r->pool; + + /* make sure the URI does not have a trailing "/" */ +- len = strlen(r->uri); +- if (len > 1 && r->uri[len - 1] == '/') { +- s = apr_pstrmemdup(r->pool, r->uri, len-1); ++ len = strlen(r->unparsed_uri); ++ if (len > 1 && r->unparsed_uri[len - 1] == '/') { ++ s = apr_pstrmemdup(r->pool, r->unparsed_uri, len-1); + resource->uri = s; + } + else { +- resource->uri = r->uri; ++ resource->uri = r->unparsed_uri; + } + + if (r->finfo.filetype != APR_NOFILE) { +@@ -1482,6 +1482,18 @@ + return dav_fs_deleteset(info->pool, resource); + } + ++/* Take an unescaped path component and escape it and append it onto a ++ * dav_buffer for a URI */ ++static apr_size_t dav_fs_append_uri(apr_pool_t *p, dav_buffer *pbuf, ++ const char *path, apr_size_t pad) ++{ ++ const char *epath = ap_escape_uri(p, path); ++ apr_size_t epath_len = strlen(epath); ++ ++ dav_buffer_place_mem(p, pbuf, epath, epath_len + 1, pad); ++ return epath_len; ++} ++ + /* ### move this to dav_util? */ + /* Walk recursively down through directories, * + * including lock-null resources as we go. 
*/ +@@ -1537,6 +1549,7 @@ + } + while ((apr_dir_read(&dirent, APR_FINFO_DIRENT, dirp)) == APR_SUCCESS) { + apr_size_t len; ++ apr_size_t escaped_len; + + len = strlen(dirent.name); + +@@ -1579,7 +1592,7 @@ + + /* copy the file to the URI, too. NOTE: we will pad an extra byte + for the trailing slash later. */ +- dav_buffer_place_mem(pool, &fsctx->uri_buf, dirent.name, len + 1, 1); ++ escaped_len = dav_fs_append_uri(pool, &fsctx->uri_buf, dirent.name, 1); + + /* if there is a secondary path, then do that, too */ + if (fsctx->path2.buf != NULL) { +@@ -1612,7 +1625,7 @@ + fsctx->path2.cur_len += len; + + /* adjust URI length to incorporate subdir and a slash */ +- fsctx->uri_buf.cur_len += len + 1; ++ fsctx->uri_buf.cur_len += escaped_len + 1; + fsctx->uri_buf.buf[fsctx->uri_buf.cur_len - 1] = '/'; + fsctx->uri_buf.buf[fsctx->uri_buf.cur_len] = '\0'; + +@@ -1678,8 +1691,8 @@ + */ + dav_buffer_place_mem(pool, &fsctx->path1, + fsctx->locknull_buf.buf + offset, len + 1, 0); +- dav_buffer_place_mem(pool, &fsctx->uri_buf, +- fsctx->locknull_buf.buf + offset, len + 1, 0); ++ dav_fs_append_uri(pool, &fsctx->uri_buf, ++ fsctx->locknull_buf.buf + offset, 0); + if (fsctx->path2.buf != NULL) { + dav_buffer_place_mem(pool, &fsctx->path2, + fsctx->locknull_buf.buf + offset, +Index: modules/dav/main/mod_dav.c +=================================================================== +--- a/modules/dav/main/mod_dav.c (revision 1533447) ++++ b/modules/dav/main/mod_dav.c (revision 1533448) +@@ -396,11 +396,9 @@ + */ + static const char *dav_xml_escape_uri(apr_pool_t *p, const char *uri) + { +- const char *e_uri = ap_escape_uri(p, uri); +- + /* check the easy case... */ +- if (ap_strchr_c(e_uri, '&') == NULL) +- return e_uri; ++ if (ap_strchr_c(uri, '&') == NULL) ++ return uri; + + /* there was a '&', so more work is needed... sigh. */ + +@@ -408,7 +406,7 @@ + * Note: this is a teeny bit of overkill since we know there are no + * '<' or '>' characters, but who cares. + */ +- return apr_xml_quote_string(p, e_uri, 0); ++ return apr_xml_quote_string(p, uri, 0); + } + + +@@ -604,7 +602,8 @@ + return DONE; + } + +-/* handy function for return values of methods that (may) create things */ ++/* handy function for return values of methods that (may) create things. ++ * locn if provided is assumed to be escaped. */ + static int dav_created(request_rec *r, const char *locn, const char *what, + int replaced) + { +@@ -612,8 +611,6 @@ + + if (locn == NULL) { + locn = r->unparsed_uri; +- } else { +- locn = ap_escape_uri(r->pool, locn); + } + + /* did the target resource already exist? 
*/ +@@ -3004,7 +3001,7 @@ + } + + /* return an appropriate response (HTTP_CREATED or HTTP_NO_CONTENT) */ +- return dav_created(r, lookup.rnew->uri, "Destination", ++ return dav_created(r, lookup.rnew->unparsed_uri, "Destination", + resnew_state == DAV_RESOURCE_EXISTS); + } + +@@ -4610,7 +4607,7 @@ + + /* return an appropriate response (HTTP_CREATED) */ + /* ### spec doesn't say what happens when destination was replaced */ +- return dav_created(r, lookup.rnew->uri, "Binding", 0); ++ return dav_created(r, lookup.rnew->unparsed_uri, "Binding", 0); + } + + +Index: modules/dav/main/mod_dav.h +=================================================================== +--- a/modules/dav/main/mod_dav.h (revision 1533447) ++++ b/modules/dav/main/mod_dav.h (revision 1533448) +@@ -386,7 +386,7 @@ + * REGULAR and WORKSPACE resources, + * and is always 1 for WORKING */ + +- const char *uri; /* the URI for this resource */ ++ const char *uri; /* the escaped URI for this resource */ + + dav_resource_private *info; /* the provider's private info */ + diff --git a/SOURCES/httpd-2.4.6-r1537535.patch b/SOURCES/httpd-2.4.6-r1537535.patch new file mode 100644 index 0000000..dc2c6c9 --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1537535.patch @@ -0,0 +1,38 @@ +# ./pullrev.sh 1537535 + +http://svn.apache.org/viewvc?view=revision&revision=1537535 + +--- httpd-2.4.6/modules/ssl/ssl_engine_config.c.r1537535 ++++ httpd-2.4.6/modules/ssl/ssl_engine_config.c +@@ -198,7 +198,7 @@ static SSLSrvConfigRec *ssl_config_serve + SSLSrvConfigRec *sc = apr_palloc(p, sizeof(*sc)); + + sc->mc = NULL; +- sc->enabled = SSL_ENABLED_FALSE; ++ sc->enabled = SSL_ENABLED_UNSET; + sc->proxy_enabled = UNSET; + sc->vhost_id = NULL; /* set during module init */ + sc->vhost_id_len = 0; /* set during module init */ +--- httpd-2.4.6/modules/ssl/ssl_engine_init.c.r1537535 ++++ httpd-2.4.6/modules/ssl/ssl_engine_init.c +@@ -289,13 +289,16 @@ int ssl_init_Module(apr_pool_t *p, apr_p + sc->vhost_id = ssl_util_vhostid(p, s); + sc->vhost_id_len = strlen(sc->vhost_id); + +- if (ap_get_server_protocol(s) && +- strcmp("https", ap_get_server_protocol(s)) == 0) { ++ /* Default to enabled if SSLEngine is not set explicitly, and ++ * the protocol is https. */ ++ if (ap_get_server_protocol(s) ++ && strcmp("https", ap_get_server_protocol(s)) == 0 ++ && sc->enabled == SSL_ENABLED_UNSET) { + sc->enabled = SSL_ENABLED_TRUE; + } + +- /* If sc->enabled is UNSET, then SSL is optional on this vhost */ +- /* Fix up stuff that may not have been set */ ++ /* Fix up stuff that may not have been set. If sc->enabled is ++ * UNSET, then SSL is disabled on this vhost. */ + if (sc->enabled == SSL_ENABLED_UNSET) { + sc->enabled = SSL_ENABLED_FALSE; + } diff --git a/SOURCES/httpd-2.4.6-r1542327.patch b/SOURCES/httpd-2.4.6-r1542327.patch new file mode 100644 index 0000000..b11b535 --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1542327.patch @@ -0,0 +1,868 @@ +# ./pullrev.sh 1542327 +http://svn.apache.org/viewvc?view=revision&revision=1542327 + +--- httpd-2.4.6/LAYOUT.r1542327 ++++ httpd-2.4.6/LAYOUT +@@ -108,7 +108,6 @@ modules/ ................ Manditory and + mod_ssl.c ............... main source file containing API structures + mod_ssl.h ............... common header file of mod_ssl + ssl_engine_config.c ..... module configuration handling +- ssl_engine_dh.c ......... DSA/DH support + ssl_engine_init.c ....... module initialization + ssl_engine_io.c ......... I/O support + ssl_engine_kernel.c ..... 
SSL engine kernel +--- httpd-2.4.6/modules/ssl/config.m4.r1542327 ++++ httpd-2.4.6/modules/ssl/config.m4 +@@ -20,7 +20,6 @@ dnl # list of module object files + ssl_objs="dnl + mod_ssl.lo dnl + ssl_engine_config.lo dnl +-ssl_engine_dh.lo dnl + ssl_engine_init.lo dnl + ssl_engine_io.lo dnl + ssl_engine_kernel.lo dnl +--- httpd-2.4.6/modules/ssl/mod_ssl.c.r1542327 ++++ httpd-2.4.6/modules/ssl/mod_ssl.c +@@ -515,15 +515,6 @@ int ssl_init_ssl_connection(conn_rec *c, + + sslconn->ssl = ssl; + +- /* +- * Configure callbacks for SSL connection +- */ +- SSL_set_tmp_rsa_callback(ssl, ssl_callback_TmpRSA); +- SSL_set_tmp_dh_callback(ssl, ssl_callback_TmpDH); +-#ifndef OPENSSL_NO_EC +- SSL_set_tmp_ecdh_callback(ssl, ssl_callback_TmpECDH); +-#endif +- + SSL_set_verify_result(ssl, X509_V_OK); + + ssl_io_filter_init(c, r, ssl); +--- httpd-2.4.6/modules/ssl/mod_ssl.dsp.r1542327 ++++ httpd-2.4.6/modules/ssl/mod_ssl.dsp +@@ -112,10 +112,6 @@ SOURCE=.\ssl_engine_config.c + # End Source File + # Begin Source File + +-SOURCE=.\ssl_engine_dh.c +-# End Source File +-# Begin Source File +- + SOURCE=.\ssl_engine_init.c + # End Source File + # Begin Source File +--- httpd-2.4.6/modules/ssl/ssl_engine_config.c.r1542327 ++++ httpd-2.4.6/modules/ssl/ssl_engine_config.c +@@ -76,8 +76,6 @@ SSLModConfigRec *ssl_config_global_creat + mc->stapling_mutex = NULL; + #endif + +- memset(mc->pTmpKeys, 0, sizeof(mc->pTmpKeys)); +- + apr_pool_userdata_set(mc, SSL_MOD_CONFIG_KEY, + apr_pool_cleanup_null, + pool); +--- httpd-2.4.6/modules/ssl/ssl_engine_dh.c.r1542327 ++++ httpd-2.4.6/modules/ssl/ssl_engine_dh.c +@@ -1,244 +0,0 @@ +-#if 0 +-=pod +-#endif +- +-/* Licensed to the Apache Software Foundation (ASF) under one or more +- * contributor license agreements. See the NOTICE file distributed with +- * this work for additional information regarding copyright ownership. +- * The ASF licenses this file to You under the Apache License, Version 2.0 +- * (the "License"); you may not use this file except in compliance with +- * the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, software +- * distributed under the License is distributed on an "AS IS" BASIS, +- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +- * See the License for the specific language governing permissions and +- * limitations under the License. 
+- */ +- +-/* _ _ +- * _ __ ___ ___ __| | ___ ___| | mod_ssl +- * | '_ ` _ \ / _ \ / _` | / __/ __| | Apache Interface to OpenSSL +- * | | | | | | (_) | (_| | \__ \__ \ | +- * |_| |_| |_|\___/ \__,_|___|___/___/_| +- * |_____| +- * ssl_engine_dh.c +- * Diffie-Hellman Built-in Temporary Parameters +- */ +- +-#include "ssl_private.h" +- +-/* ----BEGIN GENERATED SECTION-------- */ +- +-/* +-** Diffie-Hellman-Parameters: (512 bit) +-** prime: +-** 00:9f:db:8b:8a:00:45:44:f0:04:5f:17:37:d0:ba: +-** 2e:0b:27:4c:df:1a:9f:58:82:18:fb:43:53:16:a1: +-** 6e:37:41:71:fd:19:d8:d8:f3:7c:39:bf:86:3f:d6: +-** 0e:3e:30:06:80:a3:03:0c:6e:4c:37:57:d0:8f:70: +-** e6:aa:87:10:33 +-** generator: 2 (0x2) +-** Diffie-Hellman-Parameters: (1024 bit) +-** prime: +-** 00:d6:7d:e4:40:cb:bb:dc:19:36:d6:93:d3:4a:fd: +-** 0a:d5:0c:84:d2:39:a4:5f:52:0b:b8:81:74:cb:98: +-** bc:e9:51:84:9f:91:2e:63:9c:72:fb:13:b4:b4:d7: +-** 17:7e:16:d5:5a:c1:79:ba:42:0b:2a:29:fe:32:4a: +-** 46:7a:63:5e:81:ff:59:01:37:7b:ed:dc:fd:33:16: +-** 8a:46:1a:ad:3b:72:da:e8:86:00:78:04:5b:07:a7: +-** db:ca:78:74:08:7d:15:10:ea:9f:cc:9d:dd:33:05: +-** 07:dd:62:db:88:ae:aa:74:7d:e0:f4:d6:e2:bd:68: +-** b0:e7:39:3e:0f:24:21:8e:b3 +-** generator: 2 (0x2) +-*/ +- +-static unsigned char dh512_p[] = { +- 0x9F, 0xDB, 0x8B, 0x8A, 0x00, 0x45, 0x44, 0xF0, 0x04, 0x5F, 0x17, 0x37, +- 0xD0, 0xBA, 0x2E, 0x0B, 0x27, 0x4C, 0xDF, 0x1A, 0x9F, 0x58, 0x82, 0x18, +- 0xFB, 0x43, 0x53, 0x16, 0xA1, 0x6E, 0x37, 0x41, 0x71, 0xFD, 0x19, 0xD8, +- 0xD8, 0xF3, 0x7C, 0x39, 0xBF, 0x86, 0x3F, 0xD6, 0x0E, 0x3E, 0x30, 0x06, +- 0x80, 0xA3, 0x03, 0x0C, 0x6E, 0x4C, 0x37, 0x57, 0xD0, 0x8F, 0x70, 0xE6, +- 0xAA, 0x87, 0x10, 0x33, +-}; +-static unsigned char dh512_g[] = { +- 0x02, +-}; +- +-static DH *get_dh512(void) +-{ +- DH *dh; +- +- if (!(dh = DH_new())) { +- return NULL; +- } +- +- dh->p = BN_bin2bn(dh512_p, sizeof(dh512_p), NULL); +- dh->g = BN_bin2bn(dh512_g, sizeof(dh512_g), NULL); +- if (!(dh->p && dh->g)) { +- DH_free(dh); +- return NULL; +- } +- +- return dh; +-} +- +-static unsigned char dh1024_p[] = { +- 0xD6, 0x7D, 0xE4, 0x40, 0xCB, 0xBB, 0xDC, 0x19, 0x36, 0xD6, 0x93, 0xD3, +- 0x4A, 0xFD, 0x0A, 0xD5, 0x0C, 0x84, 0xD2, 0x39, 0xA4, 0x5F, 0x52, 0x0B, +- 0xB8, 0x81, 0x74, 0xCB, 0x98, 0xBC, 0xE9, 0x51, 0x84, 0x9F, 0x91, 0x2E, +- 0x63, 0x9C, 0x72, 0xFB, 0x13, 0xB4, 0xB4, 0xD7, 0x17, 0x7E, 0x16, 0xD5, +- 0x5A, 0xC1, 0x79, 0xBA, 0x42, 0x0B, 0x2A, 0x29, 0xFE, 0x32, 0x4A, 0x46, +- 0x7A, 0x63, 0x5E, 0x81, 0xFF, 0x59, 0x01, 0x37, 0x7B, 0xED, 0xDC, 0xFD, +- 0x33, 0x16, 0x8A, 0x46, 0x1A, 0xAD, 0x3B, 0x72, 0xDA, 0xE8, 0x86, 0x00, +- 0x78, 0x04, 0x5B, 0x07, 0xA7, 0xDB, 0xCA, 0x78, 0x74, 0x08, 0x7D, 0x15, +- 0x10, 0xEA, 0x9F, 0xCC, 0x9D, 0xDD, 0x33, 0x05, 0x07, 0xDD, 0x62, 0xDB, +- 0x88, 0xAE, 0xAA, 0x74, 0x7D, 0xE0, 0xF4, 0xD6, 0xE2, 0xBD, 0x68, 0xB0, +- 0xE7, 0x39, 0x3E, 0x0F, 0x24, 0x21, 0x8E, 0xB3, +-}; +-static unsigned char dh1024_g[] = { +- 0x02, +-}; +- +-static DH *get_dh1024(void) +-{ +- DH *dh; +- +- if (!(dh = DH_new())) { +- return NULL; +- } +- +- dh->p = BN_bin2bn(dh1024_p, sizeof(dh1024_p), NULL); +- dh->g = BN_bin2bn(dh1024_g, sizeof(dh1024_g), NULL); +- if (!(dh->p && dh->g)) { +- DH_free(dh); +- return NULL; +- } +- +- return dh; +-} +- +-/* ----END GENERATED SECTION---------- */ +- +-DH *ssl_dh_GetTmpParam(int nKeyLen) +-{ +- DH *dh; +- +- if (nKeyLen == 512) +- dh = get_dh512(); +- else if (nKeyLen == 1024) +- dh = get_dh1024(); +- else +- dh = get_dh1024(); +- return dh; +-} +- +-DH *ssl_dh_GetParamFromFile(char *file) +-{ +- DH *dh = NULL; +- BIO *bio; +- +- if ((bio = 
BIO_new_file(file, "r")) == NULL) +- return NULL; +- dh = PEM_read_bio_DHparams(bio, NULL, NULL, NULL); +- BIO_free(bio); +- return (dh); +-} +- +-/* +-=cut +-## +-## Embedded Perl script for generating the temporary DH parameters +-## +- +-require 5.003; +-use strict; +- +-# configuration +-my $file = $0; +-my $begin = '----BEGIN GENERATED SECTION--------'; +-my $end = '----END GENERATED SECTION----------'; +- +-# read ourself and keep a backup +-open(FP, "<$file") || die; +-my $source = ''; +-$source .= $_ while (); +-close(FP); +-open(FP, ">$file.bak") || die; +-print FP $source; +-close(FP); +- +-# generate the DH parameters +-print "1. Generate 512 and 1024 bit Diffie-Hellman parameters (p, g)\n"; +-my $rand = ''; +-foreach $file (qw(/var/log/messages /var/adm/messages +- /kernel /vmunix /vmlinuz /etc/hosts /etc/resolv.conf)) { +- if (-f $file) { +- $rand = $file if ($rand eq ''); +- $rand .= ":$file" if ($rand ne ''); +- } +-} +-$rand = "-rand $rand" if ($rand ne ''); +-system("openssl gendh $rand -out dh512.pem 512"); +-system("openssl gendh $rand -out dh1024.pem 1024"); +- +-# generate DH param info +-my $dhinfo = ''; +-open(FP, "openssl dh -noout -text -in dh512.pem |") || die; +-$dhinfo .= $_ while (); +-close(FP); +-open(FP, "openssl dh -noout -text -in dh1024.pem |") || die; +-$dhinfo .= $_ while (); +-close(FP); +-$dhinfo =~ s|^|** |mg; +-$dhinfo = "\n\/\*\n$dhinfo\*\/\n\n"; +- +-my $indent_args = "-i4 -npsl -di0 -br -nce -d0 -cli0 -npcs -nfc1"; +- +-# generate C source from DH params +-my $dhsource = ''; +-open(FP, "openssl dh -noout -C -in dh512.pem | indent $indent_args | expand |") || die; +-$dhsource .= $_ while (); +-close(FP); +-open(FP, "openssl dh -noout -C -in dh1024.pem | indent $indent_args | expand |") || die; +-$dhsource .= $_ while (); +-close(FP); +-$dhsource =~ s|(DH\s+\*get_dh)(\d+)[^}]*\n}|static $1$2(void) +-{ +- DH *dh; +- +- if (!(dh = DH_new())) { +- return NULL; +- } +- +- dh->p = BN_bin2bn(dh$2_p, sizeof(dh$2_p), NULL); +- dh->g = BN_bin2bn(dh$2_g, sizeof(dh$2_g), NULL); +- if (!(dh->p && dh->g)) { +- DH_free(dh); +- return NULL; +- } +- +- return dh; +-} +-|sg; +- +-# generate output +-my $o = $dhinfo . 
$dhsource; +- +-# insert the generated code at the target location +-$source =~ s|(\/\* $begin.+?\n).*\n(.*?\/\* $end)|$1$o$2|s; +- +-# and update the source on disk +-print "Updating file `$file'\n"; +-open(FP, ">$file") || die; +-print FP $source; +-close(FP); +- +-# cleanup +-unlink("dh512.pem"); +-unlink("dh1024.pem"); +- +-=pod +-*/ +--- httpd-2.4.6/modules/ssl/ssl_engine_init.c.r1542327 ++++ httpd-2.4.6/modules/ssl/ssl_engine_init.c +@@ -56,180 +56,6 @@ static void ssl_add_version_components(a + modver, AP_SERVER_BASEVERSION, incver); + } + +- +-/* +- * Handle the Temporary RSA Keys and DH Params +- */ +- +-#define MODSSL_TMP_KEY_FREE(mc, type, idx) \ +- if (mc->pTmpKeys[idx]) { \ +- type##_free((type *)mc->pTmpKeys[idx]); \ +- mc->pTmpKeys[idx] = NULL; \ +- } +- +-#define MODSSL_TMP_KEYS_FREE(mc, type) \ +- MODSSL_TMP_KEY_FREE(mc, type, SSL_TMP_KEY_##type##_512); \ +- MODSSL_TMP_KEY_FREE(mc, type, SSL_TMP_KEY_##type##_1024) +- +-static void ssl_tmp_keys_free(server_rec *s) +-{ +- SSLModConfigRec *mc = myModConfig(s); +- +- MODSSL_TMP_KEYS_FREE(mc, RSA); +- MODSSL_TMP_KEYS_FREE(mc, DH); +-#ifndef OPENSSL_NO_EC +- MODSSL_TMP_KEY_FREE(mc, EC_KEY, SSL_TMP_KEY_EC_256); +-#endif +-} +- +-static int ssl_tmp_key_init_rsa(server_rec *s, +- int bits, int idx) +-{ +- SSLModConfigRec *mc = myModConfig(s); +- +-#ifdef HAVE_FIPS +- +- if (FIPS_mode() && bits < 1024) { +- mc->pTmpKeys[idx] = NULL; +- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(01877) +- "Init: Skipping generating temporary " +- "%d bit RSA private key in FIPS mode", bits); +- return OK; +- } +- +-#endif +-#ifdef HAVE_GENERATE_EX +- { +- RSA *tkey; +- BIGNUM *bn_f4; +- if (!(tkey = RSA_new()) +- || !(bn_f4 = BN_new()) +- || !BN_set_word(bn_f4, RSA_F4) +- || !RSA_generate_key_ex(tkey, bits, bn_f4, NULL)) +- { +- ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(01878) +- "Init: Failed to generate temporary " +- "%d bit RSA private key", bits); +- ssl_log_ssl_error(SSLLOG_MARK, APLOG_ERR, s); +- return !OK; +- } +- BN_free(bn_f4); +- mc->pTmpKeys[idx] = tkey; +- } +-#else +- if (!(mc->pTmpKeys[idx] = +- RSA_generate_key(bits, RSA_F4, NULL, NULL))) +- { +- ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(01879) +- "Init: Failed to generate temporary " +- "%d bit RSA private key", bits); +- ssl_log_ssl_error(SSLLOG_MARK, APLOG_ERR, s); +- return !OK; +- } +-#endif +- +- return OK; +-} +- +-static int ssl_tmp_key_init_dh(server_rec *s, +- int bits, int idx) +-{ +- SSLModConfigRec *mc = myModConfig(s); +- +-#ifdef HAVE_FIPS +- +- if (FIPS_mode() && bits < 1024) { +- mc->pTmpKeys[idx] = NULL; +- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(01880) +- "Init: Skipping generating temporary " +- "%d bit DH parameters in FIPS mode", bits); +- return OK; +- } +- +-#endif +- +- if (!(mc->pTmpKeys[idx] = +- ssl_dh_GetTmpParam(bits))) +- { +- ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(01881) +- "Init: Failed to generate temporary " +- "%d bit DH parameters", bits); +- return !OK; +- } +- +- return OK; +-} +- +-#ifndef OPENSSL_NO_EC +-static int ssl_tmp_key_init_ec(server_rec *s, +- int bits, int idx) +-{ +- SSLModConfigRec *mc = myModConfig(s); +- EC_KEY *ecdh = NULL; +- +- /* XXX: Are there any FIPS constraints we should enforce? 
*/ +- +- if (bits != 256) { +- ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(02298) +- "Init: Failed to generate temporary " +- "%d bit EC parameters, only 256 bits supported", bits); +- return !OK; +- } +- +- if ((ecdh = EC_KEY_new()) == NULL || +- EC_KEY_set_group(ecdh, EC_GROUP_new_by_curve_name(NID_X9_62_prime256v1)) != 1) +- { +- ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(02299) +- "Init: Failed to generate temporary " +- "%d bit EC parameters", bits); +- return !OK; +- } +- +- mc->pTmpKeys[idx] = ecdh; +- return OK; +-} +- +-#define MODSSL_TMP_KEY_INIT_EC(s, bits) \ +- ssl_tmp_key_init_ec(s, bits, SSL_TMP_KEY_EC_##bits) +- +-#endif +- +-#define MODSSL_TMP_KEY_INIT_RSA(s, bits) \ +- ssl_tmp_key_init_rsa(s, bits, SSL_TMP_KEY_RSA_##bits) +- +-#define MODSSL_TMP_KEY_INIT_DH(s, bits) \ +- ssl_tmp_key_init_dh(s, bits, SSL_TMP_KEY_DH_##bits) +- +-static int ssl_tmp_keys_init(server_rec *s) +-{ +- ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, s, +- "Init: Generating temporary RSA private keys (512/1024 bits)"); +- +- if (MODSSL_TMP_KEY_INIT_RSA(s, 512) || +- MODSSL_TMP_KEY_INIT_RSA(s, 1024)) { +- return !OK; +- } +- +- ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, s, +- "Init: Generating temporary DH parameters (512/1024 bits)"); +- +- if (MODSSL_TMP_KEY_INIT_DH(s, 512) || +- MODSSL_TMP_KEY_INIT_DH(s, 1024)) { +- return !OK; +- } +- +-#ifndef OPENSSL_NO_EC +- ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, s, +- "Init: Generating temporary EC parameters (256 bits)"); +- +- if (MODSSL_TMP_KEY_INIT_EC(s, 256)) { +- return !OK; +- } +-#endif +- +- return OK; +-} +- + /* + * Per-module initialization + */ +@@ -370,10 +196,6 @@ int ssl_init_Module(apr_pool_t *p, apr_p + */ + ssl_pphrase_Handle(base_server, ptemp); + +- if (ssl_tmp_keys_init(base_server)) { +- return !OK; +- } +- + /* + * initialize the mutex handling + */ +@@ -681,6 +503,9 @@ static void ssl_init_ctx_protocol(server + * Configure additional context ingredients + */ + SSL_CTX_set_options(ctx, SSL_OP_SINGLE_DH_USE); ++#ifndef OPENSSL_NO_EC ++ SSL_CTX_set_options(ctx, SSL_OP_SINGLE_ECDH_USE); ++#endif + + #ifdef SSL_OP_NO_SESSION_RESUMPTION_ON_RENEGOTIATION + /* +@@ -721,11 +546,7 @@ static void ssl_init_ctx_callbacks(serve + { + SSL_CTX *ctx = mctx->ssl_ctx; + +- SSL_CTX_set_tmp_rsa_callback(ctx, ssl_callback_TmpRSA); + SSL_CTX_set_tmp_dh_callback(ctx, ssl_callback_TmpDH); +-#ifndef OPENSSL_NO_EC +- SSL_CTX_set_tmp_ecdh_callback(ctx,ssl_callback_TmpECDH); +-#endif + + SSL_CTX_set_info_callback(ctx, ssl_callback_Info); + +@@ -1165,12 +986,16 @@ static void ssl_init_server_certs(server + modssl_ctx_t *mctx) + { + const char *rsa_id, *dsa_id; +-#ifndef OPENSSL_NO_EC ++#ifndef OPENSSL_NO_EC + const char *ecc_id; ++ EC_GROUP *ecparams; ++ int nid; ++ EC_KEY *eckey; + #endif + const char *vhost_id = mctx->sc->vhost_id; + int i; + int have_rsa, have_dsa; ++ DH *dhparams; + #ifndef OPENSSL_NO_EC + int have_ecc; + #endif +@@ -1217,6 +1042,40 @@ static void ssl_init_server_certs(server + "Oops, no " KEYTYPES " server private key found?!"); + ssl_die(s); + } ++ ++ /* ++ * Try to read DH parameters from the (first) SSLCertificateFile ++ */ ++ if ((mctx->pks->cert_files[0] != NULL) && ++ (dhparams = ssl_dh_GetParamFromFile(mctx->pks->cert_files[0]))) { ++ SSL_CTX_set_tmp_dh(mctx->ssl_ctx, dhparams); ++ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(02540) ++ "Custom DH parameters (%d bits) for %s loaded from %s", ++ BN_num_bits(dhparams->p), vhost_id, ++ mctx->pks->cert_files[0]); ++ } ++ ++#ifndef OPENSSL_NO_EC ++ /* ++ * Similarly, try to 
read the ECDH curve name from SSLCertificateFile... ++ */ ++ if ((mctx->pks->cert_files[0] != NULL) && ++ (ecparams = ssl_ec_GetParamFromFile(mctx->pks->cert_files[0])) && ++ (nid = EC_GROUP_get_curve_name(ecparams)) && ++ (eckey = EC_KEY_new_by_curve_name(nid))) { ++ SSL_CTX_set_tmp_ecdh(mctx->ssl_ctx, eckey); ++ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(02541) ++ "ECDH curve %s for %s specified in %s", ++ OBJ_nid2sn(nid), vhost_id, mctx->pks->cert_files[0]); ++ } ++ /* ++ * ...otherwise, configure NIST P-256 (required to enable ECDHE) ++ */ ++ else { ++ SSL_CTX_set_tmp_ecdh(mctx->ssl_ctx, ++ EC_KEY_new_by_curve_name(NID_X9_62_prime256v1)); ++ } ++#endif + } + + #ifdef HAVE_TLS_SESSION_TICKETS +@@ -1754,11 +1613,6 @@ apr_status_t ssl_init_ModuleKill(void *d + ssl_scache_kill(base_server); + + /* +- * Destroy the temporary keys and params +- */ +- ssl_tmp_keys_free(base_server); +- +- /* + * Free the non-pool allocated structures + * in the per-server configurations + */ +--- httpd-2.4.6/modules/ssl/ssl_engine_kernel.c.r1542327 ++++ httpd-2.4.6/modules/ssl/ssl_engine_kernel.c +@@ -1289,117 +1289,70 @@ const authz_provider ssl_authz_provider_ + */ + + /* +- * Handle out temporary RSA private keys on demand +- * +- * The background of this as the TLSv1 standard explains it: +- * +- * | D.1. Temporary RSA keys +- * | +- * | US Export restrictions limit RSA keys used for encryption to 512 +- * | bits, but do not place any limit on lengths of RSA keys used for +- * | signing operations. Certificates often need to be larger than 512 +- * | bits, since 512-bit RSA keys are not secure enough for high-value +- * | transactions or for applications requiring long-term security. Some +- * | certificates are also designated signing-only, in which case they +- * | cannot be used for key exchange. +- * | +- * | When the public key in the certificate cannot be used for encryption, +- * | the server signs a temporary RSA key, which is then exchanged. In +- * | exportable applications, the temporary RSA key should be the maximum +- * | allowable length (i.e., 512 bits). Because 512-bit RSA keys are +- * | relatively insecure, they should be changed often. For typical +- * | electronic commerce applications, it is suggested that keys be +- * | changed daily or every 500 transactions, and more often if possible. +- * | Note that while it is acceptable to use the same temporary key for +- * | multiple transactions, it must be signed each time it is used. +- * | +- * | RSA key generation is a time-consuming process. In many cases, a +- * | low-priority process can be assigned the task of key generation. +- * | Whenever a new key is completed, the existing temporary key can be +- * | replaced with the new one. +- * +- * XXX: base on comment above, if thread support is enabled, +- * we should spawn a low-priority thread to generate new keys +- * on the fly. +- * +- * So we generated 512 and 1024 bit temporary keys on startup +- * which we now just hand out on demand.... ++ * Grab well-defined DH parameters from OpenSSL, see ++ * (get_rfc*) for all available primes. + */ +- +-RSA *ssl_callback_TmpRSA(SSL *ssl, int export, int keylen) +-{ +- conn_rec *c = (conn_rec *)SSL_get_app_data(ssl); +- SSLModConfigRec *mc = myModConfigFromConn(c); +- int idx; +- +- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c, +- "handing out temporary %d bit RSA key", keylen); +- +- /* doesn't matter if export flag is on, +- * we won't be asked for keylen > 512 in that case. 
+- * if we are asked for a keylen > 1024, it is too expensive +- * to generate on the fly. +- * XXX: any reason not to generate 2048 bit keys at startup? +- */ +- +- switch (keylen) { +- case 512: +- idx = SSL_TMP_KEY_RSA_512; +- break; +- +- case 1024: +- default: +- idx = SSL_TMP_KEY_RSA_1024; +- } +- +- return (RSA *)mc->pTmpKeys[idx]; ++#define make_get_dh(rfc,size,gen) \ ++static DH *get_dh##size(void) \ ++{ \ ++ DH *dh; \ ++ if (!(dh = DH_new())) { \ ++ return NULL; \ ++ } \ ++ dh->p = get_##rfc##_prime_##size(NULL); \ ++ BN_dec2bn(&dh->g, #gen); \ ++ if (!dh->p || !dh->g) { \ ++ DH_free(dh); \ ++ return NULL; \ ++ } \ ++ return dh; \ + } + + /* +- * Hand out the already generated DH parameters... ++ * Prepare DH parameters from 1024 to 4096 bits, in 1024-bit increments ++ */ ++make_get_dh(rfc2409, 1024, 2) ++make_get_dh(rfc3526, 2048, 2) ++make_get_dh(rfc3526, 3072, 2) ++make_get_dh(rfc3526, 4096, 2) ++ ++/* ++ * Hand out standard DH parameters, based on the authentication strength + */ + DH *ssl_callback_TmpDH(SSL *ssl, int export, int keylen) + { + conn_rec *c = (conn_rec *)SSL_get_app_data(ssl); +- SSLModConfigRec *mc = myModConfigFromConn(c); +- int idx; +- +- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c, +- "handing out temporary %d bit DH key", keylen); ++ EVP_PKEY *pkey = SSL_get_privatekey(ssl); ++ int type = pkey ? EVP_PKEY_type(pkey->type) : EVP_PKEY_NONE; + +- switch (keylen) { +- case 512: +- idx = SSL_TMP_KEY_DH_512; +- break; +- +- case 1024: +- default: +- idx = SSL_TMP_KEY_DH_1024; ++ /* ++ * OpenSSL will call us with either keylen == 512 or keylen == 1024 ++ * (see the definition of SSL_EXPORT_PKEYLENGTH in ssl_locl.h). ++ * Adjust the DH parameter length according to the size of the ++ * RSA/DSA private key used for the current connection, and always ++ * use at least 1024-bit parameters. ++ * Note: This may cause interoperability issues with implementations ++ * which limit their DH support to 1024 bit - e.g. Java 7 and earlier. ++ * In this case, SSLCertificateFile can be used to specify fixed ++ * 1024-bit DH parameters (with the effect that OpenSSL skips this ++ * callback). ++ */ ++ if ((type == EVP_PKEY_RSA) || (type == EVP_PKEY_DSA)) { ++ keylen = EVP_PKEY_bits(pkey); + } + +- return (DH *)mc->pTmpKeys[idx]; +-} +- +-#ifndef OPENSSL_NO_EC +-EC_KEY *ssl_callback_TmpECDH(SSL *ssl, int export, int keylen) +-{ +- conn_rec *c = (conn_rec *)SSL_get_app_data(ssl); +- SSLModConfigRec *mc = myModConfigFromConn(c); +- int idx; +- +- /* XXX Uses 256-bit key for now. TODO: support other sizes. 
*/ + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c, +- "handing out temporary 256 bit ECC key"); +- +- switch (keylen) { +- case 256: +- default: +- idx = SSL_TMP_KEY_EC_256; +- } ++ "handing out built-in DH parameters for %d-bit authenticated connection", keylen); + +- return (EC_KEY *)mc->pTmpKeys[idx]; ++ if (keylen >= 4096) ++ return get_dh4096(); ++ else if (keylen >= 3072) ++ return get_dh3072(); ++ else if (keylen >= 2048) ++ return get_dh2048(); ++ else ++ return get_dh1024(); + } +-#endif + + /* + * This OpenSSL callback function is called when OpenSSL +--- httpd-2.4.6/modules/ssl/ssl_private.h.r1542327 ++++ httpd-2.4.6/modules/ssl/ssl_private.h +@@ -310,22 +310,6 @@ typedef int ssl_algo_t; + #define SSL_AIDX_MAX (2) + #endif + +- +-/** +- * Define IDs for the temporary RSA keys and DH params +- */ +- +-#define SSL_TMP_KEY_RSA_512 (0) +-#define SSL_TMP_KEY_RSA_1024 (1) +-#define SSL_TMP_KEY_DH_512 (2) +-#define SSL_TMP_KEY_DH_1024 (3) +-#ifndef OPENSSL_NO_EC +-#define SSL_TMP_KEY_EC_256 (4) +-#define SSL_TMP_KEY_MAX (5) +-#else +-#define SSL_TMP_KEY_MAX (4) +-#endif +- + /** + * Define the SSL options + */ +@@ -547,7 +531,6 @@ typedef struct { + apr_global_mutex_t *pMutex; + apr_array_header_t *aRandSeed; + apr_hash_t *tVHostKeys; +- void *pTmpKeys[SSL_TMP_KEY_MAX]; + + /* Two hash tables of pointers to ssl_asn1_t structures. The + * structures are used to store certificates and private keys +@@ -837,11 +820,7 @@ extern const authz_provider ssl_authz_pr + extern const authz_provider ssl_authz_provider_verify_client; + + /** OpenSSL callbacks */ +-RSA *ssl_callback_TmpRSA(SSL *, int, int); + DH *ssl_callback_TmpDH(SSL *, int, int); +-#ifndef OPENSSL_NO_EC +-EC_KEY *ssl_callback_TmpECDH(SSL *, int, int); +-#endif + int ssl_callback_SSLVerify(int, X509_STORE_CTX *); + int ssl_callback_SSLVerify_CRL(int, X509_STORE_CTX *, conn_rec *); + int ssl_callback_proxy_cert(SSL *ssl, X509 **x509, EVP_PKEY **pkey); +@@ -921,8 +900,10 @@ int ssl_init_ssl_connection(con + void ssl_pphrase_Handle(server_rec *, apr_pool_t *); + + /** Diffie-Hellman Parameter Support */ +-DH *ssl_dh_GetTmpParam(int); +-DH *ssl_dh_GetParamFromFile(char *); ++DH *ssl_dh_GetParamFromFile(const char *); ++#ifndef OPNESSL_NO_EC ++EC_GROUP *ssl_ec_GetParamFromFile(const char *); ++#endif + + unsigned char *ssl_asn1_table_set(apr_hash_t *table, + const char *key, +--- httpd-2.4.6/modules/ssl/ssl_util_ssl.c.r1542327 ++++ httpd-2.4.6/modules/ssl/ssl_util_ssl.c +@@ -483,6 +483,38 @@ BOOL SSL_X509_INFO_load_path(apr_pool_t + + /* _________________________________________________________________ + ** ++** Custom (EC)DH parameter support ++** _________________________________________________________________ ++*/ ++ ++DH *ssl_dh_GetParamFromFile(const char *file) ++{ ++ DH *dh = NULL; ++ BIO *bio; ++ ++ if ((bio = BIO_new_file(file, "r")) == NULL) ++ return NULL; ++ dh = PEM_read_bio_DHparams(bio, NULL, NULL, NULL); ++ BIO_free(bio); ++ return (dh); ++} ++ ++#ifndef OPENSSL_NO_EC ++EC_GROUP *ssl_ec_GetParamFromFile(const char *file) ++{ ++ EC_GROUP *group = NULL; ++ BIO *bio; ++ ++ if ((bio = BIO_new_file(file, "r")) == NULL) ++ return NULL; ++ group = PEM_read_bio_ECPKParameters(bio, NULL, NULL, NULL); ++ BIO_free(bio); ++ return (group); ++} ++#endif ++ ++/* _________________________________________________________________ ++** + ** Extra Server Certificate Chain Support + ** _________________________________________________________________ + */ diff --git a/SOURCES/httpd-2.4.6-r1553540.patch b/SOURCES/httpd-2.4.6-r1553540.patch 
new file mode 100644 index 0000000..6677cbb --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1553540.patch @@ -0,0 +1,28 @@ +# ./pullrev.sh 1553540 + +https://bugzilla.redhat.com/show_bug.cgi?id=1040447 + +http://svn.apache.org/viewvc?view=revision&revision=1553540 + +--- httpd-2.4.6/modules/proxy/mod_proxy_http.c ++++ httpd-2.4.6/modules/proxy/mod_proxy_http.c +@@ -710,7 +710,7 @@ + force10 = 0; + } + +- header_brigade = apr_brigade_create(p, origin->bucket_alloc); ++ header_brigade = apr_brigade_create(p, bucket_alloc); + rv = ap_proxy_create_hdrbrgd(p, header_brigade, r, p_conn, + worker, conf, uri, url, server_portstr, + &old_cl_val, &old_te_val); +@@ -1813,6 +1813,10 @@ + } + } while (interim_response && (interim_response < AP_MAX_INTERIM_RESPONSES)); + ++ /* We have to cleanup bb brigade, because buckets inserted to it could be ++ * created from scpool and this pool can be freed before this brigade. */ ++ apr_brigade_cleanup(bb); ++ + /* See define of AP_MAX_INTERIM_RESPONSES for why */ + if (interim_response >= AP_MAX_INTERIM_RESPONSES) { + return ap_proxyerror(r, HTTP_BAD_GATEWAY, diff --git a/SOURCES/httpd-2.4.6-r1555539.patch b/SOURCES/httpd-2.4.6-r1555539.patch new file mode 100644 index 0000000..414d127 --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1555539.patch @@ -0,0 +1,265 @@ +diff --git a/docs/manual/expr.html.en b/docs/manual/expr.html.en +index 5c3ae45..8bd941a 100644 +--- a/docs/manual/expr.html.en ++++ b/docs/manual/expr.html.en +@@ -46,7 +46,7 @@ +
    (rendered text of the docs/manual HTML hunks in this patch:)
    expr.html.en navigation/sections: Other; Comparison with SSLRequire; Version History;
    See also; top; Grammar in Backus-Naur Form notation
    mod_authnz_ldap.html.en (index 7199052..c86dc8a), hunk @@ -350,6 +350,9 @@
    "for HTTP Basic authentication"
    New directive entry added to the mod_ssl reference:
        Description:    Enable or disable use of TLS session tickets
        Syntax:         SSLSessionTickets on|off
        Default:        SSLCompression on
        Context:        server config, virtual host
        Status:         Extension
        Module:         mod_ssl
        Compatibility:  Available.
    "This directive allows to enable or disable the use of TLS session tickets (RFC 5077).
    TLS session tickets are enabled by default. Using them without restarting the web
    server with an appropriate frequency (e.g. daily) compromises perfect forward secrecy."
    top
    SSLCryptoDevice Directive

    + +diff --git a/modules/ssl/mod_ssl.c b/modules/ssl/mod_ssl.c +index bbe1d20..4a8b661 100644 +--- a/modules/ssl/mod_ssl.c ++++ b/modules/ssl/mod_ssl.c +@@ -141,6 +141,9 @@ static const command_rec ssl_config_cmds[] = { + SSL_CMD_SRV(Compression, FLAG, + "Enable SSL level compression" + "(`on', `off')") ++ SSL_CMD_SRV(SessionTickets, FLAG, ++ "Enable or disable TLS session tickets" ++ "(`on', `off')") + SSL_CMD_SRV(InsecureRenegotiation, FLAG, + "Enable support for insecure renegotiation") + SSL_CMD_ALL(UserName, TAKE1, +diff --git a/modules/ssl/ssl_engine_config.c b/modules/ssl/ssl_engine_config.c +index 9530fcc..86a7f0f 100644 +--- a/modules/ssl/ssl_engine_config.c ++++ b/modules/ssl/ssl_engine_config.c +@@ -216,6 +216,7 @@ static SSLSrvConfigRec *ssl_config_server_new(apr_pool_t *p) + #ifndef OPENSSL_NO_COMP + sc->compression = UNSET; + #endif ++ sc->session_tickets = UNSET; + + modssl_ctx_init_proxy(sc, p); + +@@ -346,6 +347,7 @@ void *ssl_config_server_merge(apr_pool_t *p, void *basev, void *addv) + #ifndef OPENSSL_NO_COMP + cfgMergeBool(compression); + #endif ++ cfgMergeBool(session_tickets); + + modssl_ctx_cfg_merge_proxy(base->proxy, add->proxy, mrg->proxy); + +@@ -720,6 +722,17 @@ const char *ssl_cmd_SSLHonorCipherOrder(cmd_parms *cmd, void *dcfg, int flag) + #endif + } + ++const char *ssl_cmd_SSLSessionTickets(cmd_parms *cmd, void *dcfg, int flag) ++{ ++ SSLSrvConfigRec *sc = mySrvConfig(cmd->server); ++#ifndef SSL_OP_NO_TICKET ++ return "This version of OpenSSL does not support using " ++ "SSLSessionTickets."; ++#endif ++ sc->session_tickets = flag ? TRUE : FALSE; ++ return NULL; ++} ++ + const char *ssl_cmd_SSLInsecureRenegotiation(cmd_parms *cmd, void *dcfg, int flag) + { + #ifdef SSL_OP_ALLOW_UNSAFE_LEGACY_RENEGOTIATION +diff --git a/modules/ssl/ssl_engine_init.c b/modules/ssl/ssl_engine_init.c +index 568627f..672760c 100644 +--- a/modules/ssl/ssl_engine_init.c ++++ b/modules/ssl/ssl_engine_init.c +@@ -566,6 +566,16 @@ static void ssl_init_ctx_protocol(server_rec *s, + } + #endif + ++#ifdef SSL_OP_NO_TICKET ++ /* ++ * Configure using RFC 5077 TLS session tickets ++ * for session resumption. 
++ */ ++ if (sc->session_tickets == FALSE) { ++ SSL_CTX_set_options(ctx, SSL_OP_NO_TICKET); ++ } ++#endif ++ + #ifdef SSL_OP_ALLOW_UNSAFE_LEGACY_RENEGOTIATION + if (sc->insecure_reneg == TRUE) { + SSL_CTX_set_options(ctx, SSL_OP_ALLOW_UNSAFE_LEGACY_RENEGOTIATION); +diff --git a/modules/ssl/ssl_private.h b/modules/ssl/ssl_private.h +index 0cc6d3f..b601316 100644 +--- a/modules/ssl/ssl_private.h ++++ b/modules/ssl/ssl_private.h +@@ -701,6 +701,7 @@ struct SSLSrvConfigRec { + #ifndef OPENSSL_NO_COMP + BOOL compression; + #endif ++ BOOL session_tickets; + }; + + /** +@@ -756,6 +757,7 @@ const char *ssl_cmd_SSLCARevocationFile(cmd_parms *, void *, const char *); + const char *ssl_cmd_SSLCARevocationCheck(cmd_parms *, void *, const char *); + const char *ssl_cmd_SSLHonorCipherOrder(cmd_parms *cmd, void *dcfg, int flag); + const char *ssl_cmd_SSLCompression(cmd_parms *, void *, int flag); ++const char *ssl_cmd_SSLSessionTickets(cmd_parms *, void *, int flag); + const char *ssl_cmd_SSLVerifyClient(cmd_parms *, void *, const char *); + const char *ssl_cmd_SSLVerifyDepth(cmd_parms *, void *, const char *); + const char *ssl_cmd_SSLSessionCache(cmd_parms *, void *, const char *); diff --git a/SOURCES/httpd-2.4.6-r1650655.patch b/SOURCES/httpd-2.4.6-r1650655.patch new file mode 100644 index 0000000..1791419 --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1650655.patch @@ -0,0 +1,41 @@ +diff --git a/modules/proxy/proxy_util.c b/modules/proxy/proxy_util.c +index c37a09b..2121892 100644 +--- a/modules/proxy/proxy_util.c ++++ b/modules/proxy/proxy_util.c +@@ -1733,6 +1733,9 @@ PROXY_DECLARE(char *) ap_proxy_define_worker(apr_pool_t *p, + + memset(wshared, 0, sizeof(proxy_worker_shared)); + ++ if (uri.port && uri.port == ap_proxy_port_of_scheme(uri.scheme)) { ++ uri.port = 0; ++ } + ptr = apr_uri_unparse(p, &uri, APR_URI_UNP_REVEALPASSWORD); + if (PROXY_STRNCPY(wshared->name, ptr) != APR_SUCCESS) { + return apr_psprintf(p, "worker name (%s) too long", ptr); +@@ -2688,6 +2691,13 @@ PROXY_DECLARE(int) ap_proxy_connect_backend(const char *proxy_function, + worker->s->hostname); + break; + } ++ ++ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(02823) ++ "%s: connection established with Unix domain socket " ++ "%s (%s)", ++ proxy_function, ++ conn->uds_path, ++ worker->s->hostname); + } + else + #endif +@@ -2780,6 +2790,12 @@ PROXY_DECLARE(int) ap_proxy_connect_backend(const char *proxy_function, + backend_addr = backend_addr->next; + continue; + } ++ ++ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(02824) ++ "%s: connection established with %pI (%s)", ++ proxy_function, ++ backend_addr, ++ worker->s->hostname); + } + + /* Set a timeout on the socket */ diff --git a/SOURCES/httpd-2.4.6-r1650677.patch b/SOURCES/httpd-2.4.6-r1650677.patch new file mode 100644 index 0000000..6599d98 --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1650677.patch @@ -0,0 +1,49 @@ +--- a/modules/proxy/mod_proxy_fcgi.c 2015/01/09 21:25:26 1650676 ++++ b/modules/proxy/mod_proxy_fcgi.c 2015/01/09 21:33:12 1650677 +@@ -367,7 +367,7 @@ + request_rec *r, int request_id) + { + apr_bucket_brigade *ib, *ob; +- int seen_end_of_headers = 0, done = 0; ++ int seen_end_of_headers = 0, done = 0, ignore_body = 0; + apr_status_t rv = APR_SUCCESS; + int script_error_status = HTTP_OK; + conn_rec *c = r->connection; +@@ -577,9 +577,16 @@ + APR_BRIGADE_INSERT_TAIL(ob, tmp_b); + r->status = status; + ap_pass_brigade(r->output_filters, ob); +- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01070) +- "Error parsing script headers"); +- rv = APR_EINVAL; ++ 
if (status == HTTP_NOT_MODIFIED) { ++ /* The 304 response MUST NOT contain ++ * a message-body, ignore it. */ ++ ignore_body = 1; ++ } ++ else { ++ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01070) ++ "Error parsing script headers"); ++ rv = APR_EINVAL; ++ } + break; + } + +@@ -598,7 +605,7 @@ + r->status = HTTP_OK; + } + +- if (script_error_status == HTTP_OK) { ++ if (script_error_status == HTTP_OK && !ignore_body) { + rv = ap_pass_brigade(r->output_filters, ob); + if (rv != APR_SUCCESS) { + break; +@@ -626,7 +633,7 @@ + * but that could be a huge amount of data; so we pass + * along smaller chunks + */ +- if (script_error_status == HTTP_OK) { ++ if (script_error_status == HTTP_OK && !ignore_body) { + rv = ap_pass_brigade(r->output_filters, ob); + if (rv != APR_SUCCESS) { + break; diff --git a/SOURCES/httpd-2.4.6-r1651083.patch b/SOURCES/httpd-2.4.6-r1651083.patch new file mode 100644 index 0000000..57505ad --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1651083.patch @@ -0,0 +1,10 @@ +--- a/server/core.c 2015/01/12 13:37:20 1651082 ++++ b/server/core.c 2015/01/12 13:38:02 1651083 +@@ -1271,6 +1271,7 @@ + static int reset_config_defines(void *dummy) + { + ap_server_config_defines = saved_server_config_defines; ++ saved_server_config_defines = NULL; + server_config_defined_vars = NULL; + return OK; + } diff --git a/SOURCES/httpd-2.4.6-r1651653.patch b/SOURCES/httpd-2.4.6-r1651653.patch new file mode 100644 index 0000000..a67093e --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1651653.patch @@ -0,0 +1,100 @@ +diff --git a/server/util.c b/server/util.c +index e0ba5c2..a6516d4 100644 +--- a/server/util.c ++++ b/server/util.c +@@ -968,20 +968,20 @@ AP_DECLARE(const char *) ap_pcfg_strerror(apr_pool_t *p, ap_configfile_t *cfp, + /* Read one line from open ap_configfile_t, strip LF, increase line number */ + /* If custom handler does not define a getstr() function, read char by char */ + static apr_status_t ap_cfg_getline_core(char *buf, apr_size_t bufsize, +- ap_configfile_t *cfp) ++ apr_size_t offset, ap_configfile_t *cfp) + { + apr_status_t rc; + /* If a "get string" function is defined, use it */ + if (cfp->getstr != NULL) { + char *cp; +- char *cbuf = buf; +- apr_size_t cbufsize = bufsize; ++ char *cbuf = buf + offset; ++ apr_size_t cbufsize = bufsize - offset; + + while (1) { + ++cfp->line_number; + rc = cfp->getstr(cbuf, cbufsize, cfp->param); + if (rc == APR_EOF) { +- if (cbuf != buf) { ++ if (cbuf != buf + offset) { + *cbuf = '\0'; + break; + } +@@ -999,11 +999,11 @@ static apr_status_t ap_cfg_getline_core(char *buf, apr_size_t bufsize, + */ + cp = cbuf; + cp += strlen(cp); +- if (cp > cbuf && cp[-1] == LF) { ++ if (cp > buf && cp[-1] == LF) { + cp--; +- if (cp > cbuf && cp[-1] == CR) ++ if (cp > buf && cp[-1] == CR) + cp--; +- if (cp > cbuf && cp[-1] == '\\') { ++ if (cp > buf && cp[-1] == '\\') { + cp--; + /* + * line continuation requested - +@@ -1021,19 +1021,19 @@ static apr_status_t ap_cfg_getline_core(char *buf, apr_size_t bufsize, + } + } else { + /* No "get string" function defined; read character by character */ +- apr_size_t i = 0; ++ apr_size_t i = offset; + + if (bufsize < 2) { + /* too small, assume caller is crazy */ + return APR_EINVAL; + } +- buf[0] = '\0'; ++ buf[offset] = '\0'; + + while (1) { + char c; + rc = cfp->getch(&c, cfp->param); + if (rc == APR_EOF) { +- if (i > 0) ++ if (i > offset) + break; + else + return APR_EOF; +@@ -1051,11 +1051,11 @@ static apr_status_t ap_cfg_getline_core(char *buf, apr_size_t bufsize, + break; + } + } +- else if (i >= bufsize - 2) { +- 
return APR_ENOSPC; +- } + buf[i] = c; + ++i; ++ if (i >= bufsize - 1) { ++ return APR_ENOSPC; ++ } + } + buf[i] = '\0'; + } +@@ -1089,7 +1089,7 @@ static int cfg_trim_line(char *buf) + AP_DECLARE(apr_status_t) ap_cfg_getline(char *buf, apr_size_t bufsize, + ap_configfile_t *cfp) + { +- apr_status_t rc = ap_cfg_getline_core(buf, bufsize, cfp); ++ apr_status_t rc = ap_cfg_getline_core(buf, bufsize, 0, cfp); + if (rc == APR_SUCCESS) + cfg_trim_line(buf); + return rc; +@@ -1116,7 +1116,7 @@ AP_DECLARE(apr_status_t) ap_varbuf_cfg_getline(struct ap_varbuf *vb, + } + + for (;;) { +- rc = ap_cfg_getline_core(vb->buf + vb->strlen, vb->avail - vb->strlen, cfp); ++ rc = ap_cfg_getline_core(vb->buf, vb->avail, vb->strlen, cfp); + if (rc == APR_ENOSPC || rc == APR_SUCCESS) + vb->strlen += strlen(vb->buf + vb->strlen); + if (rc != APR_ENOSPC) diff --git a/SOURCES/httpd-2.4.6-r1651658.patch b/SOURCES/httpd-2.4.6-r1651658.patch new file mode 100644 index 0000000..d0de5d5 --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1651658.patch @@ -0,0 +1,22 @@ +--- a/modules/proxy/mod_proxy_fcgi.c 2015/01/14 13:22:50 1651657 ++++ b/modules/proxy/mod_proxy_fcgi.c 2015/01/14 13:24:10 1651658 +@@ -204,9 +204,19 @@ + apr_status_t rv; + apr_size_t len; + int i, numenv; ++ char *proxyfilename = r->filename; ++ ++ /* Strip balancer prefix */ ++ if (r->filename && !strncmp(r->filename, "proxy:balancer://", 17)) { ++ char *newfname = apr_pstrdup(r->pool, r->filename+17); ++ newfname = ap_strchr(newfname, '/'); ++ r->filename = newfname; ++ } + + ap_add_common_vars(r); + ap_add_cgi_vars(r); ++ ++ r->filename = proxyfilename; + + /* XXX are there any FastCGI specific env vars we need to send? */ + diff --git a/SOURCES/httpd-2.4.6-r1662640.patch b/SOURCES/httpd-2.4.6-r1662640.patch new file mode 100644 index 0000000..3d1f726 --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1662640.patch @@ -0,0 +1,40 @@ +--- a/modules/ssl/ssl_engine_kernel.c 2015/02/27 06:05:11 1662639 ++++ b/modules/ssl/ssl_engine_kernel.c 2015/02/27 06:18:31 1662640 +@@ -80,7 +80,8 @@ + + if (SSL_get_state(ssl) != SSL_ST_OK) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02030) +- "TLS upgrade handshake failed: not accepted by client!?"); ++ "TLS upgrade handshake failed"); ++ ssl_log_ssl_error(SSLLOG_MARK, APLOG_ERR, r->server); + + return APR_ECONNABORTED; + } +@@ -314,6 +315,16 @@ + int depth, verify_old, verify, n; + + if (ssl) { ++ /* ++ * We should have handshaken here (on handshakeserver), ++ * otherwise we are being redirected (ErrorDocument) from ++ * a renegotiation failure below. The access is still ++ * forbidden in the latter case, let ap_die() handle ++ * this recursive (same) error. 
++ */ ++ if (SSL_get_state(ssl) != SSL_ST_OK) { ++ return HTTP_FORBIDDEN; ++ } + ctx = SSL_get_SSL_CTX(ssl); + } + +@@ -828,8 +839,8 @@ + + if (SSL_get_state(ssl) != SSL_ST_OK) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02261) +- "Re-negotiation handshake failed: " +- "Not accepted by client!?"); ++ "Re-negotiation handshake failed"); ++ ssl_log_ssl_error(SSLLOG_MARK, APLOG_ERR, r->server); + + r->connection->keepalive = AP_CONN_CLOSE; + return HTTP_FORBIDDEN; diff --git a/SOURCES/httpd-2.4.6-r1663647.patch b/SOURCES/httpd-2.4.6-r1663647.patch new file mode 100644 index 0000000..4d082cf --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1663647.patch @@ -0,0 +1,22 @@ +--- a/modules/aaa/mod_authn_dbd.c 2015/03/03 11:11:33 1663646 ++++ b/modules/aaa/mod_authn_dbd.c 2015/03/03 11:12:18 1663647 +@@ -174,7 +174,8 @@ + i++; + } + #endif +- dbd_password = apr_dbd_get_entry(dbd->driver, row, 0); ++ dbd_password = apr_pstrdup(r->pool, ++ apr_dbd_get_entry(dbd->driver, row, 0)); + } + /* we can't break out here or row won't get cleaned up */ + } +@@ -269,7 +270,8 @@ + i++; + } + #endif +- dbd_hash = apr_dbd_get_entry(dbd->driver, row, 0); ++ dbd_hash = apr_pstrdup(r->pool, ++ apr_dbd_get_entry(dbd->driver, row, 0)); + } + /* we can't break out here or row won't get cleaned up */ + } diff --git a/SOURCES/httpd-2.4.6-r1664565.patch b/SOURCES/httpd-2.4.6-r1664565.patch new file mode 100644 index 0000000..65f9a99 --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1664565.patch @@ -0,0 +1,68 @@ +--- httpd/modules/mappers/mod_rewrite.c 2015/03/06 08:55:34 1664564 ++++ httpd/modules/mappers/mod_rewrite.c 2015/03/06 09:14:07 1664565 +@@ -267,6 +267,8 @@ + const char *dbdq; /* SQL SELECT statement for rewritemap */ + const char *checkfile2; /* filename to check for map existence + NULL if only one file */ ++ const char *user; /* run RewriteMap program as this user */ ++ const char *group; /* run RewriteMap program as this group */ + } rewritemap_entry; + + /* special pattern types for RewriteCond */ +@@ -1171,6 +1173,7 @@ + + static apr_status_t rewritemap_program_child(apr_pool_t *p, + const char *progname, char **argv, ++ const char *user, const char *group, + apr_file_t **fpout, + apr_file_t **fpin) + { +@@ -1183,6 +1186,8 @@ + APR_FULL_BLOCK, APR_NO_PIPE)) + && APR_SUCCESS == (rc=apr_procattr_dir_set(procattr, + ap_make_dirstr_parent(p, argv[0]))) ++ && (!user || APR_SUCCESS == (rc=apr_procattr_user_set(procattr, user, ""))) ++ && (!group || APR_SUCCESS == (rc=apr_procattr_group_set(procattr, group))) + && APR_SUCCESS == (rc=apr_procattr_cmdtype_set(procattr, APR_PROGRAM)) + && APR_SUCCESS == (rc=apr_procattr_child_errfn_set(procattr, + rewrite_child_errfn)) +@@ -1240,6 +1245,7 @@ + } + + rc = rewritemap_program_child(p, map->argv[0], map->argv, ++ map->user, map->group, + &fpout, &fpin); + if (rc != APR_SUCCESS || fpin == NULL || fpout == NULL) { + ap_log_error(APLOG_MARK, APLOG_ERR, rc, s, APLOGNO(00654) +@@ -3018,7 +3024,7 @@ + } + + static const char *cmd_rewritemap(cmd_parms *cmd, void *dconf, const char *a1, +- const char *a2) ++ const char *a2, const char *a3) + { + rewrite_server_conf *sconf; + rewritemap_entry *newmap; +@@ -3124,6 +3130,11 @@ + + newmap->type = MAPTYPE_PRG; + newmap->checkfile = newmap->argv[0]; ++ if (a3) { ++ char *tok_cntx; ++ newmap->user = apr_strtok(apr_pstrdup(cmd->pool, a3), ":", &tok_cntx); ++ newmap->group = apr_strtok(NULL, ":", &tok_cntx); ++ } + } + else if (strncasecmp(a2, "int:", 4) == 0) { + newmap->type = MAPTYPE_INT; +@@ -5205,8 +5216,8 @@ + "an input string and a to 
be applied regexp-pattern"), + AP_INIT_RAW_ARGS("RewriteRule", cmd_rewriterule, NULL, OR_FILEINFO, + "an URL-applied regexp-pattern and a substitution URL"), +- AP_INIT_TAKE2( "RewriteMap", cmd_rewritemap, NULL, RSRC_CONF, +- "a mapname and a filename"), ++ AP_INIT_TAKE23( "RewriteMap", cmd_rewritemap, NULL, RSRC_CONF, ++ "a mapname and a filename and options"), + { NULL } + }; + diff --git a/SOURCES/httpd-2.4.6-r1668532.patch b/SOURCES/httpd-2.4.6-r1668532.patch new file mode 100644 index 0000000..6973eeb --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1668532.patch @@ -0,0 +1,67 @@ +diff --git a/server/scoreboard.c b/server/scoreboard.c +index a2e5daf..f989b99 100644 +--- a/server/scoreboard.c ++++ b/server/scoreboard.c +@@ -138,8 +138,6 @@ AP_DECLARE(int) ap_calc_scoreboard_size(void) + scoreboard_size += sizeof(process_score) * server_limit; + scoreboard_size += sizeof(worker_score) * server_limit * thread_limit; + +- pfn_ap_logio_get_last_bytes = APR_RETRIEVE_OPTIONAL_FN(ap_logio_get_last_bytes); +- + return scoreboard_size; + } + +@@ -148,6 +146,11 @@ AP_DECLARE(void) ap_init_scoreboard(void *shared_score) + char *more_storage; + int i; + ++ pfn_ap_logio_get_last_bytes = APR_RETRIEVE_OPTIONAL_FN(ap_logio_get_last_bytes); ++ if (!shared_score) { ++ return; ++ } ++ + ap_calc_scoreboard_size(); + ap_scoreboard_image = + ap_calloc(1, sizeof(scoreboard) + server_limit * sizeof(worker_score *)); +@@ -299,8 +302,6 @@ int ap_create_scoreboard(apr_pool_t *p, ap_scoreboard_e sb_type) + apr_status_t rv; + #endif + +- pfn_ap_logio_get_last_bytes = APR_RETRIEVE_OPTIONAL_FN(ap_logio_get_last_bytes); +- + if (ap_scoreboard_image) { + ap_scoreboard_image->global->restart_time = apr_time_now(); + memset(ap_scoreboard_image->parent, 0, +@@ -309,6 +310,7 @@ int ap_create_scoreboard(apr_pool_t *p, ap_scoreboard_e sb_type) + memset(ap_scoreboard_image->servers[i], 0, + sizeof(worker_score) * thread_limit); + } ++ ap_init_scoreboard(NULL); + return OK; + } + +diff --git a/server/core.c b/server/core.c +index c125015..eaa81a6 100644 +--- a/server/core.c ++++ b/server/core.c +@@ -4843,6 +4843,11 @@ static void core_child_init(apr_pool_t *pchild, server_rec *s) + apr_random_after_fork(&proc); + } + ++static void core_optional_fn_retrieve(void) ++{ ++ ap_init_scoreboard(NULL); ++} ++ + AP_CORE_DECLARE(void) ap_random_parent_after_fork(void) + { + /* +@@ -5022,6 +5027,8 @@ static void register_hooks(apr_pool_t *p) + APR_HOOK_REALLY_LAST); + ap_hook_dirwalk_stat(core_dirwalk_stat, NULL, NULL, APR_HOOK_REALLY_LAST); + ap_hook_open_htaccess(ap_open_htaccess, NULL, NULL, APR_HOOK_REALLY_LAST); ++ ap_hook_optional_fn_retrieve(core_optional_fn_retrieve, NULL, NULL, ++ APR_HOOK_MIDDLE); + + /* register the core's insert_filter hook and register core-provided + * filters diff --git a/SOURCES/httpd-2.4.6-r1674222.patch b/SOURCES/httpd-2.4.6-r1674222.patch new file mode 100644 index 0000000..60356b0 --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1674222.patch @@ -0,0 +1,81 @@ +Index: acinclude.m4 +=================================================================== +--- a/acinclude.m4 (revision 1667671) ++++ b/acinclude.m4 (working copy) +@@ -43,6 +43,7 @@ + APACHE_SUBST(installbuilddir) + APACHE_SUBST(runtimedir) + APACHE_SUBST(proxycachedir) ++ APACHE_SUBST(davlockdb) + APACHE_SUBST(other_targets) + APACHE_SUBST(progname) + APACHE_SUBST(prefix) +@@ -710,6 +711,7 @@ + APACHE_SUBST_EXPANDED_ARG(runtimedir) + APACHE_SUBST_EXPANDED_ARG(logfiledir) + APACHE_SUBST_EXPANDED_ARG(proxycachedir) ++ APACHE_SUBST_EXPANDED_ARG(davlockdb) + ]) + 
+ dnl +Index: build/mkconfNW.awk +=================================================================== +--- a/build/mkconfNW.awk (revision 1667671) ++++ b/build/mkconfNW.awk (working copy) +@@ -26,6 +26,7 @@ + A["runtimedir"] = "logs" + A["errordir"] = "error" + A["proxycachedir"] = "proxy" ++ A["davlockdb"] = "davlockdb" + + B["htdocsdir"] = A["ServerRoot"]"/"A["htdocsdir"] + B["iconsdir"] = A["ServerRoot"]"/"A["iconsdir"] +@@ -32,6 +33,7 @@ + B["manualdir"] = A["ServerRoot"]"/"A["manualdir"] + B["errordir"] = A["ServerRoot"]"/"A["errordir"] + B["proxycachedir"] = A["ServerRoot"]"/"A["proxycachedir"] ++ B["davlockdb"] = A["ServerRoot"]"/"A["davlockdb"] + B["cgidir"] = A["ServerRoot"]"/"A["cgidir"] + B["logfiledir"] = A["logfiledir"] + B["sysconfdir"] = A["sysconfdir"] +Index: include/ap_config_layout.h.in +=================================================================== +--- a/include/ap_config_layout.h.in (revision 1667671) ++++ b/include/ap_config_layout.h.in (working copy) +@@ -60,5 +60,7 @@ + #define DEFAULT_REL_LOGFILEDIR "@rel_logfiledir@" + #define DEFAULT_EXP_PROXYCACHEDIR "@exp_proxycachedir@" + #define DEFAULT_REL_PROXYCACHEDIR "@rel_proxycachedir@" ++#define DEFAULT_EXP_DAVLOCKDB "@exp_davlockdb@" ++#define DEFAULT_REL_DAVLOCKDB "@rel_davlockdb@" + + #endif /* AP_CONFIG_LAYOUT_H */ +Index: modules/dav/fs/mod_dav_fs.c +=================================================================== +--- a/modules/dav/fs/mod_dav_fs.c (revision 1667671) ++++ b/modules/dav/fs/mod_dav_fs.c (working copy) +@@ -17,6 +17,7 @@ + #include "httpd.h" + #include "http_config.h" + #include "apr_strings.h" ++#include "ap_config_auto.h" + + #include "mod_dav.h" + #include "repos.h" +@@ -39,7 +40,15 @@ + + static void *dav_fs_create_server_config(apr_pool_t *p, server_rec *s) + { +- return apr_pcalloc(p, sizeof(dav_fs_server_conf)); ++ dav_fs_server_conf *conf = apr_pcalloc(p, sizeof(dav_fs_server_conf)); ++#ifdef DEFAULT_EXP_DAVLOCKDB ++ conf->lockdb_path = DEFAULT_EXP_DAVLOCKDB; ++ if (*conf->lockdb_path == '\0') { ++ conf->lockdb_path = NULL; ++ } ++#endif ++ ++ return conf; + } + + static void *dav_fs_merge_server_config(apr_pool_t *p, diff --git a/SOURCES/httpd-2.4.6-r1681107.patch b/SOURCES/httpd-2.4.6-r1681107.patch new file mode 100644 index 0000000..9a02995 --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1681107.patch @@ -0,0 +1,84 @@ +Index: modules/aaa/mod_authz_dbd.c +=================================================================== +--- a/modules/aaa/mod_authz_dbd.c (revision 1681106) ++++ b/modules/aaa/mod_authz_dbd.c (revision 1681107) +@@ -174,7 +174,9 @@ + action, r->user, message?message:noerror); + } + else if (newuri == NULL) { +- newuri = apr_dbd_get_entry(dbd->driver, row, 0); ++ newuri = ++ apr_pstrdup(r->pool, ++ apr_dbd_get_entry(dbd->driver, row, 0)); + } + /* we can't break out here or row won't get cleaned up */ + } +@@ -204,7 +206,6 @@ + apr_dbd_prepared_t *query; + apr_dbd_results_t *res = NULL; + apr_dbd_row_t *row = NULL; +- const char **group; + + if (cfg->query == NULL) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01649) +@@ -224,8 +225,9 @@ + rv != -1; + rv = apr_dbd_get_row(dbd->driver, r->pool, res, &row, -1)) { + if (rv == 0) { +- group = apr_array_push(groups); +- *group = apr_dbd_get_entry(dbd->driver, row, 0); ++ APR_ARRAY_PUSH(groups, const char *) = ++ apr_pstrdup(r->pool, ++ apr_dbd_get_entry(dbd->driver, row, 0)); + } + else { + message = apr_dbd_error(dbd->driver, dbd->handle, rv); +Index: modules/session/mod_session_dbd.c 
+=================================================================== +--- a/modules/session/mod_session_dbd.c (revision 1681106) ++++ b/modules/session/mod_session_dbd.c (revision 1681107) +@@ -138,7 +138,8 @@ + return APR_EGENERAL; + } + if (*val == NULL) { +- *val = apr_dbd_get_entry(dbd->driver, row, 0); ++ *val = apr_pstrdup(r->pool, ++ apr_dbd_get_entry(dbd->driver, row, 0)); + } + /* we can't break out here or row won't get cleaned up */ + } +Index: modules/mappers/mod_rewrite.c +=================================================================== +--- a/modules/mappers/mod_rewrite.c (revision 1681106) ++++ b/modules/mappers/mod_rewrite.c (revision 1681107) +@@ -1352,12 +1352,14 @@ + while ((rv = apr_dbd_get_row(db->driver, r->pool, res, &row, -1)) == 0) { + ++n; + if (ret == NULL) { +- ret = apr_dbd_get_entry(db->driver, row, 0); ++ ret = apr_pstrdup(r->pool, ++ apr_dbd_get_entry(db->driver, row, 0)); + } + else { + /* randomise crudely amongst multiple results */ + if ((double)rand() < (double)RAND_MAX/(double)n) { +- ret = apr_dbd_get_entry(db->driver, row, 0); ++ ret = apr_pstrdup(r->pool, ++ apr_dbd_get_entry(db->driver, row, 0)); + } + } + } +@@ -1370,11 +1372,11 @@ + case 0: + return NULL; + case 1: +- return apr_pstrdup(r->pool, ret); ++ return ret; + default: + /* what's a fair rewritelog level for this? */ + rewritelog((r, 3, NULL, "Multiple values found for %s", key)); +- return apr_pstrdup(r->pool, ret); ++ return ret; + } + } + diff --git a/SOURCES/httpd-2.4.6-r1681114.patch b/SOURCES/httpd-2.4.6-r1681114.patch new file mode 100644 index 0000000..d112972 --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1681114.patch @@ -0,0 +1,157 @@ +diff --git a/modules/http/http_request.c b/modules/http/http_request.c +index cdfec8b..c97dc77 100644 +--- a/modules/http/http_request.c ++++ b/modules/http/http_request.c +@@ -73,19 +73,22 @@ static void update_r_in_filters(ap_filter_t *f, + } + } + +-AP_DECLARE(void) ap_die(int type, request_rec *r) ++static void ap_die_r(int type, request_rec *r, int recursive_error) + { +- int error_index = ap_index_of_response(type); +- char *custom_response = ap_response_code_string(r, error_index); +- int recursive_error = 0; ++ char *custom_response; + request_rec *r_1st_err = r; + +- if (type == AP_FILTER_ERROR) { ++ if (type == OK || type == DONE){ ++ ap_finalize_request_protocol(r); ++ return; ++ } ++ ++ if (!ap_is_HTTP_VALID_RESPONSE(type)) { + ap_filter_t *next; + + /* + * Check if we still have the ap_http_header_filter in place. If +- * this is the case we should not ignore AP_FILTER_ERROR here because ++ * this is the case we should not ignore the error here because + * it means that we have not sent any response at all and never + * will. This is bad. Sent an internal server error instead. 
+ */ +@@ -99,8 +102,14 @@ AP_DECLARE(void) ap_die(int type, request_rec *r) + * next->frec == ap_http_header_filter + */ + if (next) { +- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01579) +- "Custom error page caused AP_FILTER_ERROR"); ++ if (type != AP_FILTER_ERROR) { ++ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01579) ++ "Invalid response status %i", type); ++ } ++ else { ++ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02831) ++ "Response from AP_FILTER_ERROR"); ++ } + type = HTTP_INTERNAL_SERVER_ERROR; + } + else { +@@ -108,20 +117,13 @@ AP_DECLARE(void) ap_die(int type, request_rec *r) + } + } + +- if (type == DONE) { +- ap_finalize_request_protocol(r); +- return; +- } +- + /* + * The following takes care of Apache redirects to custom response URLs + * Note that if we are already dealing with the response to some other + * error condition, we just report on the original error, and give up on + * any attempt to handle the other thing "intelligently"... + */ +- if (r->status != HTTP_OK) { +- recursive_error = type; +- ++ if (recursive_error != HTTP_OK) { + while (r_1st_err->prev && (r_1st_err->prev->status != HTTP_OK)) + r_1st_err = r_1st_err->prev; /* Get back to original error */ + +@@ -140,6 +142,10 @@ AP_DECLARE(void) ap_die(int type, request_rec *r) + } + + custom_response = NULL; /* Do NOT retry the custom thing! */ ++ } else { ++ int error_index = ap_index_of_response(type); ++ custom_response = ap_response_code_string(r, error_index); ++ recursive_error = 0; + } + + r->status = type; +@@ -216,6 +222,11 @@ AP_DECLARE(void) ap_die(int type, request_rec *r) + ap_send_error_response(r_1st_err, recursive_error); + } + ++AP_DECLARE(void) ap_die(int type, request_rec *r) ++{ ++ ap_die_r(type, r, r->status); ++} ++ + static void check_pipeline(conn_rec *c) + { + if (c->keepalive != AP_CONN_CLOSE) { +@@ -337,18 +348,7 @@ void ap_process_async_request(request_rec *r) + apr_thread_mutex_unlock(r->invoke_mtx); + #endif + +- if (access_status == DONE) { +- /* e.g., something not in storage like TRACE */ +- access_status = OK; +- } +- +- if (access_status == OK) { +- ap_finalize_request_protocol(r); +- } +- else { +- r->status = HTTP_OK; +- ap_die(access_status, r); +- } ++ ap_die_r(access_status, r, HTTP_OK); + + ap_process_request_after_handler(r); + } +@@ -631,8 +631,8 @@ AP_DECLARE(void) ap_internal_fast_redirect(request_rec *rr, request_rec *r) + + AP_DECLARE(void) ap_internal_redirect(const char *new_uri, request_rec *r) + { +- request_rec *new = internal_internal_redirect(new_uri, r); + int access_status; ++ request_rec *new = internal_internal_redirect(new_uri, r); + + AP_INTERNAL_REDIRECT(r->uri, new_uri); + +@@ -648,12 +648,7 @@ AP_DECLARE(void) ap_internal_redirect(const char *new_uri, request_rec *r) + access_status = ap_invoke_handler(new); + } + } +- if (access_status == OK) { +- ap_finalize_request_protocol(new); +- } +- else { +- ap_die(access_status, new); +- } ++ ap_die(access_status, new); + } + + /* This function is designed for things like actions or CGI scripts, when +@@ -674,15 +669,9 @@ AP_DECLARE(void) ap_internal_redirect_handler(const char *new_uri, request_rec * + ap_set_content_type(new, r->content_type); + access_status = ap_process_request_internal(new); + if (access_status == OK) { +- if ((access_status = ap_invoke_handler(new)) != 0) { +- ap_die(access_status, new); +- return; +- } +- ap_finalize_request_protocol(new); +- } +- else { +- ap_die(access_status, new); ++ access_status = ap_invoke_handler(new); + } ++ ap_die(access_status, 
new); + } + + AP_DECLARE(void) ap_allow_methods(request_rec *r, int reset, ...) diff --git a/SOURCES/httpd-2.4.6-r1681289.patch b/SOURCES/httpd-2.4.6-r1681289.patch new file mode 100644 index 0000000..f4f63ac --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1681289.patch @@ -0,0 +1,19 @@ +diff --git a/modules/proxy/proxy_util.c b/modules/proxy/proxy_util.c +index a78224b..e672e4a 100644 +--- a/modules/proxy/proxy_util.c ++++ b/modules/proxy/proxy_util.c +@@ -2260,8 +2260,12 @@ ap_proxy_determine_connection(apr_pool_t *p, request_rec *r, + * The scheme handler decides if this is permanent or + * short living pool. + */ +- /* are we connecting directly, or via a proxy? */ +- if (!proxyname) { ++ /* Unless we are connecting the backend via a (forward Proxy)Remote, we ++ * have to use the original form of the URI (non absolute), but this is ++ * also the case via a remote proxy using the CONNECT method since the ++ * original request (and URI) is to be embedded in the body. ++ */ ++ if (!proxyname || conn->is_ssl) { + *url = apr_pstrcat(p, uri->path, uri->query ? "?" : "", + uri->query ? uri->query : "", + uri->fragment ? "#" : "", diff --git a/SOURCES/httpd-2.4.6-r1683112.patch b/SOURCES/httpd-2.4.6-r1683112.patch new file mode 100644 index 0000000..bb412c9 --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1683112.patch @@ -0,0 +1,45 @@ +diff --git a/modules/proxy/mod_proxy.c b/modules/proxy/mod_proxy.c +index 7f96aff..5517e08 100644 +--- a/modules/proxy/mod_proxy.c ++++ b/modules/proxy/mod_proxy.c +@@ -1118,7 +1118,7 @@ static int proxy_handler(request_rec *r) + AP_PROXY_RUN(r, worker, conf, url, attempts); + access_status = proxy_run_scheme_handler(r, worker, conf, + url, NULL, 0); +- if (access_status == OK) ++ if (access_status == OK || apr_table_get(r->notes, "proxy-error-override")) + break; + else if (access_status == HTTP_INTERNAL_SERVER_ERROR) { + /* Unrecoverable server error. +diff --git a/modules/proxy/mod_proxy_ajp.c b/modules/proxy/mod_proxy_ajp.c +index cf52a7d..380b870 100644 +--- a/modules/proxy/mod_proxy_ajp.c ++++ b/modules/proxy/mod_proxy_ajp.c +@@ -636,6 +636,11 @@ static int ap_proxy_ajp_request(apr_pool_t *p, request_rec *r, + */ + rv = r->status; + r->status = HTTP_OK; ++ /* ++ * prevent proxy_handler() from treating this as an ++ * internal error. ++ */ ++ apr_table_setn(r->notes, "proxy-error-override", "1"); + } + else { + rv = OK; +diff --git a/modules/proxy/mod_proxy_http.c b/modules/proxy/mod_proxy_http.c +index 89b5d15..bb5cdf9 100644 +--- a/modules/proxy/mod_proxy_http.c ++++ b/modules/proxy/mod_proxy_http.c +@@ -1648,6 +1648,11 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, + } + ap_discard_request_body(backend->r); + } ++ /* ++ * prevent proxy_handler() from treating this as an ++ * internal error. ++ */ ++ apr_table_setn(r->notes, "proxy-error-override", "1"); + return proxy_status; + } + diff --git a/SOURCES/httpd-2.4.6-r1684462.patch b/SOURCES/httpd-2.4.6-r1684462.patch new file mode 100644 index 0000000..ff5fe4b --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1684462.patch @@ -0,0 +1,34 @@ +--- a/modules/ssl/ssl_engine_kernel.c 2015/06/09 15:55:41 1684461 ++++ b/modules/ssl/ssl_engine_kernel.c 2015/06/09 16:09:28 1684462 +@@ -1940,7 +1940,7 @@ + #ifndef OPENSSL_NO_TLSEXT + /* + * This callback function is executed when OpenSSL encounters an extended +- * client hello with a server name indication extension ("SNI", cf. RFC 4366). ++ * client hello with a server name indication extension ("SNI", cf. RFC 6066). 
+ */ + int ssl_callback_ServerNameIndication(SSL *ssl, int *al, modssl_ctx_t *mctx) + { +@@ -1962,7 +1962,21 @@ + "No matching SSL virtual host for servername " + "%s found (using default/first virtual host)", + servername); +- return SSL_TLSEXT_ERR_ALERT_WARNING; ++ /* ++ * RFC 6066 section 3 says "It is NOT RECOMMENDED to send ++ * a warning-level unrecognized_name(112) alert, because ++ * the client's behavior in response to warning-level alerts ++ * is unpredictable." ++ * ++ * To maintain backwards compatibility in mod_ssl, we ++ * no longer send any alert (neither warning- nor fatal-level), ++ * i.e. we take the second action suggested in RFC 6066: ++ * "If the server understood the ClientHello extension but ++ * does not recognize the server name, the server SHOULD take ++ * one of two actions: either abort the handshake by sending ++ * a fatal-level unrecognized_name(112) alert or continue ++ * the handshake." ++ */ + } + } + } diff --git a/SOURCES/httpd-2.4.6-r1688399.patch b/SOURCES/httpd-2.4.6-r1688399.patch new file mode 100644 index 0000000..91f94ee --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1688399.patch @@ -0,0 +1,11 @@ +--- a/modules/metadata/mod_remoteip.c 2015/06/30 08:36:49 1688398 ++++ b/modules/metadata/mod_remoteip.c 2015/06/30 08:40:17 1688399 +@@ -255,7 +255,7 @@ + } + remote = apr_pstrdup(r->pool, remote); + +- temp_sa = c->client_addr; ++ temp_sa = r->useragent_addr ? r->useragent_addr : c->client_addr; + + while (remote) { + diff --git a/SOURCES/httpd-2.4.6-r1705528.patch b/SOURCES/httpd-2.4.6-r1705528.patch new file mode 100644 index 0000000..0be0309 --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1705528.patch @@ -0,0 +1,24 @@ +Index: modules/cache/cache_util.c +=================================================================== +--- a/modules/cache/cache_util.c (revision 1705527) ++++ b/modules/cache/cache_util.c (revision 1705528) +@@ -1254,7 +1254,6 @@ + headers_out = apr_table_overlay(r->pool, r->headers_out, + r->err_headers_out); + +- apr_table_clear(r->err_headers_out); + + headers_out = ap_cache_cacheable_headers(r->pool, headers_out, + r->server); +Index: modules/cache/mod_cache.c +=================================================================== +--- a/modules/cache/mod_cache.c (revision 1705527) ++++ b/modules/cache/mod_cache.c (revision 1705528) +@@ -1452,6 +1452,7 @@ + * headers that might have snuck in. + */ + r->headers_out = ap_cache_cacheable_headers_out(r); ++ apr_table_clear(r->err_headers_out); + + /* Merge in our cached headers. However, keep any updated values. */ + /* take output, overlay on top of cached */ diff --git a/SOURCES/httpd-2.4.6-r1723522.patch b/SOURCES/httpd-2.4.6-r1723522.patch new file mode 100644 index 0000000..83db1c8 --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1723522.patch @@ -0,0 +1,13 @@ +diff --git a/docs/manual/mod/mod_log_debug.html.en b/docs/manual/mod/mod_log_debug.html.en +index d3d4bd7..1ffd37f 100644 +--- a/docs/manual/mod/mod_log_debug.html.en ++++ b/docs/manual/mod/mod_log_debug.html.en +@@ -61,7 +61,7 @@ + Log message if request to /foo/* is processed in a sub-request: +
    + <Location /foo/>
    +-  LogMessage "subrequest to /foo/" hook=type_checker expr=%{IS_SUBREQ}
    ++  LogMessage "subrequest to /foo/" hook=type_checker "expr=-T %{IS_SUBREQ}"
    + </Location>
    +
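
Note: the r1723522 hunk above fixes the mod_log_debug documentation example — the expr= argument is an ap_expr boolean, so the sub-request test needs the -T operator and the whole argument has to be quoted. A minimal usage sketch of the corrected form (the message text and the location are illustrative only, and it assumes mod_log_debug is loaded):

    # Load the module and raise its log level so the info-level messages appear
    LoadModule log_debug_module modules/mod_log_debug.so
    LogLevel log_debug:info
    <Location "/foo/">
        LogMessage "subrequest to /foo/" hook=type_checker "expr=-T %{IS_SUBREQ}"
    </Location>
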
    + diff --git a/SOURCES/httpd-2.4.6-r1726019.patch b/SOURCES/httpd-2.4.6-r1726019.patch new file mode 100644 index 0000000..4408d8c --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1726019.patch @@ -0,0 +1,20 @@ +diff --git a/modules/proxy/mod_proxy_fcgi.c b/modules/proxy/mod_proxy_fcgi.c +index 19fed62..7889b0e 100644 +--- a/modules/proxy/mod_proxy_fcgi.c ++++ b/modules/proxy/mod_proxy_fcgi.c +@@ -927,6 +927,15 @@ static int fcgi_do_request(apr_pool_t *p, request_rec *r, + /* Step 3: Read records from the back end server and handle them. */ + rv = dispatch(conn, conf, r, request_id); + if (rv != APR_SUCCESS) { ++ /* If the client aborted the connection during retrieval or (partially) ++ * sending the response, dont't return a HTTP_SERVICE_UNAVAILABLE, since ++ * this is not a backend problem. */ ++ if (r->connection->aborted) { ++ ap_log_rerror(APLOG_MARK, APLOG_TRACE1, rv, r, ++ "The client aborted the connection."); ++ conn->close = 1; ++ return OK; ++ } + ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01075) + "Error dispatching request to %s:", server_portstr); + conn->close = 1; diff --git a/SOURCES/httpd-2.4.6-r1737363.patch b/SOURCES/httpd-2.4.6-r1737363.patch new file mode 100644 index 0000000..90d3101 --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1737363.patch @@ -0,0 +1,26 @@ +# ./pullrev.sh 1737363 +http://svn.apache.org/viewvc?view=revision&revision=1737363 + +--- httpd-2.4.6/modules/proxy/mod_proxy_express.c ++++ httpd-2.4.6/modules/proxy/mod_proxy_express.c +@@ -145,16 +145,14 @@ + key.dsize = strlen(key.dptr); + + rv = apr_dbm_fetch(db, key, &val); ++ if (rv == APR_SUCCESS) { ++ backend = apr_pstrmemdup(r->pool, val.dptr, val.dsize); ++ } + apr_dbm_close(db); +- if (rv != APR_SUCCESS) { ++ if (rv != APR_SUCCESS || !backend) { + return DECLINED; + } + +- backend = apr_pstrmemdup(r->pool, val.dptr, val.dsize); +- if (!backend) { +- return DECLINED; +- } +- + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01004) + "proxy_express: found %s -> %s", name, backend); + r->filename = apr_pstrcat(r->pool, "proxy:", backend, r->uri, NULL); diff --git a/SOURCES/httpd-2.4.6-r1738878.patch b/SOURCES/httpd-2.4.6-r1738878.patch new file mode 100644 index 0000000..0aab1c4 --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1738878.patch @@ -0,0 +1,139 @@ +diff --git a/modules/proxy/ajp.h b/modules/proxy/ajp.h +index c65ebe5..330573b 100644 +--- a/modules/proxy/ajp.h ++++ b/modules/proxy/ajp.h +@@ -413,11 +413,13 @@ apr_status_t ajp_ilink_receive(apr_socket_t *sock, ajp_msg_t *msg); + * @param r current request + * @param buffsize max size of the AJP packet. + * @param uri requested uri ++ * @param secret authentication secret + * @return APR_SUCCESS or error + */ + apr_status_t ajp_send_header(apr_socket_t *sock, request_rec *r, + apr_size_t buffsize, +- apr_uri_t *uri); ++ apr_uri_t *uri, ++ const char *secret); + + /** + * Read the ajp message and return the type of the message. 
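
Note: the r1738878 backport — started in the ajp.h hunk above and continued below through ajp_header.c, mod_proxy.c, mod_proxy.h and mod_proxy_ajp.c — adds a per-worker "secret" parameter carrying the AJP13 authentication secret. A hedged usage sketch; the hostname, port, paths and secret value are placeholders and must match whatever secret the backend AJP connector is configured to require:

    LoadModule proxy_module modules/mod_proxy.so
    LoadModule proxy_ajp_module modules/mod_proxy_ajp.so
    # "secret=" is the worker parameter introduced by this patch
    ProxyPass "/app/" "ajp://backend.example.com:8009/app/" secret=changeme
    ProxyPassReverse "/app/" "ajp://backend.example.com:8009/app/"
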
+diff --git a/modules/proxy/ajp_header.c b/modules/proxy/ajp_header.c +index 074f0a8..53571ee 100644 +--- a/modules/proxy/ajp_header.c ++++ b/modules/proxy/ajp_header.c +@@ -213,7 +213,8 @@ AJPV13_REQUEST/AJPV14_REQUEST= + + static apr_status_t ajp_marshal_into_msgb(ajp_msg_t *msg, + request_rec *r, +- apr_uri_t *uri) ++ apr_uri_t *uri, ++ const char *secret) + { + int method; + apr_uint32_t i, num_headers = 0; +@@ -293,17 +294,15 @@ static apr_status_t ajp_marshal_into_msgb(ajp_msg_t *msg, + i, elts[i].key, elts[i].val); + } + +-/* XXXX need to figure out how to do this +- if (s->secret) { ++ if (secret) { + if (ajp_msg_append_uint8(msg, SC_A_SECRET) || +- ajp_msg_append_string(msg, s->secret)) { ++ ajp_msg_append_string(msg, secret)) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, +- "Error ajp_marshal_into_msgb - " ++ "ajp_marshal_into_msgb: " + "Error appending secret"); + return APR_EGENERAL; + } + } +- */ + + if (r->user) { + if (ajp_msg_append_uint8(msg, SC_A_REMOTE_USER) || +@@ -628,7 +627,8 @@ static apr_status_t ajp_unmarshal_response(ajp_msg_t *msg, + apr_status_t ajp_send_header(apr_socket_t *sock, + request_rec *r, + apr_size_t buffsize, +- apr_uri_t *uri) ++ apr_uri_t *uri, ++ const char *secret) + { + ajp_msg_t *msg; + apr_status_t rc; +@@ -640,7 +640,7 @@ apr_status_t ajp_send_header(apr_socket_t *sock, + return rc; + } + +- rc = ajp_marshal_into_msgb(msg, r, uri); ++ rc = ajp_marshal_into_msgb(msg, r, uri, secret); + if (rc != APR_SUCCESS) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(00988) + "ajp_send_header: ajp_marshal_into_msgb failed"); +diff --git a/modules/proxy/mod_proxy.c b/modules/proxy/mod_proxy.c +index 5517e08..e998f58 100644 +--- a/modules/proxy/mod_proxy.c ++++ b/modules/proxy/mod_proxy.c +@@ -260,6 +260,12 @@ static const char *set_worker_param(apr_pool_t *p, + return "flusher name length must be < 16 characters"; + PROXY_STRNCPY(worker->s->flusher, val); + } ++ else if (!strcasecmp(key, "secret")) { ++ if (PROXY_STRNCPY(worker->s->secret, val) != APR_SUCCESS) { ++ return apr_psprintf(p, "Secret length must be < %d characters", ++ (int)sizeof(worker->s->secret)); ++ } ++ } + else { + return "unknown Worker parameter"; + } +diff --git a/modules/proxy/mod_proxy.h b/modules/proxy/mod_proxy.h +index b702028..06f2b17 100644 +--- a/modules/proxy/mod_proxy.h ++++ b/modules/proxy/mod_proxy.h +@@ -317,6 +317,7 @@ PROXY_WORKER_DISABLED | PROXY_WORKER_STOPPED | PROXY_WORKER_IN_ERROR ) + #define PROXY_WORKER_MAX_HOSTNAME_SIZE 64 + #define PROXY_BALANCER_MAX_HOSTNAME_SIZE PROXY_WORKER_MAX_HOSTNAME_SIZE + #define PROXY_BALANCER_MAX_STICKY_SIZE 64 ++#define PROXY_WORKER_MAX_SECRET_SIZE 64 + + #define PROXY_MAX_PROVIDER_NAME_SIZE 16 + +@@ -394,6 +395,7 @@ typedef struct { + unsigned int disablereuse_set:1; + unsigned int was_malloced:1; + unsigned int is_name_matchable:1; ++ char secret[PROXY_WORKER_MAX_SECRET_SIZE]; /* authentication secret (e.g. 
AJP13) */ + } proxy_worker_shared; + + #define ALIGNED_PROXY_WORKER_SHARED_SIZE (APR_ALIGN_DEFAULT(sizeof(proxy_worker_shared))) +diff --git a/modules/proxy/mod_proxy_ajp.c b/modules/proxy/mod_proxy_ajp.c +index 380b870..81039bf 100644 +--- a/modules/proxy/mod_proxy_ajp.c ++++ b/modules/proxy/mod_proxy_ajp.c +@@ -196,6 +196,7 @@ static int ap_proxy_ajp_request(apr_pool_t *p, request_rec *r, + apr_off_t content_length = 0; + int original_status = r->status; + const char *original_status_line = r->status_line; ++ const char *secret = NULL; + + if (psf->io_buffer_size_set) + maxsize = psf->io_buffer_size; +@@ -205,12 +206,15 @@ static int ap_proxy_ajp_request(apr_pool_t *p, request_rec *r, + maxsize = AJP_MSG_BUFFER_SZ; + maxsize = APR_ALIGN(maxsize, 1024); + ++ if (*conn->worker->s->secret) ++ secret = conn->worker->s->secret; ++ + /* + * Send the AJP request to the remote server + */ + + /* send request headers */ +- status = ajp_send_header(conn->sock, r, maxsize, uri); ++ status = ajp_send_header(conn->sock, r, maxsize, uri, secret); + if (status != APR_SUCCESS) { + conn->close = 1; + ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(00868) diff --git a/SOURCES/httpd-2.4.6-r1748212.patch b/SOURCES/httpd-2.4.6-r1748212.patch new file mode 100644 index 0000000..69ff27f --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1748212.patch @@ -0,0 +1,70 @@ +# ./pullrev.sh 1748212 +http://svn.apache.org/viewvc?view=revision&revision=1748212 + +https://bugzilla.redhat.com/show_bug.cgi?id=1343582 + +diff -uap httpd-2.4.6/include/httpd.h.r1748212 httpd-2.4.6/include/httpd.h +--- httpd-2.4.6/include/httpd.h.r1748212 ++++ httpd-2.4.6/include/httpd.h +@@ -477,7 +477,7 @@ AP_DECLARE(const char *) ap_get_server_b + * When adding a new code here add it to status_lines as well. + * A future version should dynamically generate the apr_table_t at startup. + */ +-#define RESPONSE_CODES 83 ++#define RESPONSE_CODES 103 + + #define HTTP_CONTINUE 100 + #define HTTP_SWITCHING_PROTOCOLS 101 +@@ -525,6 +525,7 @@ AP_DECLARE(const char *) ap_get_server_b + #define HTTP_PRECONDITION_REQUIRED 428 + #define HTTP_TOO_MANY_REQUESTS 429 + #define HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE 431 ++#define HTTP_UNAVAILABLE_FOR_LEGAL_REASONS 451 + #define HTTP_INTERNAL_SERVER_ERROR 500 + #define HTTP_NOT_IMPLEMENTED 501 + #define HTTP_BAD_GATEWAY 502 +diff -uap httpd-2.4.6/modules/http/http_protocol.c.r1748212 httpd-2.4.6/modules/http/http_protocol.c +--- httpd-2.4.6/modules/http/http_protocol.c.r1748212 ++++ httpd-2.4.6/modules/http/http_protocol.c +@@ -146,7 +146,27 @@ static const char * const status_lines[R + "429 Too Many Requests", + NULL, /* 430 */ + "431 Request Header Fields Too Large", +-#define LEVEL_500 71 ++ NULL, /* 432 */ ++ NULL, /* 433 */ ++ NULL, /* 434 */ ++ NULL, /* 435 */ ++ NULL, /* 436 */ ++ NULL, /* 437 */ ++ NULL, /* 438 */ ++ NULL, /* 439 */ ++ NULL, /* 440 */ ++ NULL, /* 441 */ ++ NULL, /* 442 */ ++ NULL, /* 443 */ ++ NULL, /* 444 */ ++ NULL, /* 445 */ ++ NULL, /* 446 */ ++ NULL, /* 447 */ ++ NULL, /* 448 */ ++ NULL, /* 449 */ ++ NULL, /* 450 */ ++ "451 Unavailable For Legal Reasons", ++#define LEVEL_500 91 + "500 Internal Server Error", + "501 Not Implemented", + "502 Bad Gateway", +@@ -1295,6 +1315,12 @@ static const char *get_canned_error_stri + case HTTP_NETWORK_AUTHENTICATION_REQUIRED: + return("
<p>The client needs to authenticate to gain\n" + "network access.</p>\n"); ++ case HTTP_UNAVAILABLE_FOR_LEGAL_REASONS: ++ s1 = apr_pstrcat(p, ++ "<p>Access to ", ap_escape_html(r->pool, r->uri), ++ "\nhas been denied for legal reasons.<br />\n", ++ NULL); ++ return(add_optional_notes(r, s1, "error-notes", "<p>
    \n")); + default: /* HTTP_INTERNAL_SERVER_ERROR */ + /* + * This comparison to expose error-notes could be modified to diff --git a/SOURCES/httpd-2.4.6-r1775832.patch b/SOURCES/httpd-2.4.6-r1775832.patch new file mode 100644 index 0000000..97c5f5f --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1775832.patch @@ -0,0 +1,16 @@ +--- a/modules/filters/mod_ext_filter.c 2016/12/23 12:35:43 1775831 ++++ b/modules/filters/mod_ext_filter.c 2016/12/23 12:36:26 1775832 +@@ -757,6 +757,13 @@ + break; + } + ++ if (AP_BUCKET_IS_ERROR(b)) { ++ apr_bucket *cpy; ++ apr_bucket_copy(b, &cpy); ++ APR_BRIGADE_INSERT_TAIL(bb_tmp, cpy); ++ break; ++ } ++ + rv = apr_bucket_read(b, &data, &len, APR_BLOCK_READ); + if (rv != APR_SUCCESS) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01463) "apr_bucket_read()"); diff --git a/SOURCES/httpd-2.4.6-r1805099.patch b/SOURCES/httpd-2.4.6-r1805099.patch new file mode 100644 index 0000000..4d7f419 --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1805099.patch @@ -0,0 +1,46 @@ +diff --git a/modules/proxy/mod_proxy.c b/modules/proxy/mod_proxy.c +index b10be15..506a046 100644 +--- a/modules/proxy/mod_proxy.c ++++ b/modules/proxy/mod_proxy.c +@@ -1320,6 +1320,7 @@ static void *create_proxy_dir_config(apr_pool_t *p, char *dummy) + new->error_override = 0; + new->error_override_set = 0; + new->add_forwarded_headers = 1; ++ new->add_forwarded_headers_set = 0; + + return (void *) new; + } +@@ -1350,7 +1351,12 @@ static void *merge_proxy_dir_config(apr_pool_t *p, void *basev, void *addv) + new->error_override_set = add->error_override_set || base->error_override_set; + new->alias = (add->alias_set == 0) ? base->alias : add->alias; + new->alias_set = add->alias_set || base->alias_set; +- new->add_forwarded_headers = add->add_forwarded_headers; ++ new->add_forwarded_headers = ++ (add->add_forwarded_headers_set == 0) ? base->add_forwarded_headers ++ : add->add_forwarded_headers; ++ new->add_forwarded_headers_set = add->add_forwarded_headers_set ++ || base->add_forwarded_headers_set; ++ + return new; + } + +@@ -1837,6 +1843,7 @@ static const char * + { + proxy_dir_conf *conf = dconf; + conf->add_forwarded_headers = flag; ++ conf->add_forwarded_headers_set = 1; + return NULL; + } + static const char * +diff --git a/modules/proxy/mod_proxy.h b/modules/proxy/mod_proxy.h +index 06f2b17..8c76d4c 100644 +--- a/modules/proxy/mod_proxy.h ++++ b/modules/proxy/mod_proxy.h +@@ -219,6 +219,7 @@ typedef struct { + unsigned int error_override_set:1; + unsigned int alias_set:1; + unsigned int add_forwarded_headers:1; ++ unsigned int add_forwarded_headers_set:1; + } proxy_dir_conf; + + /* if we interpolate env vars per-request, we'll need a per-request diff --git a/SOURCES/httpd-2.4.6-r1811746.patch b/SOURCES/httpd-2.4.6-r1811746.patch new file mode 100644 index 0000000..6dc47bf --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1811746.patch @@ -0,0 +1,128 @@ +--- a/server/protocol.c 2017/10/10 17:47:25 1811745 ++++ b/server/protocol.c 2017/10/10 17:51:13 1811746 +@@ -1674,62 +1674,88 @@ + ctx->tmpbb = apr_brigade_create(r->pool, r->connection->bucket_alloc); + } + +- /* Loop through this set of buckets to compute their length +- */ ++ /* Loop through the brigade to count the length. To avoid ++ * arbitrary memory consumption with morphing bucket types, this ++ * loop will stop and pass on the brigade when necessary. 
*/ + e = APR_BRIGADE_FIRST(b); + while (e != APR_BRIGADE_SENTINEL(b)) { ++ apr_status_t rv; ++ + if (APR_BUCKET_IS_EOS(e)) { + eos = 1; + break; + } +- if (e->length == (apr_size_t)-1) { ++ /* For a flush bucket, fall through to pass the brigade and ++ * flush now. */ ++ else if (APR_BUCKET_IS_FLUSH(e)) { ++ e = APR_BUCKET_NEXT(e); ++ } ++ /* For metadata bucket types other than FLUSH, loop. */ ++ else if (APR_BUCKET_IS_METADATA(e)) { ++ e = APR_BUCKET_NEXT(e); ++ continue; ++ } ++ /* For determinate length data buckets, count the length and ++ * continue. */ ++ else if (e->length != (apr_size_t)-1) { ++ r->bytes_sent += e->length; ++ e = APR_BUCKET_NEXT(e); ++ continue; ++ } ++ /* For indeterminate length data buckets, perform one read. */ ++ else /* e->length == (apr_size_t)-1 */ { + apr_size_t len; + const char *ignored; +- apr_status_t rv; +- +- /* This is probably a pipe bucket. Send everything +- * prior to this, and then read the data for this bucket. +- */ ++ + rv = apr_bucket_read(e, &ignored, &len, eblock); ++ if ((rv != APR_SUCCESS) && !APR_STATUS_IS_EAGAIN(rv)) { ++ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(00574) ++ "ap_content_length_filter: " ++ "apr_bucket_read() failed"); ++ return rv; ++ } + if (rv == APR_SUCCESS) { +- /* Attempt a nonblocking read next time through */ + eblock = APR_NONBLOCK_READ; ++ e = APR_BUCKET_NEXT(e); + r->bytes_sent += len; + } + else if (APR_STATUS_IS_EAGAIN(rv)) { +- /* Output everything prior to this bucket, and then +- * do a blocking read on the next batch. +- */ +- if (e != APR_BRIGADE_FIRST(b)) { +- apr_bucket *flush; +- apr_brigade_split_ex(b, e, ctx->tmpbb); +- flush = apr_bucket_flush_create(r->connection->bucket_alloc); +- +- APR_BRIGADE_INSERT_TAIL(b, flush); +- rv = ap_pass_brigade(f->next, b); +- if (rv != APR_SUCCESS || f->c->aborted) { +- return rv; +- } +- apr_brigade_cleanup(b); +- APR_BRIGADE_CONCAT(b, ctx->tmpbb); +- e = APR_BRIGADE_FIRST(b); ++ apr_bucket *flush; + +- ctx->data_sent = 1; +- } ++ /* Next read must block. */ + eblock = APR_BLOCK_READ; +- continue; +- } +- else { +- ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(00574) +- "ap_content_length_filter: " +- "apr_bucket_read() failed"); +- return rv; ++ ++ /* Ensure the last bucket to pass down is a flush if ++ * the next read will block. */ ++ flush = apr_bucket_flush_create(f->c->bucket_alloc); ++ APR_BUCKET_INSERT_BEFORE(e, flush); + } + } +- else { +- r->bytes_sent += e->length; ++ ++ /* Optimization: if the next bucket is EOS (directly after a ++ * bucket morphed to the heap, or a flush), short-cut to ++ * handle EOS straight away - allowing C-L to be determined ++ * for content which is already entirely in memory. */ ++ if (e != APR_BRIGADE_SENTINEL(b) && APR_BUCKET_IS_EOS(e)) { ++ continue; ++ } ++ ++ /* On reaching here, pass on everything in the brigade up to ++ * this point. 
*/ ++ apr_brigade_split_ex(b, e, ctx->tmpbb); ++ ++ rv = ap_pass_brigade(f->next, b); ++ if (rv != APR_SUCCESS) { ++ return rv; ++ } ++ else if (f->c->aborted) { ++ return APR_ECONNABORTED; + } +- e = APR_BUCKET_NEXT(e); ++ apr_brigade_cleanup(b); ++ APR_BRIGADE_CONCAT(b, ctx->tmpbb); ++ e = APR_BRIGADE_FIRST(b); ++ ++ ctx->data_sent = 1; + } + + /* If we've now seen the entire response and it's otherwise diff --git a/SOURCES/httpd-2.4.6-r1811831.patch b/SOURCES/httpd-2.4.6-r1811831.patch new file mode 100644 index 0000000..1f63e14 --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1811831.patch @@ -0,0 +1,79 @@ +--- a/server/util_script.c 2017/10/11 14:41:00 1811830 ++++ b/server/util_script.c 2017/10/11 14:48:55 1811831 +@@ -92,9 +92,21 @@ + } + } + +-static void env2env(apr_table_t *table, const char *name) ++/* Sets variable @name in table @dest from r->subprocess_env if ++ * available, else from the environment, else from @fallback if ++ * non-NULL. */ ++static void env2env(apr_table_t *dest, request_rec *r, ++ const char *name, const char *fallback) + { +- add_unless_null(table, name, getenv(name)); ++ const char *val; ++ ++ val = apr_table_get(r->subprocess_env, name); ++ if (!val) ++ val = apr_pstrdup(r->pool, getenv(name)); ++ if (!val) ++ val = apr_pstrdup(r->pool, fallback); ++ if (val) ++ apr_table_addn(dest, name, val); + } + + AP_DECLARE(char **) ap_create_environment(apr_pool_t *p, apr_table_t *t) +@@ -211,37 +223,29 @@ + add_unless_null(e, http2env(r, hdrs[i].key), hdrs[i].val); + } + +- env_temp = apr_table_get(r->subprocess_env, "PATH"); +- if (env_temp == NULL) { +- env_temp = getenv("PATH"); +- } +- if (env_temp == NULL) { +- env_temp = DEFAULT_PATH; +- } +- apr_table_addn(e, "PATH", apr_pstrdup(r->pool, env_temp)); +- ++ env2env(e, r, "PATH", DEFAULT_PATH); + #if defined(WIN32) +- env2env(e, "SystemRoot"); +- env2env(e, "COMSPEC"); +- env2env(e, "PATHEXT"); +- env2env(e, "WINDIR"); ++ env2env(e, r, "SystemRoot", NULL); ++ env2env(e, r, "COMSPEC", NULL); ++ env2env(e, r, "PATHEXT", NULL); ++ env2env(e, r, "WINDIR", NULL); + #elif defined(OS2) +- env2env(e, "COMSPEC"); +- env2env(e, "ETC"); +- env2env(e, "DPATH"); +- env2env(e, "PERLLIB_PREFIX"); ++ env2env(e, r, "COMSPEC", NULL); ++ env2env(e, r, "ETC", NULL); ++ env2env(e, r, "DPATH", NULL); ++ env2env(e, r, "PERLLIB_PREFIX", NULL); + #elif defined(BEOS) +- env2env(e, "LIBRARY_PATH"); ++ env2env(e, r, "LIBRARY_PATH", NULL); + #elif defined(DARWIN) +- env2env(e, "DYLD_LIBRARY_PATH"); ++ env2env(e, r, "DYLD_LIBRARY_PATH", NULL); + #elif defined(_AIX) +- env2env(e, "LIBPATH"); ++ env2env(e, r, "LIBPATH", NULL); + #elif defined(__HPUX__) + /* HPUX PARISC 2.0W knows both, otherwise redundancy is harmless */ +- env2env(e, "SHLIB_PATH"); +- env2env(e, "LD_LIBRARY_PATH"); ++ env2env(e, r, "SHLIB_PATH", NULL); ++ env2env(e, r, "LD_LIBRARY_PATH", NULL); + #else /* Some Unix */ +- env2env(e, "LD_LIBRARY_PATH"); ++ env2env(e, r, "LD_LIBRARY_PATH", NULL); + #endif + + apr_table_addn(e, "SERVER_SIGNATURE", ap_psignature("", r)); diff --git a/SOURCES/httpd-2.4.6-r1811976.patch b/SOURCES/httpd-2.4.6-r1811976.patch new file mode 100644 index 0000000..166cc9e --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1811976.patch @@ -0,0 +1,156 @@ +diff --git a/docs/manual/mod/mod_ssl.html.en b/docs/manual/mod/mod_ssl.html.en +index 98540cd..4580f1c 100644 +--- a/docs/manual/mod/mod_ssl.html.en ++++ b/docs/manual/mod/mod_ssl.html.en +@@ -197,6 +197,12 @@ the SSLOptions directiv + first (or only) attribute of any DN is added only under a non-suffixed + name; i.e. 
no _0 suffixed entries are added.

    + ++

    The _RAW suffix may now be added to mod_ssl DN variable names ++(such as SSL_CLIENT_I_O_RAW). When this suffix is used, conversion ++of certificate name attributes to UTF-8 is omitted. This allows variable ++lookups and comparisons for certificates with incorrectly tagged name ++attributes.
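
Note: a small illustrative fragment for the _RAW suffix documented above, assuming mod_headers is loaded and client-certificate verification is configured elsewhere in the same virtual host; the header name is arbitrary and the variable is resolved through mod_ssl's variable lookup:

    # Forward the issuer O attribute to the backend without UTF-8 conversion
    RequestHeader set X-Client-Issuer-O-Raw "%{SSL_CLIENT_I_O_RAW}s"
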

    ++ +

    The format of the *_DN variables has changed in Apache HTTPD + 2.3.11. See the LegacyDNStringFormat option for + SSLOptions for details.

    +@@ -861,7 +867,7 @@ SSLEngine on + </VirtualHost> + + +-

    In Apache 2.1 and later, SSLEngine can be set to ++

    In httpd 2.2.0 and later, SSLEngine can be set to + optional. This enables support for + RFC 2817, Upgrading to TLS + Within HTTP/1.1. At this time no web browsers support RFC 2817.
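
Note: a minimal fragment for the SSLEngine optional behaviour described above; the ServerName and certificate paths are placeholders, and since no browsers implement RFC 2817 this is mostly useful for testing clients that do:

    Listen 80
    <VirtualHost *:80>
        ServerName upgrade.example.com
        SSLEngine optional
        SSLCertificateFile    /etc/pki/tls/certs/localhost.crt
        SSLCertificateKeyFile /etc/pki/tls/private/localhost.key
    </VirtualHost>
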

    +diff --git a/modules/ssl/ssl_engine_vars.c b/modules/ssl/ssl_engine_vars.c +index 2b7c9ba..e25a6d4 100644 +--- a/modules/ssl/ssl_engine_vars.c ++++ b/modules/ssl/ssl_engine_vars.c +@@ -41,7 +41,7 @@ + + static char *ssl_var_lookup_ssl(apr_pool_t *p, conn_rec *c, request_rec *r, char *var); + static char *ssl_var_lookup_ssl_cert(apr_pool_t *p, request_rec *r, X509 *xs, char *var); +-static char *ssl_var_lookup_ssl_cert_dn(apr_pool_t *p, X509_NAME *xsname, char *var); ++static char *ssl_var_lookup_ssl_cert_dn(apr_pool_t *p, X509_NAME *xsname, const char *var); + static char *ssl_var_lookup_ssl_cert_san(apr_pool_t *p, X509 *xs, char *var); + static char *ssl_var_lookup_ssl_cert_valid(apr_pool_t *p, ASN1_TIME *tm); + static char *ssl_var_lookup_ssl_cert_remain(apr_pool_t *p, ASN1_TIME *tm); +@@ -562,15 +562,23 @@ static const struct { + { NULL, 0, 0 } + }; + +-static char *ssl_var_lookup_ssl_cert_dn(apr_pool_t *p, X509_NAME *xsname, char *var) ++static char *ssl_var_lookup_ssl_cert_dn(apr_pool_t *p, X509_NAME *xsname, ++ const char *var) + { +- char *result, *ptr; ++ const char *ptr; ++ char *result; + X509_NAME_ENTRY *xsne; +- int i, j, n, idx = 0; ++ int i, j, n, idx = 0, raw = 0; + apr_size_t varlen; + ++ ptr = ap_strrchr_c(var, '_'); ++ if (ptr && ptr > var && strcmp(ptr + 1, "RAW") == 0) { ++ var = apr_pstrmemdup(p, var, ptr - var); ++ raw = 1; ++ } ++ + /* if an _N suffix is used, find the Nth attribute of given name */ +- ptr = strchr(var, '_'); ++ ptr = ap_strchr_c(var, '_'); + if (ptr != NULL && strspn(ptr + 1, "0123456789") == strlen(ptr + 1)) { + idx = atoi(ptr + 1); + varlen = ptr - var; +@@ -592,7 +600,7 @@ static char *ssl_var_lookup_ssl_cert_dn(apr_pool_t *p, X509_NAME *xsname, char * + n =OBJ_obj2nid((ASN1_OBJECT *)X509_NAME_ENTRY_get_object(xsne)); + + if (n == ssl_var_lookup_ssl_cert_dn_rec[i].nid && idx-- == 0) { +- result = SSL_X509_NAME_ENTRY_to_string(p, xsne); ++ result = SSL_X509_NAME_ENTRY_to_string(p, xsne, raw); + break; + } + } +@@ -897,7 +905,7 @@ static void extract_dn(apr_table_t *t, apr_hash_t *nids, const char *pfx, + apr_hash_set(count, &nid, sizeof nid, dup); + key = apr_pstrcat(p, pfx, tag, NULL); + } +- value = SSL_X509_NAME_ENTRY_to_string(p, xsne); ++ value = SSL_X509_NAME_ENTRY_to_string(p, xsne, 0); + apr_table_setn(t, key, value); + } + } +diff --git a/modules/ssl/ssl_util_ssl.c b/modules/ssl/ssl_util_ssl.c +index 09a9877..fbd701f 100644 +--- a/modules/ssl/ssl_util_ssl.c ++++ b/modules/ssl/ssl_util_ssl.c +@@ -236,18 +236,21 @@ BOOL SSL_X509_getBC(X509 *cert, int *ca, int *pathlen) + return TRUE; + } + +-/* convert an ASN.1 string to a UTF-8 string (escaping control characters) */ +-char *SSL_ASN1_STRING_to_utf8(apr_pool_t *p, ASN1_STRING *asn1str) ++/* Convert ASN.1 string to a pool-allocated char * string, escaping ++ * control characters. If raw is zero, convert to UTF-8, otherwise ++ * unchanged from the character set. 
*/ ++char *SSL_ASN1_STRING_convert(apr_pool_t *p, ASN1_STRING *asn1str, int raw) + { + char *result = NULL; + BIO *bio; +- int len; ++ int len, flags = ASN1_STRFLGS_ESC_CTRL; + + if ((bio = BIO_new(BIO_s_mem())) == NULL) + return NULL; + +- ASN1_STRING_print_ex(bio, asn1str, ASN1_STRFLGS_ESC_CTRL| +- ASN1_STRFLGS_UTF8_CONVERT); ++ if (!raw) flags |= ASN1_STRFLGS_UTF8_CONVERT; ++ ++ ASN1_STRING_print_ex(bio, asn1str, flags); + len = BIO_pending(bio); + if (len > 0) { + result = apr_palloc(p, len+1); +@@ -258,10 +261,13 @@ char *SSL_ASN1_STRING_to_utf8(apr_pool_t *p, ASN1_STRING *asn1str) + return result; + } + ++#define SSL_ASN1_STRING_to_utf8(p, a) SSL_ASN1_STRING_convert(p, a, 0) ++ + /* convert a NAME_ENTRY to UTF8 string */ +-char *SSL_X509_NAME_ENTRY_to_string(apr_pool_t *p, X509_NAME_ENTRY *xsne) ++char *SSL_X509_NAME_ENTRY_to_string(apr_pool_t *p, X509_NAME_ENTRY *xsne, ++ int raw) + { +- char *result = SSL_ASN1_STRING_to_utf8(p, X509_NAME_ENTRY_get_data(xsne)); ++ char *result = SSL_ASN1_STRING_convert(p, X509_NAME_ENTRY_get_data(xsne), raw); + ap_xlate_proto_from_ascii(result, len); + return result; + } +@@ -414,7 +420,7 @@ BOOL SSL_X509_getIDs(apr_pool_t *p, X509 *x509, apr_array_header_t **ids) + subj = X509_get_subject_name(x509); + while ((i = X509_NAME_get_index_by_NID(subj, NID_commonName, i)) != -1) { + APR_ARRAY_PUSH(*ids, const char *) = +- SSL_X509_NAME_ENTRY_to_string(p, X509_NAME_get_entry(subj, i)); ++ SSL_X509_NAME_ENTRY_to_string(p, X509_NAME_get_entry(subj, i), 0); + } + + return apr_is_empty_array(*ids) ? FALSE : TRUE; +diff --git a/modules/ssl/ssl_util_ssl.h b/modules/ssl/ssl_util_ssl.h +index be07ab7..611957e 100644 +--- a/modules/ssl/ssl_util_ssl.h ++++ b/modules/ssl/ssl_util_ssl.h +@@ -65,8 +65,8 @@ EVP_PKEY *SSL_read_PrivateKey(char *, EVP_PKEY **, pem_password_cb *, void *); + int SSL_smart_shutdown(SSL *ssl); + BOOL SSL_X509_isSGC(X509 *); + BOOL SSL_X509_getBC(X509 *, int *, int *); +-char *SSL_ASN1_STRING_to_utf8(apr_pool_t *, ASN1_STRING *); +-char *SSL_X509_NAME_ENTRY_to_string(apr_pool_t *p, X509_NAME_ENTRY *xsne); ++char *SSL_ASN1_STRING_to_utf8(apr_pool_t *, ASN1_STRING *, int raw); ++char *SSL_X509_NAME_ENTRY_to_string(apr_pool_t *p, X509_NAME_ENTRY *xsne, int raw); + char *SSL_X509_NAME_to_string(apr_pool_t *, X509_NAME *, int); + BOOL SSL_X509_getSAN(apr_pool_t *, X509 *, int, const char *, int, apr_array_header_t **); + BOOL SSL_X509_getIDs(apr_pool_t *, X509 *, apr_array_header_t **); diff --git a/SOURCES/httpd-2.4.6-r1824872.patch b/SOURCES/httpd-2.4.6-r1824872.patch new file mode 100644 index 0000000..2cec77f --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1824872.patch @@ -0,0 +1,25 @@ +--- a/modules/ldap/util_ldap.c 2018/02/20 13:00:39 1824871 ++++ b/modules/ldap/util_ldap.c 2018/02/20 13:02:54 1824872 +@@ -2858,7 +2858,6 @@ + st->search_cache_size = base->search_cache_size; + st->compare_cache_ttl = base->compare_cache_ttl; + st->compare_cache_size = base->compare_cache_size; +- st->util_ldap_cache_lock = base->util_ldap_cache_lock; + + st->connections = NULL; + st->ssl_supported = 0; /* not known until post-config and re-merged */ +@@ -2977,12 +2976,12 @@ + st_vhost = (util_ldap_state_t *) + ap_get_module_config(s_vhost->module_config, + &ldap_module); +- ++ st_vhost->util_ldap_cache = st->util_ldap_cache; ++ st_vhost->util_ldap_cache_lock = st->util_ldap_cache_lock; + #if APR_HAS_SHARED_MEMORY + st_vhost->cache_shm = st->cache_shm; + st_vhost->cache_rmm = st->cache_rmm; + st_vhost->cache_file = st->cache_file; +- st_vhost->util_ldap_cache = 
st->util_ldap_cache; + ap_log_error(APLOG_MARK, APLOG_DEBUG, result, s, APLOGNO(01316) + "LDAP merging Shared Cache conf: shm=0x%pp rmm=0x%pp " + "for VHOST: %s", st->cache_shm, st->cache_rmm, diff --git a/SOURCES/httpd-2.4.6-r1825120.patch b/SOURCES/httpd-2.4.6-r1825120.patch new file mode 100644 index 0000000..f490839 --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1825120.patch @@ -0,0 +1,96 @@ +diff --git a/modules/ssl/ssl_engine_init.c b/modules/ssl/ssl_engine_init.c +index 57b76c0..814ec4f 100644 +--- a/modules/ssl/ssl_engine_init.c ++++ b/modules/ssl/ssl_engine_init.c +@@ -1522,70 +1522,18 @@ void ssl_init_CheckServers(SSLModConfigRec *mc, server_rec *base_server, apr_poo + } + } + +-static int ssl_init_FindCAList_X509NameCmp(const X509_NAME * const *a, +- const X509_NAME * const *b) +-{ +- return(X509_NAME_cmp(*a, *b)); +-} +- +-static void ssl_init_PushCAList(STACK_OF(X509_NAME) *ca_list, +- server_rec *s, apr_pool_t *ptemp, +- const char *file) +-{ +- int n; +- STACK_OF(X509_NAME) *sk; +- +- sk = (STACK_OF(X509_NAME) *) +- SSL_load_client_CA_file(file); +- +- if (!sk) { +- return; +- } +- +- for (n = 0; n < sk_X509_NAME_num(sk); n++) { +- X509_NAME *name = sk_X509_NAME_value(sk, n); +- +- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(02209) +- "CA certificate: %s", +- SSL_X509_NAME_to_string(ptemp, name, 0)); +- +- /* +- * note that SSL_load_client_CA_file() checks for duplicates, +- * but since we call it multiple times when reading a directory +- * we must also check for duplicates ourselves. +- */ +- +- if (sk_X509_NAME_find(ca_list, name) < 0) { +- /* this will be freed when ca_list is */ +- sk_X509_NAME_push(ca_list, name); +- } +- else { +- /* need to free this ourselves, else it will leak */ +- X509_NAME_free(name); +- } +- } +- +- sk_X509_NAME_free(sk); +-} +- + STACK_OF(X509_NAME) *ssl_init_FindCAList(server_rec *s, + apr_pool_t *ptemp, + const char *ca_file, + const char *ca_path) + { +- STACK_OF(X509_NAME) *ca_list; +- +- /* +- * Start with a empty stack/list where new +- * entries get added in sorted order. +- */ +- ca_list = sk_X509_NAME_new(ssl_init_FindCAList_X509NameCmp); ++ STACK_OF(X509_NAME) *ca_list = sk_X509_NAME_new_null();; + + /* + * Process CA certificate bundle file + */ + if (ca_file) { +- ssl_init_PushCAList(ca_list, s, ptemp, ca_file); ++ SSL_add_file_cert_subjects_to_stack(ca_list, ca_file); + /* + * If ca_list is still empty after trying to load ca_file + * then the file failed to load, and users should hear about that. +@@ -1619,17 +1567,12 @@ STACK_OF(X509_NAME) *ssl_init_FindCAList(server_rec *s, + continue; /* don't try to load directories */ + } + file = apr_pstrcat(ptemp, ca_path, "/", direntry.name, NULL); +- ssl_init_PushCAList(ca_list, s, ptemp, file); ++ SSL_add_file_cert_subjects_to_stack(ca_list, file); + } + + apr_dir_close(dir); + } + +- /* +- * Cleanup +- */ +- (void) sk_X509_NAME_set_cmp_func(ca_list, NULL); +- + return ca_list; + } + diff --git a/SOURCES/httpd-2.4.6-r1826995.patch b/SOURCES/httpd-2.4.6-r1826995.patch new file mode 100644 index 0000000..c179178 --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1826995.patch @@ -0,0 +1,198 @@ +diff --git a/docs/manual/mod/mod_ssl.html.en b/docs/manual/mod/mod_ssl.html.en +index 4580f1c..fb8202e 100644 +--- a/docs/manual/mod/mod_ssl.html.en ++++ b/docs/manual/mod/mod_ssl.html.en +@@ -991,7 +991,8 @@ the certificate being verified.

    +

    This option enables OCSP validation of the client certificate + chain. If this option is enabled, certificates in the client's + certificate chain will be validated against an OCSP responder after +-normal verification (including CRL checks) have taken place.

    ++normal verification (including CRL checks) have taken place. In ++mode 'leaf', only the client certificate itself will be validated.
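
Note: the r1826995 backport (documentation hunk here, code changes further below in ssl_engine_config.c and ssl_engine_ocsp.c) turns SSLOCSPEnable from an on/off flag into on | leaf | off with an optional no_ocsp_for_cert_ok keyword. A hedged configuration sketch, assuming client-certificate verification is already set up for this host:

    SSLVerifyClient require
    # OCSP-check only the client certificate itself, and accept certificates
    # that do not carry an OCSP responder URL
    SSLOCSPEnable leaf no_ocsp_for_cert_ok
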

    + +

    The OCSP responder used is either extracted from the certificate + itself, or derived by configuration; see the +diff --git a/modules/ssl/mod_ssl.c b/modules/ssl/mod_ssl.c +index 4a8b661..e637a9d 100644 +--- a/modules/ssl/mod_ssl.c ++++ b/modules/ssl/mod_ssl.c +@@ -227,8 +227,8 @@ static const command_rec ssl_config_cmds[] = { + "request body if a per-location SSL renegotiation is required due to " + "changed access control requirements") + +- SSL_CMD_SRV(OCSPEnable, FLAG, +- "Enable use of OCSP to verify certificate revocation ('on', 'off')") ++ SSL_CMD_SRV(OCSPEnable, RAW_ARGS, ++ "Enable use of OCSP to verify certificate revocation mode ('on', 'leaf', 'off')") + SSL_CMD_SRV(OCSPDefaultResponder, TAKE1, + "URL of the default OCSP Responder") + SSL_CMD_SRV(OCSPOverrideResponder, FLAG, +diff --git a/modules/ssl/ssl_engine_config.c b/modules/ssl/ssl_engine_config.c +index 86a7f0f..714aee9 100644 +--- a/modules/ssl/ssl_engine_config.c ++++ b/modules/ssl/ssl_engine_config.c +@@ -130,7 +130,7 @@ static void modssl_ctx_init(modssl_ctx_t *mctx) + mctx->auth.verify_depth = UNSET; + mctx->auth.verify_mode = SSL_CVERIFY_UNSET; + +- mctx->ocsp_enabled = FALSE; ++ mctx->ocsp_mask = UNSET; + mctx->ocsp_force_default = FALSE; + mctx->ocsp_responder = NULL; + mctx->ocsp_resptime_skew = UNSET; +@@ -264,7 +264,7 @@ static void modssl_ctx_cfg_merge(modssl_ctx_t *base, + cfgMergeInt(auth.verify_depth); + cfgMerge(auth.verify_mode, SSL_CVERIFY_UNSET); + +- cfgMergeBool(ocsp_enabled); ++ cfgMergeInt(ocsp_mask); + cfgMergeBool(ocsp_force_default); + cfgMerge(ocsp_responder, NULL); + cfgMergeInt(ocsp_resptime_skew); +@@ -1575,11 +1575,46 @@ const char *ssl_cmd_SSLUserName(cmd_parms *cmd, void *dcfg, + return NULL; + } + +-const char *ssl_cmd_SSLOCSPEnable(cmd_parms *cmd, void *dcfg, int flag) ++static const char *ssl_cmd_ocspcheck_parse(cmd_parms *parms, ++ const char *arg, ++ int *mask) + { +- SSLSrvConfigRec *sc = mySrvConfig(cmd->server); ++ const char *w; ++ ++ w = ap_getword_conf(parms->temp_pool, &arg); ++ if (strcEQ(w, "off")) { ++ *mask = SSL_OCSPCHECK_NONE; ++ } ++ else if (strcEQ(w, "leaf")) { ++ *mask = SSL_OCSPCHECK_LEAF; ++ } ++ else if (strcEQ(w, "on")) { ++ *mask = SSL_OCSPCHECK_CHAIN; ++ } ++ else { ++ return apr_pstrcat(parms->temp_pool, parms->cmd->name, ++ ": Invalid argument '", w, "'", ++ NULL); ++ } ++ ++ while (*arg) { ++ w = ap_getword_conf(parms->temp_pool, &arg); ++ if (strcEQ(w, "no_ocsp_for_cert_ok")) { ++ *mask |= SSL_OCSPCHECK_NO_OCSP_FOR_CERT_OK; ++ } ++ else { ++ return apr_pstrcat(parms->temp_pool, parms->cmd->name, ++ ": Invalid argument '", w, "'", ++ NULL); ++ } ++ } + +- sc->server->ocsp_enabled = flag ? TRUE : FALSE; ++ return NULL; ++} ++ ++const char *ssl_cmd_SSLOCSPEnable(cmd_parms *cmd, void *dcfg, const char *arg) ++{ ++ SSLSrvConfigRec *sc = mySrvConfig(cmd->server); + + #ifdef OPENSSL_NO_OCSP + if (flag) { +@@ -1588,7 +1623,7 @@ const char *ssl_cmd_SSLOCSPEnable(cmd_parms *cmd, void *dcfg, int flag) + } + #endif + +- return NULL; ++ return ssl_cmd_ocspcheck_parse(cmd, arg, &sc->server->ocsp_mask); + } + + const char *ssl_cmd_SSLOCSPOverrideResponder(cmd_parms *cmd, void *dcfg, int flag) +diff --git a/modules/ssl/ssl_engine_init.c b/modules/ssl/ssl_engine_init.c +index 672760c..57b76c0 100644 +--- a/modules/ssl/ssl_engine_init.c ++++ b/modules/ssl/ssl_engine_init.c +@@ -762,6 +762,10 @@ static void ssl_init_ctx_crl(server_rec *s, + unsigned long crlflags = 0; + char *cfgp = mctx->pkp ? 
"SSLProxy" : "SSL"; + ++ if (mctx->ocsp_mask == UNSET) { ++ mctx->ocsp_mask = SSL_OCSPCHECK_NONE; ++ } ++ + /* + * Configure Certificate Revocation List (CRL) Details + */ +diff --git a/modules/ssl/ssl_engine_kernel.c b/modules/ssl/ssl_engine_kernel.c +index 5ff35f5..9dc236c 100644 +--- a/modules/ssl/ssl_engine_kernel.c ++++ b/modules/ssl/ssl_engine_kernel.c +@@ -1416,7 +1416,8 @@ int ssl_callback_SSLVerify(int ok, X509_STORE_CTX *ctx) + /* + * Perform OCSP-based revocation checks + */ +- if (ok && sc->server->ocsp_enabled) { ++ if (ok && ((mctx->ocsp_mask & SSL_OCSPCHECK_CHAIN) || ++ (errdepth == 0 && (mctx->ocsp_mask & SSL_OCSPCHECK_LEAF)))) { + /* If there was an optional verification error, it's not + * possible to perform OCSP validation since the issuer may be + * missing/untrusted. Fail in that case. */ +diff --git a/modules/ssl/ssl_engine_ocsp.c b/modules/ssl/ssl_engine_ocsp.c +index 90da5c2..58d267b 100644 +--- a/modules/ssl/ssl_engine_ocsp.c ++++ b/modules/ssl/ssl_engine_ocsp.c +@@ -136,7 +136,14 @@ static int verify_ocsp_status(X509 *cert, X509_STORE_CTX *ctx, conn_rec *c, + + ruri = determine_responder_uri(sc, cert, c, pool); + if (!ruri) { +- return V_OCSP_CERTSTATUS_UNKNOWN; ++ if (sc->server->ocsp_mask & SSL_OCSPCHECK_NO_OCSP_FOR_CERT_OK) { ++ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c, ++ "Skipping OCSP check for certificate cos no OCSP URL" ++ " found and no_ocsp_for_cert_ok is set"); ++ return V_OCSP_CERTSTATUS_GOOD; ++ } else { ++ return V_OCSP_CERTSTATUS_UNKNOWN; ++ } + } + + request = create_request(ctx, cert, &certID, s, pool); +diff --git a/modules/ssl/ssl_private.h b/modules/ssl/ssl_private.h +index b601316..2d505f9 100644 +--- a/modules/ssl/ssl_private.h ++++ b/modules/ssl/ssl_private.h +@@ -379,6 +379,16 @@ typedef enum { + } ssl_crlcheck_t; + + /** ++ * OCSP checking mask (mode | flags) ++ */ ++typedef enum { ++ SSL_OCSPCHECK_NONE = (0), ++ SSL_OCSPCHECK_LEAF = (1 << 0), ++ SSL_OCSPCHECK_CHAIN = (1 << 1), ++ SSL_OCSPCHECK_NO_OCSP_FOR_CERT_OK = (1 << 2) ++} ssl_ocspcheck_t; ++ ++/** + * Define the SSL pass phrase dialog types + */ + typedef enum { +@@ -668,7 +678,7 @@ typedef struct { + + modssl_auth_ctx_t auth; + +- BOOL ocsp_enabled; /* true if OCSP verification enabled */ ++ int ocsp_mask; + BOOL ocsp_force_default; /* true if the default responder URL is + * used regardless of per-cert URL */ + const char *ocsp_responder; /* default responder URL */ +@@ -796,7 +806,7 @@ const char *ssl_cmd_SSLOCSPDefaultResponder(cmd_parms *cmd, void *dcfg, const ch + const char *ssl_cmd_SSLOCSPResponseTimeSkew(cmd_parms *cmd, void *dcfg, const char *arg); + const char *ssl_cmd_SSLOCSPResponseMaxAge(cmd_parms *cmd, void *dcfg, const char *arg); + const char *ssl_cmd_SSLOCSPResponderTimeout(cmd_parms *cmd, void *dcfg, const char *arg); +-const char *ssl_cmd_SSLOCSPEnable(cmd_parms *cmd, void *dcfg, int flag); ++const char *ssl_cmd_SSLOCSPEnable(cmd_parms *cmd, void *dcfg, const char *flag); + + #ifndef OPENSSL_NO_SRP + const char *ssl_cmd_SSLSRPVerifierFile(cmd_parms *cmd, void *dcfg, const char *arg); diff --git a/SOURCES/httpd-2.4.6-r1833014.patch b/SOURCES/httpd-2.4.6-r1833014.patch new file mode 100644 index 0000000..9a8f455 --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1833014.patch @@ -0,0 +1,83 @@ +diff --git a/modules/http/http_request.c b/modules/http/http_request.c +index c97dc77..9885de4 100644 +--- a/modules/http/http_request.c ++++ b/modules/http/http_request.c +@@ -227,11 +227,21 @@ AP_DECLARE(void) ap_die(int type, request_rec *r) + ap_die_r(type, r, r->status); + } 
+ +-static void check_pipeline(conn_rec *c) ++#define RETRIEVE_BRIGADE_FROM_POOL(bb, key, pool, allocator) do { \ ++ apr_pool_userdata_get((void **)&bb, key, pool); \ ++ if (bb == NULL) { \ ++ bb = apr_brigade_create(pool, allocator); \ ++ apr_pool_userdata_setn((const void *)bb, key, NULL, pool); \ ++ } \ ++ else { \ ++ apr_brigade_cleanup(bb); \ ++ } \ ++} while(0) ++ ++static void check_pipeline(conn_rec *c, apr_bucket_brigade *bb) + { + if (c->keepalive != AP_CONN_CLOSE) { + apr_status_t rv; +- apr_bucket_brigade *bb = apr_brigade_create(c->pool, c->bucket_alloc); + + rv = ap_get_brigade(c->input_filters, bb, AP_MODE_SPECULATIVE, + APR_NONBLOCK_READ, 1); +@@ -245,11 +255,10 @@ static void check_pipeline(conn_rec *c) + else { + c->data_in_input_filters = 1; + } +- apr_brigade_destroy(bb); ++ apr_brigade_cleanup(bb); + } + } + +- + AP_DECLARE(void) ap_process_request_after_handler(request_rec *r) + { + apr_bucket_brigade *bb; +@@ -260,11 +269,13 @@ AP_DECLARE(void) ap_process_request_after_handler(request_rec *r) + * this bucket is destroyed, the request will be logged and + * its pool will be freed + */ +- bb = apr_brigade_create(r->connection->pool, r->connection->bucket_alloc); ++ RETRIEVE_BRIGADE_FROM_POOL(bb, "ap_process_request_after_handler_brigade", ++ c->pool, c->bucket_alloc); + b = ap_bucket_eor_create(r->connection->bucket_alloc, r); + APR_BRIGADE_INSERT_HEAD(bb, b); + + ap_pass_brigade(r->connection->output_filters, bb); ++ apr_brigade_cleanup(bb); + + /* From here onward, it is no longer safe to reference r + * or r->pool, because r->pool may have been destroyed +@@ -273,7 +284,7 @@ AP_DECLARE(void) ap_process_request_after_handler(request_rec *r) + + if (c->cs) + c->cs->state = CONN_STATE_WRITE_COMPLETION; +- check_pipeline(c); ++ check_pipeline(c, bb); + AP_PROCESS_REQUEST_RETURN((uintptr_t)r, r->uri, r->status); + if (ap_extended_status) { + ap_time_process_request(c->sbh, STOP_PREQUEST); +@@ -363,7 +374,8 @@ void ap_process_request(request_rec *r) + ap_process_async_request(r); + + if (!c->data_in_input_filters) { +- bb = apr_brigade_create(c->pool, c->bucket_alloc); ++ RETRIEVE_BRIGADE_FROM_POOL(bb, "ap_process_request_after_handler_brigade", ++ c->pool, c->bucket_alloc); + b = apr_bucket_flush_create(c->bucket_alloc); + APR_BRIGADE_INSERT_HEAD(bb, b); + rv = ap_pass_brigade(c->output_filters, bb); +@@ -380,6 +392,7 @@ void ap_process_request(request_rec *r) + "Timeout while writing data for URI %s to the" + " client", r->unparsed_uri); + } ++ apr_brigade_cleanup(bb); + } + if (ap_extended_status) { + ap_time_process_request(c->sbh, STOP_PREQUEST); diff --git a/SOURCES/httpd-2.4.6-r1861793+.patch b/SOURCES/httpd-2.4.6-r1861793+.patch new file mode 100644 index 0000000..dd7bd81 --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1861793+.patch @@ -0,0 +1,285 @@ +# ./pullrev.sh 1861793 1862611 1862612 +http://svn.apache.org/viewvc?view=revision&revision=1861793 +http://svn.apache.org/viewvc?view=revision&revision=1862611 +http://svn.apache.org/viewvc?view=revision&revision=1862612 +http://svn.apache.org/viewvc?view=revision&revision=1862724 + +--- httpd-2.4.6/configure.in.r1861793+ ++++ httpd-2.4.6/configure.in +@@ -464,6 +464,28 @@ + AC_SEARCH_LIBS(crypt, crypt) + CRYPT_LIBS="$LIBS" + APACHE_SUBST(CRYPT_LIBS) ++ ++if test "$ac_cv_search_crypt" != "no"; then ++ # Test crypt() with the SHA-512 test vector from https://akkadia.org/drepper/SHA-crypt.txt ++ AC_CACHE_CHECK([whether crypt() supports SHA-2], [ap_cv_crypt_sha2], [ ++ AC_RUN_IFELSE([AC_LANG_PROGRAM([[ ++#include ++#include 
++#include ++ ++#define PASSWD_0 "Hello world!" ++#define SALT_0 "\$6\$saltstring" ++#define EXPECT_0 "\$6\$saltstring\$svn8UoSVapNtMuq1ukKS4tPQd8iKwSMHWjl/O817G3uBnIFNjnQJu" \ ++ "esI68u4OTLiBFdcbYEdFCoEOfaS35inz1" ++]], [char *result = crypt(PASSWD_0, SALT_0); ++ if (!result) return 1; ++ if (strcmp(result, EXPECT_0)) return 2; ++])], [ap_cv_crypt_sha2=yes], [ap_cv_crypt_sha2=no])]) ++ if test "$ap_cv_crypt_sha2" = yes; then ++ AC_DEFINE([HAVE_CRYPT_SHA2], 1, [Define if crypt() supports SHA-2 hashes]) ++ fi ++fi ++ + LIBS="$saved_LIBS" + + dnl See Comment #Spoon +--- httpd-2.4.6/docs/man/htpasswd.1.r1861793+ ++++ httpd-2.4.6/docs/man/htpasswd.1 +@@ -27,16 +27,16 @@ + .SH "SYNOPSIS" + + .PP +-\fBhtpasswd\fR [ -\fBc\fR ] [ -\fBi\fR ] [ -\fBm\fR | -\fBB\fR | -\fBd\fR | -\fBs\fR | -\fBp\fR ] [ -\fBC\fR \fIcost\fR ] [ -\fBD\fR ] [ -\fBv\fR ] \fIpasswdfile\fR \fIusername\fR ++\fBhtpasswd\fR [ -\fBc\fR ] [ -\fBi\fR ] [ -\fBm\fR | -\fBB\fR | -\fBd\fR | -\fBs\fR | -\fBp\fR ] [ -\fBr\fR \fIrounds\fR ] [ -\fBC\fR \fIcost\fR ] [ -\fBD\fR ] [ -\fBv\fR ] \fIpasswdfile\fR \fIusername\fR + + .PP +-\fBhtpasswd\fR -\fBb\fR [ -\fBc\fR ] [ -\fBm\fR | -\fBB\fR | -\fBd\fR | -\fBs\fR | -\fBp\fR ] [ -\fBC\fR \fIcost\fR ] [ -\fBD\fR ] [ -\fBv\fR ] \fIpasswdfile\fR \fIusername\fR \fIpassword\fR ++\fBhtpasswd\fR -\fBb\fR [ -\fBc\fR ] [ -\fBm\fR | -\fBB\fR | -\fBd\fR | -\fBs\fR | -\fBp\fR ] [ -\fBr\fR \fIrounds\fR ] [ -\fBC\fR \fIcost\fR ] [ -\fBD\fR ] [ -\fBv\fR ] \fIpasswdfile\fR \fIusername\fR \fIpassword\fR + + .PP +-\fBhtpasswd\fR -\fBn\fR [ -\fBi\fR ] [ -\fBm\fR | -\fBB\fR | -\fBd\fR | -\fBs\fR | -\fBp\fR ] [ -\fBC\fR \fIcost\fR ] \fIusername\fR ++\fBhtpasswd\fR -\fBn\fR [ -\fBi\fR ] [ -\fBm\fR | -\fBB\fR | -\fBd\fR | -\fBs\fR | -\fBp\fR ] [ -\fBr\fR \fIrounds\fR ] [ -\fBC\fR \fIcost\fR ] \fIusername\fR + + .PP +-\fBhtpasswd\fR -\fBnb\fR [ -\fBm\fR | -\fBB\fR | -\fBd\fR | -\fBs\fR | -\fBp\fR ] [ -\fBC\fR \fIcost\fR ] \fIusername\fR \fIpassword\fR ++\fBhtpasswd\fR -\fBnb\fR [ -\fBm\fR | -\fBB\fR | -\fBd\fR | -\fBs\fR | -\fBp\fR ] [ -\fBr\fR \fIrounds\fR ] [ -\fBC\fR \fIcost\fR ] \fIusername\fR \fIpassword\fR + + + .SH "SUMMARY" +@@ -48,7 +48,7 @@ + Resources available from the Apache HTTP server can be restricted to just the users listed in the files created by htpasswd\&. This program can only manage usernames and passwords stored in a flat-file\&. It can encrypt and display password information for use in other types of data stores, though\&. To use a DBM database see dbmmanage or htdbm\&. + + .PP +-htpasswd encrypts passwords using either bcrypt, a version of MD5 modified for Apache, SHA1, or the system's crypt() routine\&. Files managed by htpasswd may contain a mixture of different encoding types of passwords; some user records may have bcrypt or MD5-encrypted passwords while others in the same file may have passwords encrypted with crypt()\&. +++\fBhtpasswd\fR encrypts passwords using either bcrypt, a version of MD5 modified for Apache, SHA-1, or the system's \fBcrypt()\fR routine\&. SHA-2-based hashes (SHA-256 and SHA-512) are supported for \fBcrypt()\fR\&. Files managed by \fBhtpasswd\fR may contain a mixture of different encoding types of passwords; some user records may have bcrypt or MD5-encrypted passwords while others in the same file may have passwords encrypted with \fBcrypt()\fR\&. + + .PP + This manual page only lists the command line arguments\&. 
For details of the directives necessary to configure user authentication in httpd see the Apache manual, which is part of the Apache distribution or can be found at http://httpd\&.apache\&.org/\&. +@@ -73,17 +73,26 @@ + -m + Use MD5 encryption for passwords\&. This is the default (since version 2\&.2\&.18)\&. + .TP ++-2 ++Use SHA-256 \fBcrypt()\fR based hashes for passwords\&. This is supported on most Unix platforms\&. ++.TP ++-5 ++Use SHA-512 \fBcrypt()\fR based hashes for passwords\&. This is supported on most Unix platforms\&. ++.TP + -B + Use bcrypt encryption for passwords\&. This is currently considered to be very secure\&. + .TP + -C + This flag is only allowed in combination with -B (bcrypt encryption)\&. It sets the computing time used for the bcrypt algorithm (higher is more secure but slower, default: 5, valid: 4 to 31)\&. + .TP ++-r ++This flag is only allowed in combination with \fB-2\fR or \fB-5\fR\&. It sets the number of hash rounds used for the SHA-2 algorithms (higher is more secure but slower; the default is 5,000)\&. ++.TP + -d + Use crypt() encryption for passwords\&. This is not supported by the httpd server on Windows and Netware\&. This algorithm limits the password length to 8 characters\&. This algorithm is \fBinsecure\fR by today's standards\&. It used to be the default algorithm until version 2\&.2\&.17\&. + .TP + -s +-Use SHA encryption for passwords\&. Facilitates migration from/to Netscape servers using the LDAP Directory Interchange Format (ldif)\&. This algorithm is \fBinsecure\fR by today's standards\&. ++Use SHA-1 encryption for passwords\&. Facilitates migration from/to Netscape servers using the LDAP Directory Interchange Format (ldif)\&. This algorithm is \fBinsecure\fR by today's standards\&. + .TP + -p + Use plaintext passwords\&. Though htpasswd will support creation on all platforms, the httpd daemon will only accept plain text passwords on Windows and Netware\&. +@@ -152,11 +161,14 @@ + When using the crypt() algorithm, note that only the first 8 characters of the password are used to form the password\&. If the supplied password is longer, the extra characters will be silently discarded\&. + + .PP +-The SHA encryption format does not use salting: for a given password, there is only one encrypted representation\&. The crypt() and MD5 formats permute the representation by prepending a random salt string, to make dictionary attacks against the passwords more difficult\&. ++The SHA-1 encryption format does not use salting: for a given password, there is only one encrypted representation\&. The crypt() and MD5 formats permute the representation by prepending a random salt string, to make dictionary attacks against the passwords more difficult\&. + + .PP +-The SHA and crypt() formats are insecure by today's standards\&. +- ++The SHA-1 and crypt() formats are insecure by today's standards\&. 
++ ++.PP ++The SHA-2-based \fBcrypt()\fR formats (SHA-256 and SHA-512) are supported on most modern Unix systems, and follow the specification at https://www\&.akkadia\&.org/drepper/SHA-crypt\&.txt\& ++ + .SH "RESTRICTIONS" + + .PP +--- httpd-2.4.6/support/htpasswd.c.r1861793+ ++++ httpd-2.4.6/support/htpasswd.c +@@ -93,28 +93,32 @@ + static void usage(void) + { + apr_file_printf(errfile, "Usage:" NL +- "\thtpasswd [-cimBdpsDv] [-C cost] passwordfile username" NL +- "\thtpasswd -b[cmBdpsDv] [-C cost] passwordfile username password" NL ++ "\thtpasswd [-cimB25dpsDv] [-C cost] [-r rounds] passwordfile username" NL ++ "\thtpasswd -b[cmB25dpsDv] [-C cost] [-r rounds] passwordfile username password" NL + NL +- "\thtpasswd -n[imBdps] [-C cost] username" NL +- "\thtpasswd -nb[mBdps] [-C cost] username password" NL ++ "\thtpasswd -n[imB25dps] [-C cost] [-r rounds] username" NL ++ "\thtpasswd -nb[mB25dps] [-C cost] [-r rounds] username password" NL + " -c Create a new file." NL + " -n Don't update file; display results on stdout." NL + " -b Use the password from the command line rather than prompting " + "for it." NL + " -i Read password from stdin without verification (for script usage)." NL + " -m Force MD5 encryption of the password (default)." NL +- " -B Force bcrypt encryption of the password (very secure)." NL ++ " -2 Force SHA-256 crypt() hash of the password (secure)." NL ++ " -5 Force SHA-512 crypt() hash of the password (secure)." NL ++ " -B Force bcrypt aencryption of the password (very secure)." NL + " -C Set the computing time used for the bcrypt algorithm" NL + " (higher is more secure but slower, default: %d, valid: 4 to 31)." NL ++ " -r Set the number of rounds used for the SHA-256, SHA-512 algorithms" NL ++ " (higher is more secure but slower, default: 5000)." NL + " -d Force CRYPT encryption of the password (8 chars max, insecure)." NL +- " -s Force SHA encryption of the password (insecure)." NL ++ " -s Force SHA-1 encryption of the password (insecure)." NL + " -p Do not encrypt the password (plaintext, insecure)." NL + " -D Delete the specified user." NL + " -v Verify password for the specified user." NL + "On other systems than Windows and NetWare the '-p' flag will " + "probably not work." NL +- "The SHA algorithm does not use a salt and is less secure than the " ++ "The SHA-1 algorithm does not use a salt and is less secure than the " + "MD5 algorithm." NL, + BCRYPT_DEFAULT_COST + ); +@@ -173,7 +177,7 @@ + if (rv != APR_SUCCESS) + exit(ERR_SYNTAX); + +- while ((rv = apr_getopt(state, "cnmspdBbDiC:v", &opt, &opt_arg)) == APR_SUCCESS) { ++ while ((rv = apr_getopt(state, "cnmspdBbDi25C:r:v", &opt, &opt_arg)) == APR_SUCCESS) { + switch (opt) { + case 'c': + *mask |= APHTP_NEWFILE; +--- httpd-2.4.6/support/passwd_common.c.r1861793+ ++++ httpd-2.4.6/support/passwd_common.c +@@ -185,10 +185,15 @@ + #if CRYPT_ALGO_SUPPORTED + char *cbuf; + #endif ++#ifdef HAVE_CRYPT_SHA2 ++ const char *setting; ++ char method; ++#endif + +- if (ctx->cost != 0 && ctx->alg != ALG_BCRYPT) { ++ if (ctx->cost != 0 && ctx->alg != ALG_BCRYPT ++ && ctx->alg != ALG_CRYPT_SHA256 && ctx->alg != ALG_CRYPT_SHA512 ) { + apr_file_printf(errfile, +- "Warning: Ignoring -C argument for this algorithm." NL); ++ "Warning: Ignoring -C/-r argument for this algorithm." 
NL); + } + + if (ctx->passwd == NULL) { +@@ -246,6 +251,34 @@ + break; + #endif /* CRYPT_ALGO_SUPPORTED */ + ++#ifdef HAVE_CRYPT_SHA2 ++ case ALG_CRYPT_SHA256: ++ case ALG_CRYPT_SHA512: ++ ret = generate_salt(salt, 16, &ctx->errstr, ctx->pool); ++ if (ret != 0) ++ break; ++ ++ method = ctx->alg == ALG_CRYPT_SHA256 ? '5': '6'; ++ ++ if (ctx->cost) ++ setting = apr_psprintf(ctx->pool, "$%c$rounds=%d$%s", ++ method, ctx->cost, salt); ++ else ++ setting = apr_psprintf(ctx->pool, "$%c$%s", ++ method, salt); ++ ++ cbuf = crypt(pw, setting); ++ if (cbuf == NULL) { ++ rv = APR_FROM_OS_ERROR(errno); ++ ctx->errstr = apr_psprintf(ctx->pool, "crypt() failed: %pm", &rv); ++ ret = ERR_PWMISMATCH; ++ break; ++ } ++ ++ apr_cpystrn(ctx->out, cbuf, ctx->out_len - 1); ++ break; ++#endif /* HAVE_CRYPT_SHA2 */ ++ + #if BCRYPT_ALGO_SUPPORTED + case ALG_BCRYPT: + rv = apr_generate_random_bytes((unsigned char*)salt, 16); +@@ -294,6 +327,19 @@ + case 's': + ctx->alg = ALG_APSHA; + break; ++#ifdef HAVE_CRYPT_SHA2 ++ case '2': ++ ctx->alg = ALG_CRYPT_SHA256; ++ break; ++ case '5': ++ ctx->alg = ALG_CRYPT_SHA512; ++ break; ++#else ++ case '2': ++ case '5': ++ ctx->errstr = "SHA-2 crypt() algorithms are not supported on this platform."; ++ return ERR_ALG_NOT_SUPP; ++#endif + case 'p': + ctx->alg = ALG_PLAIN; + #if !PLAIN_ALGO_SUPPORTED +@@ -324,11 +370,12 @@ + return ERR_ALG_NOT_SUPP; + #endif + break; +- case 'C': { ++ case 'C': ++ case 'r': { + char *endptr; + long num = strtol(opt_arg, &endptr, 10); + if (*endptr != '\0' || num <= 0) { +- ctx->errstr = "argument to -C must be a positive integer"; ++ ctx->errstr = "argument to -C/-r must be a positive integer"; + return ERR_SYNTAX; + } + ctx->cost = num; +--- httpd-2.4.6/support/passwd_common.h.r1861793+ ++++ httpd-2.4.6/support/passwd_common.h +@@ -28,6 +28,8 @@ + #include "apu_version.h" + #endif + ++#include "ap_config_auto.h" ++ + #define MAX_STRING_LEN 256 + + #define ALG_PLAIN 0 +@@ -35,6 +37,8 @@ + #define ALG_APMD5 2 + #define ALG_APSHA 3 + #define ALG_BCRYPT 4 ++#define ALG_CRYPT_SHA256 5 ++#define ALG_CRYPT_SHA512 6 + + #define BCRYPT_DEFAULT_COST 5 + +@@ -79,7 +83,7 @@ + apr_size_t out_len; + char *passwd; + int alg; +- int cost; ++ int cost; /* cost for bcrypt, rounds for SHA-2 */ + enum { + PW_PROMPT = 0, + PW_ARG, diff --git a/SOURCES/httpd-2.4.6-r1862604.patch b/SOURCES/httpd-2.4.6-r1862604.patch new file mode 100644 index 0000000..83c6c4a --- /dev/null +++ b/SOURCES/httpd-2.4.6-r1862604.patch @@ -0,0 +1,22 @@ +--- a/docs/conf/magic 2019/07/05 11:22:46 1862603 ++++ b/docs/conf/magic 2019/07/05 11:26:12 1862604 +@@ -87,7 +87,7 @@ + # Microsoft WAVE format (*.wav) + # [GRR 950115: probably all of the shorts and longs should be leshort/lelong] + # Microsoft RIFF +-0 string RIFF audio/unknown ++0 string RIFF + # - WAVE format + >8 string WAVE audio/x-wav + # MPEG audio. 
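
(Aside on the SHA-2 support added by httpd-2.4.6-r1861793+.patch above, before the r1862604 hunks continue: the string handed to crypt() as its second argument both selects the hash -- "$5$" for SHA-256, "$6$" for SHA-512 -- and carries the optional "rounds=N$" work factor, which is exactly what the apr_psprintf() calls in passwd_common.c assemble. The standalone sketch below demonstrates that setting-string convention outside httpd; the password, salt and rounds values are invented for illustration, and linking may require -lcrypt depending on the platform's libc or libxcrypt.)

#include <crypt.h>   /* crypt(); link with -lcrypt on most glibc/libxcrypt systems */
#include <stdio.h>

int main(void)
{
    /* "$6$" selects SHA-512, "$5$" would select SHA-256; the optional
     * "rounds=N$" field raises the work factor (default 5000).  The salt
     * is a fixed placeholder here, not a randomly generated one as in
     * htpasswd's generate_salt(). */
    const char *setting = "$6$rounds=10000$examplesaltvalue";
    char *hash = crypt("Hello world!", setting);

    if (hash == NULL || hash[0] == '*') {
        /* crypt() implementations signal failure with NULL or a "*"-prefixed string. */
        fprintf(stderr, "crypt() failed or SHA-512 crypt() is unsupported here\n");
        return 1;
    }
    printf("%s\n", hash);
    return 0;
}
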
+--- a/modules/metadata/mod_mime_magic.c 2019/07/05 11:22:46 1862603 ++++ b/modules/metadata/mod_mime_magic.c 2019/07/05 11:26:12 1862604 +@@ -606,7 +606,7 @@ + /* high overhead for 1 char - just hope they don't do this much */ + str[0] = c; + str[1] = '\0'; +- return magic_rsl_add(r, str); ++ return magic_rsl_add(r, apr_pstrdup(r->pool, str)); + } + + /* allocate and copy a contiguous string from a result string list */ diff --git a/SOURCES/httpd-2.4.6-rewrite-clientaddr.patch b/SOURCES/httpd-2.4.6-rewrite-clientaddr.patch new file mode 100644 index 0000000..e2bd079 --- /dev/null +++ b/SOURCES/httpd-2.4.6-rewrite-clientaddr.patch @@ -0,0 +1,14 @@ +--- a/modules/mappers/mod_rewrite.c 2014/01/28 19:40:17 1562174 ++++ b/modules/mappers/mod_rewrite.c 2014/02/10 18:54:23 1566702 +@@ -2139,7 +2139,10 @@ + break; + + case 16: +- if (!strcmp(var, "REQUEST_FILENAME")) { ++ if (*var == 'C' && !strcmp(var, "CONN_REMOTE_ADDR")) { ++ result = r->connection->client_ip; ++ } ++ else if (!strcmp(var, "REQUEST_FILENAME")) { + result = r->filename; /* same as script_filename (15) */ + } + break; diff --git a/SOURCES/httpd-2.4.6-rewrite-dir.patch b/SOURCES/httpd-2.4.6-rewrite-dir.patch new file mode 100644 index 0000000..c29f0a6 --- /dev/null +++ b/SOURCES/httpd-2.4.6-rewrite-dir.patch @@ -0,0 +1,59 @@ +Index: modules/mappers/mod_rewrite.h +=================================================================== +--- a/modules/mappers/mod_rewrite.h (revision 1560696) ++++ b/modules/mappers/mod_rewrite.h (revision 1560697) +@@ -29,6 +29,8 @@ + #include "apr_optional.h" + #include "httpd.h" + ++#define REWRITE_REDIRECT_HANDLER_NAME "redirect-handler" ++ + /* rewrite map function prototype */ + typedef char *(rewrite_mapfunc_t)(request_rec *r, char *key); + +Index: modules/mappers/mod_dir.c +=================================================================== +--- a/modules/mappers/mod_dir.c (revision 1560696) ++++ b/modules/mappers/mod_dir.c (revision 1560697) +@@ -29,6 +29,7 @@ + #include "http_log.h" + #include "http_main.h" + #include "util_script.h" ++#include "mod_rewrite.h" + + module AP_MODULE_DECLARE_DATA dir_module; + +@@ -260,6 +261,11 @@ + return HTTP_MOVED_PERMANENTLY; + } + ++ /* we're running between mod_rewrites fixup and its internal redirect handler, step aside */ ++ if (!strcmp(r->handler, REWRITE_REDIRECT_HANDLER_NAME)) { ++ return DECLINED; ++ } ++ + if (d->index_names) { + names_ptr = (char **)d->index_names->elts; + num_names = d->index_names->nelts; +Index: modules/mappers/mod_rewrite.c +=================================================================== +--- a/modules/mappers/mod_rewrite.c (revision 1560696) ++++ b/modules/mappers/mod_rewrite.c (revision 1560697) +@@ -5004,7 +5004,7 @@ + rewritelog((r, 1, dconf->directory, "internal redirect with %s " + "[INTERNAL REDIRECT]", r->filename)); + r->filename = apr_pstrcat(r->pool, "redirect:", r->filename, NULL); +- r->handler = "redirect-handler"; ++ r->handler = REWRITE_REDIRECT_HANDLER_NAME; + return OK; + } + } +@@ -5050,7 +5050,7 @@ + */ + static int handler_redirect(request_rec *r) + { +- if (strcmp(r->handler, "redirect-handler")) { ++ if (strcmp(r->handler, REWRITE_REDIRECT_HANDLER_NAME)) { + return DECLINED; + } + diff --git a/SOURCES/httpd-2.4.6-rotatelog-timezone.patch b/SOURCES/httpd-2.4.6-rotatelog-timezone.patch new file mode 100644 index 0000000..2ef4f11 --- /dev/null +++ b/SOURCES/httpd-2.4.6-rotatelog-timezone.patch @@ -0,0 +1,97 @@ +diff --git a/support/rotatelogs.c b/support/rotatelogs.c +index d75d018..216bb12 100644 +--- 
a/support/rotatelogs.c ++++ b/support/rotatelogs.c +@@ -178,14 +178,14 @@ static void usage(const char *argv0, const char *reason) + exit(1); + } + +-/* +- * Get the unix time with timezone corrections +- * given in the config struct. +- */ +-static int get_now(rotate_config_t *config) ++/* This function returns the current Unix time (time_t) plus any ++ * configured or derived local time offset. The offset applied is ++ * returned via *offset. */ ++static int get_now(rotate_config_t *config, apr_int32_t *offset) + { + apr_time_t tNow = apr_time_now(); +- int utc_offset = config->utc_offset; ++ int utc_offset; ++ + if (config->use_localtime) { + /* Check for our UTC offset before using it, since it might + * change if there's a switch between standard and daylight +@@ -195,6 +195,13 @@ static int get_now(rotate_config_t *config) + apr_time_exp_lt(<, tNow); + utc_offset = lt.tm_gmtoff; + } ++ else { ++ utc_offset = config->utc_offset; ++ } ++ ++ if (offset) ++ *offset = utc_offset; ++ + return (int)apr_time_sec(tNow) + utc_offset; + } + +@@ -258,13 +265,13 @@ static void checkRotate(rotate_config_t *config, rotate_status_t *status) + status->rotateReason = ROTATE_SIZE; + } + else if (config->tRotation) { +- if (get_now(config) >= status->tLogEnd) { ++ if (get_now(config, NULL) >= status->tLogEnd) { + status->rotateReason = ROTATE_TIME; + } + } + } + else if (config->tRotation) { +- if (get_now(config) >= status->tLogEnd) { ++ if (get_now(config, NULL) >= status->tLogEnd) { + status->rotateReason = ROTATE_TIME; + } + } +@@ -371,12 +378,16 @@ static void post_rotate(apr_pool_t *pool, struct logfile *newlog, + static void doRotate(rotate_config_t *config, rotate_status_t *status) + { + +- int now = get_now(config); ++ apr_int32_t offset; ++ int now; + int tLogStart; + apr_status_t rv; + struct logfile newlog; + int thisLogNum = -1; + ++ /* Retrieve local-time-adjusted-Unix-time. */ ++ now = get_now(config, &offset); ++ + status->rotateReason = ROTATE_NONE; + + if (config->tRotation) { +@@ -401,7 +412,13 @@ static void doRotate(rotate_config_t *config, rotate_status_t *status) + apr_time_exp_t e; + apr_size_t rs; + +- apr_time_exp_gmt(&e, tNow); ++ /* Explode the local-time-adjusted-Unix-time into a struct tm, ++ * first *reversing* local-time-adjustment applied by ++ * get_now() if we are using localtime. */ ++ if (config->use_localtime) ++ apr_time_exp_lt(&e, tNow - apr_time_from_sec(offset)); ++ else ++ apr_time_exp_gmt(&e, tNow); + apr_strftime(newlog.name, &rs, sizeof(newlog.name), config->szLogRoot, &e); + } + else { +@@ -648,7 +665,7 @@ int main (int argc, const char * const argv[]) + nRead = sizeof(buf); + #if APR_FILES_AS_SOCKETS + if (config.create_empty && config.tRotation) { +- polltimeout = status.tLogEnd ? status.tLogEnd - get_now(&config) : config.tRotation; ++ polltimeout = status.tLogEnd ? 
status.tLogEnd - get_now(&config, NULL) : config.tRotation; + if (polltimeout <= 0) { + pollret = APR_TIMEUP; + } diff --git a/SOURCES/httpd-2.4.6-rotatelogs-zombie.patch b/SOURCES/httpd-2.4.6-rotatelogs-zombie.patch new file mode 100644 index 0000000..04a2c33 --- /dev/null +++ b/SOURCES/httpd-2.4.6-rotatelogs-zombie.patch @@ -0,0 +1,23 @@ +diff --git a/support/rotatelogs.c b/support/rotatelogs.c +index 55c4406..f4c6490 100644 +--- a/support/rotatelogs.c ++++ b/support/rotatelogs.c +@@ -49,6 +49,7 @@ + #include "apr_time.h" + #include "apr_getopt.h" + #include "apr_thread_proc.h" ++#include "apr_signal.h" + #if APR_FILES_AS_SOCKETS + #include "apr_poll.h" + #endif +@@ -595,6 +596,10 @@ int main (int argc, const char * const argv[]) + break; + case 'p': + config.postrotate_prog = opt_arg; ++#ifdef SIGCHLD ++ /* Prevent creation of zombies (on modern Unix systems). */ ++ apr_signal(SIGCHLD, SIG_IGN); ++#endif + break; + case 'f': + config.force_open = 1; diff --git a/SOURCES/httpd-2.4.6-sigint.patch b/SOURCES/httpd-2.4.6-sigint.patch new file mode 100644 index 0000000..7574a9c --- /dev/null +++ b/SOURCES/httpd-2.4.6-sigint.patch @@ -0,0 +1,45 @@ +From 20656c3b77cc548b59fea3bde5e2b7705d71c427 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Jan=20Kalu=C5=BEa?= +Date: Mon, 18 Aug 2014 07:43:43 +0000 +Subject: [PATCH] prefork: Ignore SIGINT in child. This fixes race-condition in + signals handling when httpd is runnning on foreground and user hits ctrl+c. + In this case, SIGINT is sent to all children followed by SIGTERM from the + main process, which interrupts the SIGINT handler and leads to inconsistency + (process freezes or crashes). + +git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1618555 13f79535-47bb-0310-9956-ffa450edef68 +--- + server/mpm/prefork/prefork.c | 10 ++++++++++ + 1 file changed, 10 insertions(+) + +diff --git a/server/mpm/prefork/prefork.c b/server/mpm/prefork/prefork.c +index 8790ec0..d6c038b 100644 +--- a/server/mpm/prefork/prefork.c ++++ b/server/mpm/prefork/prefork.c +@@ -222,6 +222,9 @@ static void clean_child_exit(int code) + { + mpm_state = AP_MPMQ_STOPPING; + ++ apr_signal(SIGHUP, SIG_IGN); ++ apr_signal(SIGTERM, SIG_IGN); ++ + if (pchild) { + apr_pool_destroy(pchild); + } +@@ -817,6 +820,13 @@ static int make_child(server_rec *s, int slot) + */ + apr_signal(SIGHUP, just_die); + apr_signal(SIGTERM, just_die); ++ /* Ignore SIGINT in child. This fixes race-condition in signals ++ * handling when httpd is runnning on foreground and user hits ctrl+c. ++ * In this case, SIGINT is sent to all children followed by SIGTERM ++ * from the main process, which interrupts the SIGINT handler and ++ * leads to inconsistency. ++ */ ++ apr_signal(SIGINT, SIG_IGN); + /* The child process just closes listeners on AP_SIG_GRACEFUL. + * The pod is used for signalling the graceful restart. 
+ */ +-- +2.0.4 + diff --git a/SOURCES/httpd-2.4.6-ssl-ecdh-auto.patch b/SOURCES/httpd-2.4.6-ssl-ecdh-auto.patch new file mode 100644 index 0000000..7cbb1ec --- /dev/null +++ b/SOURCES/httpd-2.4.6-ssl-ecdh-auto.patch @@ -0,0 +1,22 @@ +diff --git a/modules/ssl/ssl_engine_init.c b/modules/ssl/ssl_engine_init.c +index 0275452..8efdcd7 100644 +--- a/modules/ssl/ssl_engine_init.c ++++ b/modules/ssl/ssl_engine_init.c +@@ -1144,11 +1144,16 @@ static void ssl_init_server_certs(server_rec *s, + OBJ_nid2sn(nid), vhost_id, mctx->pks->cert_files[0]); + } + /* +- * ...otherwise, configure NIST P-256 (required to enable ECDHE) ++ * ...otherwise, enable auto curve selection (OpenSSL 1.0.2 and later) ++ * or configure NIST P-256 (required to enable ECDHE for earlier versions) + */ + else { ++#if defined(SSL_CTX_set_ecdh_auto) ++ SSL_CTX_set_ecdh_auto(mctx->ssl_ctx, 1); ++#else + SSL_CTX_set_tmp_ecdh(mctx->ssl_ctx, + EC_KEY_new_by_curve_name(NID_X9_62_prime256v1)); ++#endif + } + #endif + } diff --git a/SOURCES/httpd-2.4.6-ssl-error-free.patch b/SOURCES/httpd-2.4.6-ssl-error-free.patch new file mode 100644 index 0000000..6f268d2 --- /dev/null +++ b/SOURCES/httpd-2.4.6-ssl-error-free.patch @@ -0,0 +1,46 @@ +diff --git a/modules/ssl/mod_ssl.c b/modules/ssl/mod_ssl.c +index 19d3ec7..926e05e 100644 +--- a/modules/ssl/mod_ssl.c ++++ b/modules/ssl/mod_ssl.c +@@ -295,9 +295,12 @@ static apr_status_t ssl_cleanup_pre_config(void *data) + #endif + ERR_remove_state(0); + +- /* Don't call ERR_free_strings here; ERR_load_*_strings only +- * actually load the error strings once per process due to static ++ /* Don't call ERR_free_strings in earlier versions, ERR_load_*_strings only ++ * actually loaded the error strings once per process due to static + * variable abuse in OpenSSL. */ ++#if (OPENSSL_VERSION_NUMBER >= 0x00090805f) ++ ERR_free_strings(); ++#endif + + /* Also don't call CRYPTO_cleanup_all_ex_data here; any registered + * ex_data indices may have been cached in static variables in +diff --git a/modules/ssl/ssl_engine_init.c b/modules/ssl/ssl_engine_init.c +index 8425acb..508991e 100644 +--- a/modules/ssl/ssl_engine_init.c ++++ b/modules/ssl/ssl_engine_init.c +@@ -1065,7 +1065,7 @@ static void ssl_init_server_certs(server_rec *s, + const char *ecc_id; + EC_GROUP *ecparams; + int nid; +- EC_KEY *eckey; ++ EC_KEY *eckey = NULL; + #endif + const char *vhost_id = mctx->sc->vhost_id; + int i; +@@ -1151,10 +1151,11 @@ static void ssl_init_server_certs(server_rec *s, + #if defined(SSL_CTX_set_ecdh_auto) + SSL_CTX_set_ecdh_auto(mctx->ssl_ctx, 1); + #else +- SSL_CTX_set_tmp_ecdh(mctx->ssl_ctx, +- EC_KEY_new_by_curve_name(NID_X9_62_prime256v1)); ++ eckey = EC_KEY_new_by_curve_name(NID_X9_62_prime256v1); ++ SSL_CTX_set_tmp_ecdh(mctx->ssl_ctx, eckey); + #endif + } ++ EC_KEY_free(eckey); + #endif + } + diff --git a/SOURCES/httpd-2.4.6-ssl-large-keys.patch b/SOURCES/httpd-2.4.6-ssl-large-keys.patch new file mode 100644 index 0000000..46078ba --- /dev/null +++ b/SOURCES/httpd-2.4.6-ssl-large-keys.patch @@ -0,0 +1,173 @@ +diff --git a/modules/ssl/ssl_engine_init.c b/modules/ssl/ssl_engine_init.c +index 46769e9..0275452 100644 +--- a/modules/ssl/ssl_engine_init.c ++++ b/modules/ssl/ssl_engine_init.c +@@ -41,6 +41,79 @@ + #define KEYTYPES "RSA or DSA" + #endif + ++/* ++ * Grab well-defined DH parameters from OpenSSL, see the get_rfc* ++ * functions in for all available primes. 
++ */ ++static DH *make_dh_params(BIGNUM *(*prime)(BIGNUM *), const char *gen) ++{ ++ DH *dh = DH_new(); ++ ++ if (!dh) { ++ return NULL; ++ } ++ dh->p = prime(NULL); ++ BN_dec2bn(&dh->g, gen); ++ if (!dh->p || !dh->g) { ++ DH_free(dh); ++ return NULL; ++ } ++ return dh; ++} ++ ++/* Storage and initialization for DH parameters. */ ++static struct dhparam { ++ BIGNUM *(*const prime)(BIGNUM *); /* function to generate... */ ++ DH *dh; /* ...this, used for keys.... */ ++ const unsigned int min; /* ...of length >= this. */ ++} dhparams[] = { ++ { get_rfc3526_prime_8192, NULL, 6145 }, ++ { get_rfc3526_prime_6144, NULL, 4097 }, ++ { get_rfc3526_prime_4096, NULL, 3073 }, ++ { get_rfc3526_prime_3072, NULL, 2049 }, ++ { get_rfc3526_prime_2048, NULL, 1025 }, ++ { get_rfc2409_prime_1024, NULL, 0 } ++}; ++ ++static void init_dh_params(void) ++{ ++ unsigned n; ++ ++ for (n = 0; n < sizeof(dhparams)/sizeof(dhparams[0]); n++) ++ dhparams[n].dh = make_dh_params(dhparams[n].prime, "2"); ++} ++ ++static void free_dh_params(void) ++{ ++ unsigned n; ++ ++ /* DH_free() is a noop for a NULL parameter, so these are harmless ++ * in the (unexpected) case where these variables are already ++ * NULL. */ ++ for (n = 0; n < sizeof(dhparams)/sizeof(dhparams[0]); n++) { ++ DH_free(dhparams[n].dh); ++ dhparams[n].dh = NULL; ++ } ++} ++ ++/* Hand out the same DH structure though once generated as we leak ++ * memory otherwise and freeing the structure up after use would be ++ * hard to track and in fact is not needed at all as it is safe to ++ * use the same parameters over and over again security wise (in ++ * contrast to the keys itself) and code safe as the returned structure ++ * is duplicated by OpenSSL anyway. Hence no modification happens ++ * to our copy. */ ++DH *modssl_get_dh_params(unsigned keylen) ++{ ++ unsigned n; ++ ++ for (n = 0; n < sizeof(dhparams)/sizeof(dhparams[0]); n++) ++ if (keylen >= dhparams[n].min) ++ return dhparams[n].dh; ++ ++ return NULL; /* impossible to reach. */ ++} ++ + static void ssl_add_version_components(apr_pool_t *p, + server_rec *s) + { +@@ -244,6 +317,8 @@ int ssl_init_Module(apr_pool_t *p, apr_pool_t *plog, + + SSL_init_app_data2_idx(); /* for SSL_get_app_data2() at request time */ + ++ init_dh_params(); ++ + return OK; + } + +@@ -1623,6 +1698,8 @@ apr_status_t ssl_init_ModuleKill(void *data) + ssl_init_ctx_cleanup_server(sc->server); + } + ++ free_dh_params(); ++ + return APR_SUCCESS; + } + +diff --git a/modules/ssl/ssl_engine_kernel.c b/modules/ssl/ssl_engine_kernel.c +index 2d6d59e..1ecbccd 100644 +--- a/modules/ssl/ssl_engine_kernel.c ++++ b/modules/ssl/ssl_engine_kernel.c +@@ -1287,34 +1287,6 @@ const authz_provider ssl_authz_provider_verify_client = + */ + + /* +- * Grab well-defined DH parameters from OpenSSL, see +- * (get_rfc*) for all available primes. 
+- */ +-#define make_get_dh(rfc,size,gen) \ +-static DH *get_dh##size(void) \ +-{ \ +- DH *dh; \ +- if (!(dh = DH_new())) { \ +- return NULL; \ +- } \ +- dh->p = get_##rfc##_prime_##size(NULL); \ +- BN_dec2bn(&dh->g, #gen); \ +- if (!dh->p || !dh->g) { \ +- DH_free(dh); \ +- return NULL; \ +- } \ +- return dh; \ +-} +- +-/* +- * Prepare DH parameters from 1024 to 4096 bits, in 1024-bit increments +- */ +-make_get_dh(rfc2409, 1024, 2) +-make_get_dh(rfc3526, 2048, 2) +-make_get_dh(rfc3526, 3072, 2) +-make_get_dh(rfc3526, 4096, 2) +- +-/* + * Hand out standard DH parameters, based on the authentication strength + */ + DH *ssl_callback_TmpDH(SSL *ssl, int export, int keylen) +@@ -1342,14 +1314,7 @@ DH *ssl_callback_TmpDH(SSL *ssl, int export, int keylen) + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c, + "handing out built-in DH parameters for %d-bit authenticated connection", keylen); + +- if (keylen >= 4096) +- return get_dh4096(); +- else if (keylen >= 3072) +- return get_dh3072(); +- else if (keylen >= 2048) +- return get_dh2048(); +- else +- return get_dh1024(); ++ return modssl_get_dh_params(keylen); + } + + /* +diff --git a/modules/ssl/ssl_private.h b/modules/ssl/ssl_private.h +index 744af9e..f47ed47 100644 +--- a/modules/ssl/ssl_private.h ++++ b/modules/ssl/ssl_private.h +@@ -990,6 +990,11 @@ OCSP_RESPONSE *modssl_dispatch_ocsp_request(const apr_uri_t *uri, + conn_rec *c, apr_pool_t *p); + #endif + ++/* Retrieve DH parameters for given key length. Return value should ++ * be treated as unmutable, since it is stored in process-global ++ * memory. */ ++DH *modssl_get_dh_params(unsigned keylen); ++ + #endif /* SSL_PRIVATE_H */ + /** @} */ + diff --git a/SOURCES/httpd-2.4.6-sslmultiproxy.patch b/SOURCES/httpd-2.4.6-sslmultiproxy.patch new file mode 100644 index 0000000..f8a3b4b --- /dev/null +++ b/SOURCES/httpd-2.4.6-sslmultiproxy.patch @@ -0,0 +1,94 @@ + +Ugly hack to enable mod_ssl and mod_nss to "share" hooks. 
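
A note before the patch body: the sharing mechanism this "ugly hack" relies on is httpd's optional-function registry (the APR_RETRIEVE_OPTIONAL_FN/APR_REGISTER_OPTIONAL_FN calls visible below), where mod_ssl remembers any ssl_proxy_enable/ssl_is_https provider registered earlier by another SSL module and falls back to it whenever mod_ssl itself is not configured for the connection. The toy program below models that hand-off with plain C function pointers instead of the APR registry, purely as an illustration; the module names and behaviour are simplified stand-ins, not httpd code.

#include <stdio.h>

/* Toy stand-in for an optional-function registry: the previously
 * registered provider (if any) is remembered before being overridden. */
typedef int (*proxy_enable_fn)(void);

static proxy_enable_fn othermod_proxy_enable;  /* e.g. the other SSL module's hook */
static int mod_ssl_configured;                 /* stands in for SSLProxyEngine on/off */

static int nss_proxy_enable(void)
{
    puts("other SSL module handles the proxy connection");
    return 1;
}

static int ssl_proxy_enable(void)
{
    if (!mod_ssl_configured) {
        if (othermod_proxy_enable) {
            /* Not configured here: pass through to the other module. */
            return othermod_proxy_enable();
        }
        puts("SSL proxy requested but not enabled");
        return 0;
    }
    puts("mod_ssl handles the proxy connection");
    return 1;
}

int main(void)
{
    othermod_proxy_enable = nss_proxy_enable;  /* "retrieve" the earlier provider */
    return ssl_proxy_enable() ? 0 : 1;
}
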
+ +--- httpd-2.4.6/modules/ssl/mod_ssl.c.sslmultiproxy ++++ httpd-2.4.6/modules/ssl/mod_ssl.c +@@ -369,6 +369,9 @@ static SSLConnRec *ssl_init_connection_c + return sslconn; + } + ++static typeof(ssl_proxy_enable) *othermod_proxy_enable; ++static typeof(ssl_engine_disable) *othermod_engine_disable; ++ + int ssl_proxy_enable(conn_rec *c) + { + SSLSrvConfigRec *sc; +@@ -377,6 +380,12 @@ int ssl_proxy_enable(conn_rec *c) + sc = mySrvConfig(sslconn->server); + + if (!sc->proxy_enabled) { ++ if (othermod_proxy_enable) { ++ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, ++ "mod_ssl proxy not configured, passing through to other module."); ++ return othermod_proxy_enable(c); ++ } ++ + ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, c, APLOGNO(01961) + "SSL Proxy requested for %s but not enabled " + "[Hint: SSLProxyEngine]", sc->vhost_id); +@@ -396,6 +405,10 @@ int ssl_engine_disable(conn_rec *c) + + SSLConnRec *sslconn = myConnConfig(c); + ++ if (othermod_engine_disable) { ++ othermod_engine_disable(c); ++ } ++ + if (sslconn) { + sc = mySrvConfig(sslconn->server); + } +@@ -612,6 +625,9 @@ static void ssl_register_hooks(apr_pool_ + ap_hook_post_read_request(ssl_hook_ReadReq, pre_prr,NULL, APR_HOOK_MIDDLE); + + ssl_var_register(p); ++ ++ othermod_proxy_enable = APR_RETRIEVE_OPTIONAL_FN(ssl_proxy_enable); ++ othermod_engine_disable = APR_RETRIEVE_OPTIONAL_FN(ssl_engine_disable); + + APR_REGISTER_OPTIONAL_FN(ssl_proxy_enable); + APR_REGISTER_OPTIONAL_FN(ssl_engine_disable); +--- httpd-2.4.6/modules/ssl/ssl_engine_vars.c.sslmultiproxy ++++ httpd-2.4.6/modules/ssl/ssl_engine_vars.c +@@ -53,10 +53,15 @@ static void ssl_var_lookup_ssl_cipher_b + static char *ssl_var_lookup_ssl_version(apr_pool_t *p, char *var); + static char *ssl_var_lookup_ssl_compress_meth(SSL *ssl); + ++static APR_OPTIONAL_FN_TYPE(ssl_is_https) *othermod_is_https; ++static APR_OPTIONAL_FN_TYPE(ssl_var_lookup) *othermod_var_lookup; ++ + static int ssl_is_https(conn_rec *c) + { + SSLConnRec *sslconn = myConnConfig(c); +- return sslconn && sslconn->ssl; ++ ++ return (sslconn && sslconn->ssl) ++ || (othermod_is_https && othermod_is_https(c)); + } + + static const char var_interface[] = "mod_ssl/" AP_SERVER_BASEREVISION; +@@ -106,6 +111,9 @@ void ssl_var_register(apr_pool_t *p) + { + char *cp, *cp2; + ++ othermod_is_https = APR_RETRIEVE_OPTIONAL_FN(ssl_is_https); ++ othermod_var_lookup = APR_RETRIEVE_OPTIONAL_FN(ssl_var_lookup); ++ + APR_REGISTER_OPTIONAL_FN(ssl_is_https); + APR_REGISTER_OPTIONAL_FN(ssl_var_lookup); + APR_REGISTER_OPTIONAL_FN(ssl_ext_list); +@@ -241,6 +249,15 @@ char *ssl_var_lookup(apr_pool_t *p, serv + */ + if (result == NULL && c != NULL) { + SSLConnRec *sslconn = myConnConfig(c); ++ ++ if (strlen(var) > 4 && strcEQn(var, "SSL_", 4) ++ && (!sslconn || !sslconn->ssl) && othermod_var_lookup) { ++ /* For an SSL_* variable, if mod_ssl is not enabled for ++ * this connection and another SSL module is present, pass ++ * through to that module. 
*/ ++ return othermod_var_lookup(p, s, c, r, var); ++ } ++ + if (strlen(var) > 4 && strcEQn(var, "SSL_", 4) + && sslconn && sslconn->ssl) + result = ssl_var_lookup_ssl(p, c, r, var+4); diff --git a/SOURCES/httpd-2.4.6-statements-comment.patch b/SOURCES/httpd-2.4.6-statements-comment.patch new file mode 100644 index 0000000..fd56e11 --- /dev/null +++ b/SOURCES/httpd-2.4.6-statements-comment.patch @@ -0,0 +1,16 @@ +diff --git a/modules/aaa/mod_access_compat.c b/modules/aaa/mod_access_compat.c +index 46d8da0..0a5d5a1 100644 +--- a/modules/aaa/mod_access_compat.c ++++ b/modules/aaa/mod_access_compat.c +@@ -152,6 +152,11 @@ static const char *allow_cmd(cmd_parms *cmd, void *dv, const char *from, + if (strcasecmp(from, "from")) + return "allow and deny must be followed by 'from'"; + ++ s = ap_strchr(where, '#'); ++ if (s) { ++ *s = '\0'; ++ } ++ + a = (allowdeny *) apr_array_push(cmd->info ? d->allows : d->denys); + a->x.from = where; + a->limited = cmd->limited; diff --git a/SOURCES/httpd-2.4.6-uds.patch b/SOURCES/httpd-2.4.6-uds.patch new file mode 100644 index 0000000..8a00b5a --- /dev/null +++ b/SOURCES/httpd-2.4.6-uds.patch @@ -0,0 +1,1456 @@ +diff --git a/modules/mappers/mod_rewrite.c b/modules/mappers/mod_rewrite.c +index 6061e53..75c2a35 100644 +--- a/modules/mappers/mod_rewrite.c ++++ b/modules/mappers/mod_rewrite.c +@@ -4120,6 +4120,7 @@ static int apply_rewrite_rule(rewriterule_entry *p, rewrite_ctx *ctx) + r->filename)); + + r->filename = apr_pstrcat(r->pool, "proxy:", r->filename, NULL); ++ apr_table_setn(r->notes, "rewrite-proxy", "1"); + return 1; + } + +diff --git a/modules/proxy/mod_proxy.c b/modules/proxy/mod_proxy.c +index b9d71fa..7f96aff 100644 +--- a/modules/proxy/mod_proxy.c ++++ b/modules/proxy/mod_proxy.c +@@ -1348,7 +1348,6 @@ static void *merge_proxy_dir_config(apr_pool_t *p, void *basev, void *addv) + return new; + } + +- + static const char * + add_proxy(cmd_parms *cmd, void *dummy, const char *f1, const char *r1, int regex) + { +@@ -1423,6 +1422,36 @@ static const char * + return add_proxy(cmd, dummy, f1, r1, 1); + } + ++static char *de_socketfy(apr_pool_t *p, char *url) ++{ ++ char *ptr; ++ /* ++ * We could be passed a URL during the config stage that contains ++ * the UDS path... 
ignore it ++ */ ++ if (!strncasecmp(url, "unix:", 5) && ++ ((ptr = ap_strchr(url, '|')) != NULL)) { ++ /* move past the 'unix:...|' UDS path info */ ++ char *ret, *c; ++ ++ ret = ptr + 1; ++ /* special case: "unix:....|scheme:" is OK, expand ++ * to "unix:....|scheme://localhost" ++ * */ ++ c = ap_strchr(ret, ':'); ++ if (c == NULL) { ++ return NULL; ++ } ++ if (c[1] == '\0') { ++ return apr_pstrcat(p, ret, "//localhost", NULL); ++ } ++ else { ++ return ret; ++ } ++ } ++ return url; ++} ++ + static const char * + add_pass(cmd_parms *cmd, void *dummy, const char *arg, int is_regex) + { +@@ -1514,7 +1543,7 @@ static const char * + } + + new->fake = apr_pstrdup(cmd->pool, f); +- new->real = apr_pstrdup(cmd->pool, r); ++ new->real = apr_pstrdup(cmd->pool, de_socketfy(cmd->pool, r)); + new->flags = flags; + if (use_regex) { + new->regex = ap_pregcomp(cmd->pool, f, AP_REG_EXTENDED); +@@ -1550,26 +1579,41 @@ static const char * + new->balancer = balancer; + } + else { +- proxy_worker *worker = ap_proxy_get_worker(cmd->temp_pool, NULL, conf, r); ++ proxy_worker *worker = ap_proxy_get_worker(cmd->temp_pool, NULL, conf, new->real); + int reuse = 0; + if (!worker) { +- const char *err = ap_proxy_define_worker(cmd->pool, &worker, NULL, conf, r, 0); ++ const char *err; ++ if (use_regex) { ++ err = ap_proxy_define_match_worker(cmd->pool, &worker, NULL, ++ conf, r, 0); ++ } ++ else { ++ err = ap_proxy_define_worker(cmd->pool, &worker, NULL, ++ conf, r, 0); ++ } + if (err) + return apr_pstrcat(cmd->temp_pool, "ProxyPass ", err, NULL); + + PROXY_COPY_CONF_PARAMS(worker, conf); +- } else { ++ } ++ else if ((use_regex != 0) ^ (worker->s->is_name_matchable)) { ++ return apr_pstrcat(cmd->temp_pool, "ProxyPass/ and " ++ "ProxyPassMatch/ can't be used " ++ "altogether with the same worker name ", ++ "(", worker->s->name, ")", NULL); ++ } ++ else { + reuse = 1; + ap_log_error(APLOG_MARK, APLOG_INFO, 0, cmd->server, APLOGNO(01145) + "Sharing worker '%s' instead of creating new worker '%s'", +- worker->s->name, new->real); ++ ap_proxy_worker_name(cmd->pool, worker), new->real); + } + + for (i = 0; i < arr->nelts; i++) { + if (reuse) { + ap_log_error(APLOG_MARK, APLOG_WARNING, 0, cmd->server, APLOGNO(01146) + "Ignoring parameter '%s=%s' for worker '%s' because of worker sharing", +- elts[i].key, elts[i].val, worker->s->name); ++ elts[i].key, elts[i].val, ap_proxy_worker_name(cmd->pool, worker)); + } else { + const char *err = set_worker_param(cmd->pool, worker, elts[i].key, + elts[i].val); +@@ -2026,7 +2070,7 @@ static const char *add_member(cmd_parms *cmd, void *dummy, const char *arg) + } + + /* Try to find existing worker */ +- worker = ap_proxy_get_worker(cmd->temp_pool, balancer, conf, name); ++ worker = ap_proxy_get_worker(cmd->temp_pool, balancer, conf, de_socketfy(cmd->temp_pool, name)); + if (!worker) { + ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, cmd->server, APLOGNO(01147) + "Defining worker '%s' for balancer '%s'", +@@ -2035,13 +2079,13 @@ static const char *add_member(cmd_parms *cmd, void *dummy, const char *arg) + return apr_pstrcat(cmd->temp_pool, "BalancerMember ", err, NULL); + ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, cmd->server, APLOGNO(01148) + "Defined worker '%s' for balancer '%s'", +- worker->s->name, balancer->s->name); ++ ap_proxy_worker_name(cmd->pool, worker), balancer->s->name); + PROXY_COPY_CONF_PARAMS(worker, conf); + } else { + reuse = 1; + ap_log_error(APLOG_MARK, APLOG_INFO, 0, cmd->server, APLOGNO(01149) + "Sharing worker '%s' instead of creating new worker '%s'", +- worker->s->name, 
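
End-of-section aside: the de_socketfy() helper above encodes mod_proxy's Unix-domain-socket worker convention, where a backend URL of the form unix:/path/to/socket|scheme://host/ carries the socket path before the '|' and the ordinary scheme://... part after it. The self-contained sketch below splits such a URL with plain C string functions (no APR pools), just to make the convention concrete; the example socket path and URL are invented.

#include <stdio.h>
#include <string.h>
#include <strings.h>

/* Split a mod_proxy UDS-style worker URL of the form
 *   unix:/path/to/app.sock|fcgi://localhost/
 * into the socket path and the "real" scheme://... part.  Plain C string
 * handling is used here instead of the pool-based code inside httpd. */
static int split_uds_url(const char *url, char *path, size_t plen,
                         const char **rest)
{
    const char *bar;

    if (strncasecmp(url, "unix:", 5) != 0 || (bar = strchr(url, '|')) == NULL)
        return 0;                      /* not a UDS-style URL */

    snprintf(path, plen, "%.*s", (int)(bar - (url + 5)), url + 5);
    *rest = bar + 1;                   /* e.g. "fcgi://localhost/" */
    return 1;
}

int main(void)
{
    char path[256];
    const char *rest;

    if (split_uds_url("unix:/run/php-fpm/www.sock|fcgi://localhost/",
                      path, sizeof(path), &rest))
        printf("socket path: %s\nreal URL:    %s\n", path, rest);
    return 0;
}
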
name); ++ ap_proxy_worker_name(cmd->pool, worker), name); + } + + arr = apr_table_elts(params); +@@ -2050,7 +2094,7 @@ static const char *add_member(cmd_parms *cmd, void *dummy, const char *arg) + if (reuse) { + ap_log_error(APLOG_MARK, APLOG_WARNING, 0, cmd->server, APLOGNO(01150) + "Ignoring parameter '%s=%s' for worker '%s' because of worker sharing", +- elts[i].key, elts[i].val, worker->s->name); ++ elts[i].key, elts[i].val, ap_proxy_worker_name(cmd->pool, worker)); + } else { + err = set_worker_param(cmd->pool, worker, elts[i].key, + elts[i].val); +@@ -2112,7 +2156,7 @@ static const char * + } + } + else { +- worker = ap_proxy_get_worker(cmd->temp_pool, NULL, conf, name); ++ worker = ap_proxy_get_worker(cmd->temp_pool, NULL, conf, de_socketfy(cmd->temp_pool, name)); + if (!worker) { + if (in_proxy_section) { + err = ap_proxy_define_worker(cmd->pool, &worker, NULL, +@@ -2170,6 +2214,7 @@ static const char *proxysection(cmd_parms *cmd, void *mconfig, const char *arg) + char *word, *val; + proxy_balancer *balancer = NULL; + proxy_worker *worker = NULL; ++ int use_regex = 0; + + const char *err = ap_check_cmd_context(cmd, NOT_IN_DIR_LOC_FILE); + proxy_server_conf *sconf = +@@ -2219,6 +2264,7 @@ static const char *proxysection(cmd_parms *cmd, void *mconfig, const char *arg) + if (!r) { + return "Regex could not be compiled"; + } ++ use_regex = 1; + } + + /* initialize our config and fetch it */ +@@ -2258,14 +2304,26 @@ static const char *proxysection(cmd_parms *cmd, void *mconfig, const char *arg) + } + else { + worker = ap_proxy_get_worker(cmd->temp_pool, NULL, sconf, +- conf->p); ++ de_socketfy(cmd->temp_pool, (char*)conf->p)); + if (!worker) { +- err = ap_proxy_define_worker(cmd->pool, &worker, NULL, +- sconf, conf->p, 0); ++ if (use_regex) { ++ err = ap_proxy_define_match_worker(cmd->pool, &worker, NULL, ++ sconf, conf->p, 0); ++ } ++ else { ++ err = ap_proxy_define_worker(cmd->pool, &worker, NULL, ++ sconf, conf->p, 0); ++ } + if (err) + return apr_pstrcat(cmd->temp_pool, thiscmd->name, + " ", err, NULL); + } ++ else if ((use_regex != 0) ^ (worker->s->is_name_matchable)) { ++ return apr_pstrcat(cmd->temp_pool, "ProxyPass/ and " ++ "ProxyPassMatch/ can't be used " ++ "altogether with the same worker name ", ++ "(", worker->s->name, ")", NULL); ++ } + } + if (worker == NULL && balancer == NULL) { + return apr_pstrcat(cmd->pool, thiscmd->name, +@@ -2570,6 +2628,8 @@ static void child_init(apr_pool_t *p, server_rec *s) + ap_proxy_hashfunc(conf->forward->s->name, PROXY_HASHFUNC_FNV); + /* Do not disable worker in case of errors */ + conf->forward->s->status |= PROXY_WORKER_IGNORE_ERRORS; ++ /* Mark as the "generic" worker */ ++ conf->forward->s->status |= PROXY_WORKER_GENERIC; + ap_proxy_initialize_worker(conf->forward, s, conf->pool); + /* Disable address cache for generic forward worker */ + conf->forward->s->is_address_reusable = 0; +@@ -2585,6 +2645,8 @@ static void child_init(apr_pool_t *p, server_rec *s) + ap_proxy_hashfunc(reverse->s->name, PROXY_HASHFUNC_FNV); + /* Do not disable worker in case of errors */ + reverse->s->status |= PROXY_WORKER_IGNORE_ERRORS; ++ /* Mark as the "generic" worker */ ++ reverse->s->status |= PROXY_WORKER_GENERIC; + conf->reverse = reverse; + ap_proxy_initialize_worker(conf->reverse, s, conf->pool); + /* Disable address cache for generic reverse worker */ +diff --git a/modules/proxy/mod_proxy.h b/modules/proxy/mod_proxy.h +index 81fd14c..4fb21c7 100644 +--- a/modules/proxy/mod_proxy.h ++++ b/modules/proxy/mod_proxy.h +@@ -249,6 +249,7 @@ typedef struct { 
+ unsigned int need_flush:1; /* Flag to decide whether we need to flush the + * filter chain or not */ + unsigned int inreslist:1; /* connection in apr_reslist? */ ++ const char *uds_path; /* Unix domain socket path */ + } proxy_conn_rec; + + typedef struct { +@@ -269,6 +270,7 @@ struct proxy_conn_pool { + #define PROXY_WORKER_INITIALIZED 0x0001 + #define PROXY_WORKER_IGNORE_ERRORS 0x0002 + #define PROXY_WORKER_DRAIN 0x0004 ++#define PROXY_WORKER_GENERIC 0x0008 + #define PROXY_WORKER_IN_SHUTDOWN 0x0010 + #define PROXY_WORKER_DISABLED 0x0020 + #define PROXY_WORKER_STOPPED 0x0040 +@@ -280,6 +282,7 @@ struct proxy_conn_pool { + #define PROXY_WORKER_INITIALIZED_FLAG 'O' + #define PROXY_WORKER_IGNORE_ERRORS_FLAG 'I' + #define PROXY_WORKER_DRAIN_FLAG 'N' ++#define PROXY_WORKER_GENERIC_FLAG 'G' + #define PROXY_WORKER_IN_SHUTDOWN_FLAG 'U' + #define PROXY_WORKER_DISABLED_FLAG 'D' + #define PROXY_WORKER_STOPPED_FLAG 'S' +@@ -300,6 +303,8 @@ PROXY_WORKER_DISABLED | PROXY_WORKER_STOPPED | PROXY_WORKER_IN_ERROR ) + + #define PROXY_WORKER_IS_DRAINING(f) ( (f)->s->status & PROXY_WORKER_DRAIN ) + ++#define PROXY_WORKER_IS_GENERIC(f) ( (f)->s->status & PROXY_WORKER_GENERIC ) ++ + /* default worker retry timeout in seconds */ + #define PROXY_WORKER_DEFAULT_RETRY 60 + +@@ -341,6 +346,7 @@ typedef struct { + char route[PROXY_WORKER_MAX_ROUTE_SIZE]; /* balancing route */ + char redirect[PROXY_WORKER_MAX_ROUTE_SIZE]; /* temporary balancing redirection route */ + char flusher[PROXY_WORKER_MAX_SCHEME_SIZE]; /* flush provider used by mod_proxy_fdpass */ ++ char uds_path[PROXY_WORKER_MAX_NAME_SIZE]; /* path to worker's unix domain socket if applicable */ + int lbset; /* load balancer cluster set */ + int retries; /* number of retries on this worker */ + int lbstatus; /* Current lbstatus */ +@@ -387,6 +393,7 @@ typedef struct { + unsigned int keepalive_set:1; + unsigned int disablereuse_set:1; + unsigned int was_malloced:1; ++ unsigned int is_name_matchable:1; + } proxy_worker_shared; + + #define ALIGNED_PROXY_WORKER_SHARED_SIZE (APR_ALIGN_DEFAULT(sizeof(proxy_worker_shared))) +@@ -586,6 +593,16 @@ typedef __declspec(dllimport) const char * + + /* Connection pool API */ + /** ++ * Return the user-land, UDS aware worker name ++ * @param p memory pool used for displaying worker name ++ * @param worker the worker ++ * @return name ++ */ ++ ++PROXY_DECLARE(char *) ap_proxy_worker_name(apr_pool_t *p, ++ proxy_worker *worker); ++ ++/** + * Get the worker from proxy configuration + * @param p memory pool used for finding worker + * @param balancer the balancer that the worker belongs to +@@ -615,6 +632,24 @@ PROXY_DECLARE(char *) ap_proxy_define_worker(apr_pool_t *p, + int do_malloc); + + /** ++ * Define and Allocate space for the ap_strcmp_match()able worker to proxy ++ * configuration. 
++ * @param p memory pool to allocate worker from ++ * @param worker the new worker ++ * @param balancer the balancer that the worker belongs to ++ * @param conf current proxy server configuration ++ * @param url url containing worker name (produces match pattern) ++ * @param do_malloc true if shared struct should be malloced ++ * @return error message or NULL if successful (*worker is new worker) ++ */ ++PROXY_DECLARE(char *) ap_proxy_define_match_worker(apr_pool_t *p, ++ proxy_worker **worker, ++ proxy_balancer *balancer, ++ proxy_server_conf *conf, ++ const char *url, ++ int do_malloc); ++ ++/** + * Share a defined proxy worker via shm + * @param worker worker to be shared + * @param shm location of shared info +@@ -983,6 +1018,13 @@ APR_DECLARE_OPTIONAL_FN(int, ap_proxy_clear_connection, + */ + int ap_proxy_lb_workers(void); + ++/** ++ * Return the port number of a known scheme (eg: http -> 80). ++ * @param scheme scheme to test ++ * @return port number or 0 if unknown ++ */ ++PROXY_DECLARE(apr_port_t) ap_proxy_port_of_scheme(const char *scheme); ++ + extern module PROXY_DECLARE_DATA proxy_module; + + #endif /*MOD_PROXY_H*/ +diff --git a/modules/proxy/mod_proxy_ajp.c b/modules/proxy/mod_proxy_ajp.c +index 3736156..cf52a7d 100644 +--- a/modules/proxy/mod_proxy_ajp.c ++++ b/modules/proxy/mod_proxy_ajp.c +@@ -32,7 +32,7 @@ static int proxy_ajp_canon(request_rec *r, char *url) + char *host, *path, sport[7]; + char *search = NULL; + const char *err; +- apr_port_t port = AJP13_DEF_PORT; ++ apr_port_t port, def_port; + + /* ap_port_of_scheme() */ + if (strncasecmp(url, "ajp:", 4) == 0) { +@@ -48,6 +48,8 @@ static int proxy_ajp_canon(request_rec *r, char *url) + * do syntactic check. + * We break the URL into host, port, path, search + */ ++ port = def_port = ap_proxy_port_of_scheme("ajp"); ++ + err = ap_proxy_canon_netloc(r->pool, &url, NULL, NULL, &host, &port); + if (err) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(00867) "error parsing URL %s: %s", +@@ -71,7 +73,10 @@ static int proxy_ajp_canon(request_rec *r, char *url) + if (path == NULL) + return HTTP_BAD_REQUEST; + +- apr_snprintf(sport, sizeof(sport), ":%d", port); ++ if (port != def_port) ++ apr_snprintf(sport, sizeof(sport), ":%d", port); ++ else ++ sport[0] = '\0'; + + if (ap_strchr_c(host, ':')) { + /* if literal IPv6 address */ +diff --git a/modules/proxy/mod_proxy_balancer.c b/modules/proxy/mod_proxy_balancer.c +index 0f45be7..514b8d8 100644 +--- a/modules/proxy/mod_proxy_balancer.c ++++ b/modules/proxy/mod_proxy_balancer.c +@@ -118,7 +118,8 @@ static void init_balancer_members(apr_pool_t *p, server_rec *s, + int worker_is_initialized; + proxy_worker *worker = *workers; + ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(01158) +- "Looking at %s -> %s initialized?", balancer->s->name, worker->s->name); ++ "Looking at %s -> %s initialized?", balancer->s->name, ++ ap_proxy_worker_name(p, worker)); + worker_is_initialized = PROXY_WORKER_IS_INITIALIZED(worker); + if (!worker_is_initialized) { + ap_proxy_initialize_worker(worker, s, p); +@@ -638,10 +639,11 @@ static int proxy_balancer_post_request(proxy_worker *worker, + int val = ((int *)balancer->errstatuses->elts)[i]; + if (r->status == val) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01174) +- "%s: Forcing worker (%s) into error state " ++ "%s: Forcing worker (%s) into error state " + "due to status code %d matching 'failonstatus' " + "balancer parameter", +- balancer->s->name, worker->s->name, val); ++ balancer->s->name, ap_proxy_worker_name(r->pool, 
worker), ++ val); + worker->s->status |= PROXY_WORKER_IN_ERROR; + worker->s->error_time = apr_time_now(); + break; +@@ -654,7 +656,7 @@ static int proxy_balancer_post_request(proxy_worker *worker, + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02460) + "%s: Forcing worker (%s) into error state " + "due to timeout and 'failonstatus' parameter being set", +- balancer->s->name, worker->s->name); ++ balancer->s->name, ap_proxy_worker_name(r->pool, worker)); + worker->s->status |= PROXY_WORKER_IN_ERROR; + worker->s->error_time = apr_time_now(); + +@@ -1282,7 +1284,7 @@ static int balancer_handler(request_rec *r) + worker = *workers; + /* Start proxy_worker */ + ap_rputs(" \n", r); +- ap_rvputs(r, " ", worker->s->name, ++ ap_rvputs(r, " ", ap_proxy_worker_name(r->pool, worker), + "\n", NULL); + ap_rvputs(r, " ", worker->s->scheme, + "\n", NULL); +@@ -1524,7 +1526,8 @@ static int balancer_handler(request_rec *r) + ap_escape_uri(r->pool, worker->s->name), + "&nonce=", balancer->s->nonce, + "\">", NULL); +- ap_rvputs(r, worker->s->name, "", NULL); ++ ap_rvputs(r, (*worker->s->uds_path ? "" : ""), ap_proxy_worker_name(r->pool, worker), ++ (*worker->s->uds_path ? "" : ""), "", NULL); + ap_rvputs(r, "

    ", ap_escape_html(r->pool, worker->s->route), + NULL); + ap_rvputs(r, "", +@@ -1549,7 +1552,7 @@ static int balancer_handler(request_rec *r) + ap_rputs("
    \n", r); + if (wsel && bsel) { + ap_rputs("

<h3>Edit worker settings for ", r); +- ap_rvputs(r, wsel->s->name, "

</h3>\n", NULL); ++ ap_rvputs(r, (*wsel->s->uds_path?"<i>":""), ap_proxy_worker_name(r->pool, wsel), (*wsel->s->uds_path?"</i>":""), "</h3>\n", NULL); + ap_rputs("
    pool, action), "\">\n", NULL); + ap_rputs("
    \n
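The mod_proxy_ajp hunk above and the mod_proxy_fcgi, mod_proxy_http and mod_proxy_scgi hunks below all canonicalise the backend URL the same way: look up the scheme's default port once, then append an explicit ":port" suffix only when the parsed port differs from that default. A stand-alone sketch of the idea follows; it uses plain libc rather than APR, the fcgi/ajp/scgi defaults are the ones the patch adds, and the helper names here are illustrative only, not taken from the patch.

#include <stdio.h>
#include <string.h>
#include <strings.h>

/* Illustrative scheme table; fcgi/ajp/scgi mirror the pschemes[] fallback
 * added to proxy_util.c, http/https stand in for what
 * apr_uri_port_of_scheme() already knows. */
static const struct {
    const char *name;
    unsigned short def_port;
} schemes[] = {
    { "http", 80 }, { "https", 443 },
    { "fcgi", 8000 }, { "ajp", 8009 }, { "scgi", 4000 },
    { NULL, 0 }
};

static unsigned short port_of_scheme(const char *scheme)
{
    int i;
    for (i = 0; schemes[i].name; i++) {
        if (strcasecmp(scheme, schemes[i].name) == 0)
            return schemes[i].def_port;
    }
    return 0; /* unknown scheme */
}

/* Build the ":port" suffix appended to the canonical URL:
 * empty when the port is the scheme's default. */
static void port_suffix(const char *scheme, unsigned short port,
                        char *sport, size_t len)
{
    if (port && port != port_of_scheme(scheme))
        snprintf(sport, len, ":%u", (unsigned)port);
    else
        sport[0] = '\0';
}

int main(void)
{
    char sport[sizeof(":65535")];

    port_suffix("fcgi", 8000, sport, sizeof(sport));
    printf("fcgi on 8000 -> \"%s\"\n", sport);  /* default port: suffix dropped */

    port_suffix("http", 8080, sport, sizeof(sport));
    printf("http on 8080 -> \"%s\"\n", sport);  /* non-default: ":8080" kept */
    return 0;
}

In the patch the default comes from the new ap_proxy_port_of_scheme(), which consults apr_uri_port_of_scheme() first and falls back to the small pschemes[] table for fcgi, ajp and scgi.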
    Load factor:pool, &url, NULL, NULL, &host, &port); + if (err) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01059) +@@ -96,7 +97,10 @@ static int proxy_fcgi_canon(request_rec *r, char *url) + return HTTP_BAD_REQUEST; + } + +- apr_snprintf(sport, sizeof(sport), ":%d", port); ++ if (port != def_port) ++ apr_snprintf(sport, sizeof(sport), ":%d", port); ++ else ++ sport[0] = '\0'; + + if (ap_strchr_c(host, ':')) { + /* if literal IPv6 address */ +@@ -930,7 +934,7 @@ static int proxy_fcgi_handler(request_rec *r, proxy_worker *worker, + int status; + char server_portstr[32]; + conn_rec *origin = NULL; +- proxy_conn_rec *backend = NULL; ++ proxy_conn_rec *backend; + + proxy_dir_conf *dconf = ap_get_module_config(r->per_dir_config, + &proxy_module); +@@ -943,10 +947,7 @@ static int proxy_fcgi_handler(request_rec *r, proxy_worker *worker, + "url: %s proxyname: %s proxyport: %d", + url, proxyname, proxyport); + +- if (strncasecmp(url, "fcgi:", 5) == 0) { +- url += 5; +- } +- else { ++ if (strncasecmp(url, "fcgi:", 5) != 0) { + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01077) "declining URL %s", url); + return DECLINED; + } +@@ -954,16 +955,14 @@ static int proxy_fcgi_handler(request_rec *r, proxy_worker *worker, + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01078) "serving URL %s", url); + + /* Create space for state information */ +- if (! backend) { +- status = ap_proxy_acquire_connection(FCGI_SCHEME, &backend, worker, +- r->server); +- if (status != OK) { +- if (backend) { +- backend->close = 1; +- ap_proxy_release_connection(FCGI_SCHEME, backend, r->server); +- } +- return status; ++ status = ap_proxy_acquire_connection(FCGI_SCHEME, &backend, worker, ++ r->server); ++ if (status != OK) { ++ if (backend) { ++ backend->close = 1; ++ ap_proxy_release_connection(FCGI_SCHEME, backend, r->server); + } ++ return status; + } + + backend->is_ssl = 0; +diff --git a/modules/proxy/mod_proxy_http.c b/modules/proxy/mod_proxy_http.c +index 05f33b4..f0bb0ed 100644 +--- a/modules/proxy/mod_proxy_http.c ++++ b/modules/proxy/mod_proxy_http.c +@@ -54,7 +54,7 @@ static int proxy_http_canon(request_rec *r, char *url) + else { + return DECLINED; + } +- def_port = apr_uri_port_of_scheme(scheme); ++ port = def_port = ap_proxy_port_of_scheme(scheme); + + ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, + "HTTP: canonicalising URL %s", url); +@@ -62,7 +62,6 @@ static int proxy_http_canon(request_rec *r, char *url) + /* do syntatic check. 
+ * We break the URL into host, port, path, search + */ +- port = def_port; + err = ap_proxy_canon_netloc(r->pool, &url, NULL, NULL, &host, &port); + if (err) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01083) +diff --git a/modules/proxy/mod_proxy_scgi.c b/modules/proxy/mod_proxy_scgi.c +index f77a986..6deae78 100644 +--- a/modules/proxy/mod_proxy_scgi.c ++++ b/modules/proxy/mod_proxy_scgi.c +@@ -176,13 +176,15 @@ static int scgi_canon(request_rec *r, char *url) + { + char *host, sport[sizeof(":65535")]; + const char *err, *path; +- apr_port_t port = SCGI_DEFAULT_PORT; ++ apr_port_t port, def_port; + + if (strncasecmp(url, SCHEME "://", sizeof(SCHEME) + 2)) { + return DECLINED; + } + url += sizeof(SCHEME); /* Keep slashes */ + ++ port = def_port = SCGI_DEFAULT_PORT; ++ + err = ap_proxy_canon_netloc(r->pool, &url, NULL, NULL, &host, &port); + if (err) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(00857) +@@ -190,7 +192,12 @@ static int scgi_canon(request_rec *r, char *url) + return HTTP_BAD_REQUEST; + } + +- apr_snprintf(sport, sizeof(sport), ":%u", port); ++ if (port != def_port) { ++ apr_snprintf(sport, sizeof(sport), ":%u", port); ++ } ++ else { ++ sport[0] = '\0'; ++ } + + if (ap_strchr(host, ':')) { /* if literal IPv6 address */ + host = apr_pstrcat(r->pool, "[", host, "]", NULL); +diff --git a/modules/proxy/proxy_util.c b/modules/proxy/proxy_util.c +index 8bc9fab..dea2b10 100644 +--- a/modules/proxy/proxy_util.c ++++ b/modules/proxy/proxy_util.c +@@ -21,6 +21,7 @@ + #include "apr_version.h" + #include "apr_hash.h" + #include "proxy_util.h" ++#include "ajp.h" + + #if APR_HAVE_UNISTD_H + #include /* for getpid() */ +@@ -31,6 +32,13 @@ + #define apr_socket_create apr_socket_create_ex + #endif + ++#if APR_HAVE_SYS_UN_H ++#include ++#endif ++#if (APR_MAJOR_VERSION < 2) ++#include "apr_support.h" /* for apr_wait_for_io_or_timeout() */ ++#endif ++ + APLOG_USE_MODULE(proxy); + + /* +@@ -86,14 +94,20 @@ PROXY_DECLARE(apr_status_t) ap_proxy_strncpy(char *dst, const char *src, + char *thenil; + apr_size_t thelen; + ++ /* special case handling */ ++ if (!dlen) { ++ /* XXX: APR_ENOSPACE would be better */ ++ return APR_EGENERAL; ++ } ++ if (!src) { ++ *dst = '\0'; ++ return APR_SUCCESS; ++ } + thenil = apr_cpystrn(dst, src, dlen); + thelen = thenil - dst; +- /* Assume the typical case is smaller copying into bigger +- so we have a fast return */ +- if ((thelen < dlen-1) || ((strlen(src)) == thelen)) { ++ if (src[thelen] == '\0') { + return APR_SUCCESS; + } +- /* XXX: APR_ENOSPACE would be better */ + return APR_EGENERAL; + } + +@@ -1218,11 +1232,11 @@ PROXY_DECLARE(apr_status_t) ap_proxy_share_balancer(proxy_balancer *balancer, + } else { + action = "re-using"; + } ++ balancer->s = shm; ++ balancer->s->index = i; + ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf, APLOGNO(02337) + "%s shm[%d] (0x%pp) for %s", action, i, (void *)shm, + balancer->s->name); +- balancer->s = shm; +- balancer->s->index = i; + /* the below should always succeed */ + lbmethod = ap_lookup_provider(PROXY_LBMETHOD, balancer->s->lbpname, "0"); + if (lbmethod) { +@@ -1356,7 +1370,7 @@ static apr_status_t connection_cleanup(void *theconn) + ap_log_perror(APLOG_MARK, APLOG_ERR, 0, conn->pool, APLOGNO(00923) + "Pooled connection 0x%pp for worker %s has been" + " already returned to the connection pool.", conn, +- worker->s->name); ++ ap_proxy_worker_name(conn->pool, worker)); + return APR_SUCCESS; + } + +@@ -1480,6 +1494,55 @@ static apr_status_t connection_destructor(void *resource, void *params, + * 
WORKER related... + */ + ++PROXY_DECLARE(char *) ap_proxy_worker_name(apr_pool_t *p, ++ proxy_worker *worker) ++{ ++ if (!(*worker->s->uds_path) || !p) { ++ /* just in case */ ++ return worker->s->name; ++ } ++ return apr_pstrcat(p, "unix:", worker->s->uds_path, "|", worker->s->name, NULL); ++} ++ ++/* ++ * Taken from ap_strcmp_match() : ++ * Match = 0, NoMatch = 1, Abort = -1, Inval = -2 ++ * Based loosely on sections of wildmat.c by Rich Salz ++ * Hmmm... shouldn't this really go component by component? ++ * ++ * Adds handling of the "\" => "" unescaping. ++ */ ++static int ap_proxy_strcmp_ematch(const char *str, const char *expected) ++{ ++ apr_size_t x, y; ++ ++ for (x = 0, y = 0; expected[y]; ++y, ++x) { ++ if ((!str[x]) && (expected[y] != '$' || !apr_isdigit(expected[y + 1]))) ++ return -1; ++ if (expected[y] == '$' && apr_isdigit(expected[y + 1])) { ++ while (expected[y] == '$' && apr_isdigit(expected[y + 1])) ++ y += 2; ++ if (!expected[y]) ++ return 0; ++ while (str[x]) { ++ int ret; ++ if ((ret = ap_proxy_strcmp_ematch(&str[x++], &expected[y])) != 1) ++ return ret; ++ } ++ return -1; ++ } ++ else if (expected[y] == '\\') { ++ /* NUL is an invalid char! */ ++ if (!expected[++y]) ++ return -2; ++ } ++ if (str[x] != expected[y]) ++ return 1; ++ } ++ /* We got all the way through the worker path without a difference */ ++ return 0; ++} ++ + PROXY_DECLARE(proxy_worker *) ap_proxy_get_worker(apr_pool_t *p, + proxy_balancer *balancer, + proxy_server_conf *conf, +@@ -1495,6 +1558,10 @@ PROXY_DECLARE(proxy_worker *) ap_proxy_get_worker(apr_pool_t *p, + char *url_copy; + int i; + ++ if (!url) { ++ return NULL; ++ } ++ + c = ap_strchr_c(url, ':'); + if (c == NULL || c[1] != '/' || c[2] != '/' || c[3] == '\0') { + return NULL; +@@ -1536,11 +1603,15 @@ PROXY_DECLARE(proxy_worker *) ap_proxy_get_worker(apr_pool_t *p, + if ( ((worker_name_length = strlen(worker->s->name)) <= url_length) + && (worker_name_length >= min_match) + && (worker_name_length > max_match) +- && (strncmp(url_copy, worker->s->name, worker_name_length) == 0) ) { ++ && (worker->s->is_name_matchable ++ || strncmp(url_copy, worker->s->name, ++ worker_name_length) == 0) ++ && (!worker->s->is_name_matchable ++ || ap_proxy_strcmp_ematch(url_copy, ++ worker->s->name) == 0) ) { + max_worker = worker; + max_match = worker_name_length; + } +- + } + } else { + worker = (proxy_worker *)conf->workers->elts; +@@ -1548,7 +1619,12 @@ PROXY_DECLARE(proxy_worker *) ap_proxy_get_worker(apr_pool_t *p, + if ( ((worker_name_length = strlen(worker->s->name)) <= url_length) + && (worker_name_length >= min_match) + && (worker_name_length > max_match) +- && (strncmp(url_copy, worker->s->name, worker_name_length) == 0) ) { ++ && (worker->s->is_name_matchable ++ || strncmp(url_copy, worker->s->name, ++ worker_name_length) == 0) ++ && (!worker->s->is_name_matchable ++ || ap_proxy_strcmp_ematch(url_copy, ++ worker->s->name) == 0) ) { + max_worker = worker; + max_match = worker_name_length; + } +@@ -1573,20 +1649,47 @@ PROXY_DECLARE(char *) ap_proxy_define_worker(apr_pool_t *p, + int do_malloc) + { + int rv; +- apr_uri_t uri; ++ apr_uri_t uri, urisock; + proxy_worker_shared *wshared; +- char *ptr; ++ char *ptr, *sockpath = NULL; + ++ /* ++ * Look to see if we are using UDS: ++ * require format: unix:/path/foo/bar.sock|http://ignored/path2/ ++ * This results in talking http to the socket at /path/foo/bar.sock ++ */ ++ ptr = ap_strchr((char *)url, '|'); ++ if (ptr) { ++ *ptr = '\0'; ++ rv = apr_uri_parse(p, url, &urisock); ++ if (rv == APR_SUCCESS && 
!strcasecmp(urisock.scheme, "unix")) { ++ sockpath = ap_runtime_dir_relative(p, urisock.path);; ++ url = ptr+1; /* so we get the scheme for the uds */ ++ } ++ else { ++ *ptr = '|'; ++ } ++ } + rv = apr_uri_parse(p, url, &uri); + + if (rv != APR_SUCCESS) { +- return "Unable to parse URL"; ++ return apr_pstrcat(p, "Unable to parse URL: ", url, NULL); + } +- if (!uri.hostname || !uri.scheme) { +- return "URL must be absolute!"; ++ if (!uri.scheme) { ++ return apr_pstrcat(p, "URL must be absolute!: ", url, NULL); ++ } ++ /* allow for unix:/path|http: */ ++ if (!uri.hostname) { ++ if (sockpath) { ++ uri.hostname = "localhost"; ++ } ++ else { ++ return apr_pstrcat(p, "URL must be absolute!: ", url, NULL); ++ } ++ } ++ else { ++ ap_str_tolower(uri.hostname); + } +- +- ap_str_tolower(uri.hostname); + ap_str_tolower(uri.scheme); + /* + * Workers can be associated w/ balancers or on their +@@ -1642,6 +1745,16 @@ PROXY_DECLARE(char *) ap_proxy_define_worker(apr_pool_t *p, + wshared->hash.def = ap_proxy_hashfunc(wshared->name, PROXY_HASHFUNC_DEFAULT); + wshared->hash.fnv = ap_proxy_hashfunc(wshared->name, PROXY_HASHFUNC_FNV); + wshared->was_malloced = (do_malloc != 0); ++ wshared->is_name_matchable = 0; ++ if (sockpath) { ++ if (PROXY_STRNCPY(wshared->uds_path, sockpath) != APR_SUCCESS) { ++ return apr_psprintf(p, "worker uds path (%s) too long", sockpath); ++ } ++ ++ } ++ else { ++ *wshared->uds_path = '\0'; ++ } + + (*worker)->hash = wshared->hash; + (*worker)->context = NULL; +@@ -1652,6 +1765,24 @@ PROXY_DECLARE(char *) ap_proxy_define_worker(apr_pool_t *p, + return NULL; + } + ++PROXY_DECLARE(char *) ap_proxy_define_match_worker(apr_pool_t *p, ++ proxy_worker **worker, ++ proxy_balancer *balancer, ++ proxy_server_conf *conf, ++ const char *url, ++ int do_malloc) ++{ ++ char *err; ++ ++ err = ap_proxy_define_worker(p, worker, balancer, conf, url, do_malloc); ++ if (err) { ++ return err; ++ } ++ ++ (*worker)->s->is_name_matchable = 1; ++ return NULL; ++} ++ + /* + * Create an already defined worker and free up memory + */ +@@ -1670,12 +1801,18 @@ PROXY_DECLARE(apr_status_t) ap_proxy_share_worker(proxy_worker *worker, proxy_wo + } else { + action = "re-using"; + } +- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf, APLOGNO(02338) +- "%s shm[%d] (0x%pp) for worker: %s", action, i, (void *)shm, +- worker->s->name); +- + worker->s = shm; + worker->s->index = i; ++ { ++ apr_pool_t *pool; ++ apr_pool_create(&pool, ap_server_conf->process->pool); ++ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf, APLOGNO(02338) ++ "%s shm[%d] (0x%pp) for worker: %s", action, i, (void *)shm, ++ ap_proxy_worker_name(pool, worker)); ++ if (pool) { ++ apr_pool_destroy(pool); ++ } ++ } + return APR_SUCCESS; + } + +@@ -1687,11 +1824,13 @@ PROXY_DECLARE(apr_status_t) ap_proxy_initialize_worker(proxy_worker *worker, ser + if (worker->s->status & PROXY_WORKER_INITIALIZED) { + /* The worker is already initialized */ + ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00924) +- "worker %s shared already initialized", worker->s->name); ++ "worker %s shared already initialized", ++ ap_proxy_worker_name(p, worker)); + } + else { + ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00925) +- "initializing worker %s shared", worker->s->name); ++ "initializing worker %s shared", ++ ap_proxy_worker_name(p, worker)); + /* Set default parameters */ + if (!worker->s->retry_set) { + worker->s->retry = apr_time_from_sec(PROXY_WORKER_DEFAULT_RETRY); +@@ -1727,11 +1866,13 @@ PROXY_DECLARE(apr_status_t) 
ap_proxy_initialize_worker(proxy_worker *worker, ser + /* What if local is init'ed and shm isn't?? Even possible? */ + if (worker->local_status & PROXY_WORKER_INITIALIZED) { + ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00926) +- "worker %s local already initialized", worker->s->name); ++ "worker %s local already initialized", ++ ap_proxy_worker_name(p, worker)); + } + else { + ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00927) +- "initializing worker %s local", worker->s->name); ++ "initializing worker %s local", ++ ap_proxy_worker_name(p, worker)); + apr_global_mutex_lock(proxy_mutex); + /* Now init local worker data */ + if (worker->tmutex == NULL) { +@@ -1853,6 +1994,8 @@ PROXY_DECLARE(int) ap_proxy_pre_request(proxy_worker **worker, + } + else if (r->proxyreq == PROXYREQ_REVERSE) { + if (conf->reverse) { ++ char *ptr; ++ char *ptr2; + ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, + "*: found reverse proxy worker for %s", *url); + *balancer = NULL; +@@ -1864,6 +2007,36 @@ PROXY_DECLARE(int) ap_proxy_pre_request(proxy_worker **worker, + * regarding the Connection header in the request. + */ + apr_table_setn(r->subprocess_env, "proxy-nokeepalive", "1"); ++ /* ++ * In the case of the generic reverse proxy, we need to see if we ++ * were passed a UDS url (eg: from mod_proxy) and adjust uds_path ++ * as required. ++ * ++ * NOTE: Here we use a quick note lookup, but we could also ++ * check to see if r->filename starts with 'proxy:' ++ */ ++ if (apr_table_get(r->notes, "rewrite-proxy") && ++ (ptr2 = ap_strcasestr(r->filename, "unix:")) && ++ (ptr = ap_strchr(ptr2, '|'))) { ++ apr_uri_t urisock; ++ apr_status_t rv; ++ *ptr = '\0'; ++ rv = apr_uri_parse(r->pool, ptr2, &urisock); ++ if (rv == APR_SUCCESS) { ++ char *rurl = ptr+1; ++ char *sockpath = ap_runtime_dir_relative(r->pool, urisock.path); ++ apr_table_setn(r->notes, "uds_path", sockpath); ++ *url = apr_pstrdup(r->pool, rurl); /* so we get the scheme for the uds */ ++ /* r->filename starts w/ "proxy:", so add after that */ ++ memmove(r->filename+6, rurl, strlen(rurl)+1); ++ ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, ++ "*: rewrite of url due to UDS(%s): %s (%s)", ++ sockpath, *url, r->filename); ++ } ++ else { ++ *ptr = '|'; ++ } ++ } + } + } + } +@@ -2053,6 +2226,7 @@ ap_proxy_determine_connection(apr_pool_t *p, request_rec *r, + int server_port; + apr_status_t err = APR_SUCCESS; + apr_status_t uerr = APR_SUCCESS; ++ const char *uds_path; + + /* + * Break up the URL to determine the host to connect to +@@ -2065,7 +2239,7 @@ ap_proxy_determine_connection(apr_pool_t *p, request_rec *r, + NULL)); + } + if (!uri->port) { +- uri->port = apr_uri_port_of_scheme(uri->scheme); ++ uri->port = ap_proxy_port_of_scheme(uri->scheme); + } + + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(00944) +@@ -2093,73 +2267,117 @@ ap_proxy_determine_connection(apr_pool_t *p, request_rec *r, + * to check host and port on the conn and be careful about + * spilling the cached addr from the worker. + */ +- if (!conn->hostname || !worker->s->is_address_reusable || +- worker->s->disablereuse) { +- if (proxyname) { +- conn->hostname = apr_pstrdup(conn->pool, proxyname); +- conn->port = proxyport; +- /* +- * If we have a forward proxy and the protocol is HTTPS, +- * then we need to prepend a HTTP CONNECT request before +- * sending our actual HTTPS requests. +- * Save our real backend data for using it later during HTTP CONNECT. 
+- */ +- if (conn->is_ssl) { +- const char *proxy_auth; +- +- forward_info *forward = apr_pcalloc(conn->pool, sizeof(forward_info)); +- conn->forward = forward; +- forward->use_http_connect = 1; +- forward->target_host = apr_pstrdup(conn->pool, uri->hostname); +- forward->target_port = uri->port; +- /* Do we want to pass Proxy-Authorization along? +- * If we haven't used it, then YES +- * If we have used it then MAYBE: RFC2616 says we MAY propagate it. +- * So let's make it configurable by env. +- * The logic here is the same used in mod_proxy_http. +- */ +- proxy_auth = apr_table_get(r->headers_in, "Proxy-Authorization"); +- if (proxy_auth != NULL && +- proxy_auth[0] != '\0' && +- r->user == NULL && /* we haven't yet authenticated */ +- apr_table_get(r->subprocess_env, "Proxy-Chain-Auth")) { +- forward->proxy_auth = apr_pstrdup(conn->pool, proxy_auth); +- } +- } ++ uds_path = (*worker->s->uds_path ? worker->s->uds_path : apr_table_get(r->notes, "uds_path")); ++ if (uds_path) { ++ if (conn->uds_path == NULL) { ++ /* use (*conn)->pool instead of worker->cp->pool to match lifetime */ ++ conn->uds_path = apr_pstrdup(conn->pool, uds_path); + } +- else { +- conn->hostname = apr_pstrdup(conn->pool, uri->hostname); +- conn->port = uri->port; +- } +- socket_cleanup(conn); +- err = apr_sockaddr_info_get(&(conn->addr), +- conn->hostname, APR_UNSPEC, +- conn->port, 0, +- conn->pool); +- } +- else if (!worker->cp->addr) { +- if ((err = PROXY_THREAD_LOCK(worker)) != APR_SUCCESS) { +- ap_log_rerror(APLOG_MARK, APLOG_ERR, err, r, APLOGNO(00945) "lock"); +- return HTTP_INTERNAL_SERVER_ERROR; ++ if (conn->uds_path) { ++ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02545) ++ "%s: has determined UDS as %s", ++ uri->scheme, conn->uds_path); + } ++ else { ++ /* should never happen */ ++ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02546) ++ "%s: cannot determine UDS (%s)", ++ uri->scheme, uds_path); + ++ } + /* +- * Worker can have the single constant backend adress. +- * The single DNS lookup is used once per worker. +- * If dynamic change is needed then set the addr to NULL +- * inside dynamic config to force the lookup. ++ * In UDS cases, some structs are NULL. Protect from de-refs ++ * and provide info for logging at the same time. + */ +- err = apr_sockaddr_info_get(&(worker->cp->addr), +- conn->hostname, APR_UNSPEC, +- conn->port, 0, +- worker->cp->pool); +- conn->addr = worker->cp->addr; +- if ((uerr = PROXY_THREAD_UNLOCK(worker)) != APR_SUCCESS) { +- ap_log_rerror(APLOG_MARK, APLOG_ERR, uerr, r, APLOGNO(00946) "unlock"); ++ if (!conn->addr) { ++ apr_sockaddr_t *sa; ++ apr_sockaddr_info_get(&sa, NULL, APR_UNSPEC, 0, 0, conn->pool); ++ conn->addr = sa; + } ++ conn->hostname = "httpd-UDS"; ++ conn->port = 0; + } + else { +- conn->addr = worker->cp->addr; ++ int will_reuse = worker->s->is_address_reusable && !worker->s->disablereuse; ++ if (!conn->hostname || !will_reuse) { ++ if (proxyname) { ++ conn->hostname = apr_pstrdup(conn->pool, proxyname); ++ conn->port = proxyport; ++ /* ++ * If we have a forward proxy and the protocol is HTTPS, ++ * then we need to prepend a HTTP CONNECT request before ++ * sending our actual HTTPS requests. ++ * Save our real backend data for using it later during HTTP CONNECT. 
++ */ ++ if (conn->is_ssl) { ++ const char *proxy_auth; ++ ++ forward_info *forward = apr_pcalloc(conn->pool, sizeof(forward_info)); ++ conn->forward = forward; ++ forward->use_http_connect = 1; ++ forward->target_host = apr_pstrdup(conn->pool, uri->hostname); ++ forward->target_port = uri->port; ++ /* Do we want to pass Proxy-Authorization along? ++ * If we haven't used it, then YES ++ * If we have used it then MAYBE: RFC2616 says we MAY propagate it. ++ * So let's make it configurable by env. ++ * The logic here is the same used in mod_proxy_http. ++ */ ++ proxy_auth = apr_table_get(r->headers_in, "Proxy-Authorization"); ++ if (proxy_auth != NULL && ++ proxy_auth[0] != '\0' && ++ r->user == NULL && /* we haven't yet authenticated */ ++ apr_table_get(r->subprocess_env, "Proxy-Chain-Auth")) { ++ forward->proxy_auth = apr_pstrdup(conn->pool, proxy_auth); ++ } ++ } ++ } ++ else { ++ conn->hostname = apr_pstrdup(conn->pool, uri->hostname); ++ conn->port = uri->port; ++ } ++ if (!will_reuse) { ++ /* ++ * Only do a lookup if we should not reuse the backend address. ++ * Otherwise we will look it up once for the worker. ++ */ ++ err = apr_sockaddr_info_get(&(conn->addr), ++ conn->hostname, APR_UNSPEC, ++ conn->port, 0, ++ conn->pool); ++ } ++ socket_cleanup(conn); ++ } ++ if (will_reuse) { ++ /* ++ * Looking up the backend address for the worker only makes sense if ++ * we can reuse the address. ++ */ ++ if (!worker->cp->addr) { ++ if ((err = PROXY_THREAD_LOCK(worker)) != APR_SUCCESS) { ++ ap_log_rerror(APLOG_MARK, APLOG_ERR, err, r, APLOGNO(00945) "lock"); ++ return HTTP_INTERNAL_SERVER_ERROR; ++ } ++ ++ /* ++ * Worker can have the single constant backend adress. ++ * The single DNS lookup is used once per worker. ++ * If dynamic change is needed then set the addr to NULL ++ * inside dynamic config to force the lookup. 
++ */ ++ err = apr_sockaddr_info_get(&(worker->cp->addr), ++ conn->hostname, APR_UNSPEC, ++ conn->port, 0, ++ worker->cp->pool); ++ conn->addr = worker->cp->addr; ++ if ((uerr = PROXY_THREAD_UNLOCK(worker)) != APR_SUCCESS) { ++ ap_log_rerror(APLOG_MARK, APLOG_ERR, uerr, r, APLOGNO(00946) "unlock"); ++ } ++ } ++ else { ++ conn->addr = worker->cp->addr; ++ } ++ } + } + /* Close a possible existing socket if we are told to do so */ + if (conn->close) { +@@ -2360,6 +2578,52 @@ static apr_status_t send_http_connect(proxy_conn_rec *backend, + } + + ++#if APR_HAVE_SYS_UN_H ++/* lifted from mod_proxy_fdpass.c; tweaked addrlen in connect() call */ ++static apr_status_t socket_connect_un(apr_socket_t *sock, ++ struct sockaddr_un *sa) ++{ ++ apr_status_t rv; ++ apr_os_sock_t rawsock; ++ apr_interval_time_t t; ++ ++ rv = apr_os_sock_get(&rawsock, sock); ++ if (rv != APR_SUCCESS) { ++ return rv; ++ } ++ ++ rv = apr_socket_timeout_get(sock, &t); ++ if (rv != APR_SUCCESS) { ++ return rv; ++ } ++ ++ do { ++ const socklen_t addrlen = APR_OFFSETOF(struct sockaddr_un, sun_path) ++ + strlen(sa->sun_path) + 1; ++ rv = connect(rawsock, (struct sockaddr*)sa, addrlen); ++ } while (rv == -1 && errno == EINTR); ++ ++ if ((rv == -1) && (errno == EINPROGRESS || errno == EALREADY) ++ && (t > 0)) { ++#if APR_MAJOR_VERSION < 2 ++ rv = apr_wait_for_io_or_timeout(NULL, sock, 0); ++#else ++ rv = apr_socket_wait(sock, APR_WAIT_WRITE); ++#endif ++ ++ if (rv != APR_SUCCESS) { ++ return rv; ++ } ++ } ++ ++ if (rv == -1 && errno != EISCONN) { ++ return errno; ++ } ++ ++ return APR_SUCCESS; ++} ++#endif ++ + PROXY_DECLARE(int) ap_proxy_connect_backend(const char *proxy_function, + proxy_conn_rec *conn, + proxy_worker *worker, +@@ -2384,93 +2648,131 @@ PROXY_DECLARE(int) ap_proxy_connect_backend(const char *proxy_function, + proxy_function); + } + } +- while (backend_addr && !connected) { +- if ((rv = apr_socket_create(&newsock, backend_addr->family, +- SOCK_STREAM, APR_PROTO_TCP, +- conn->scpool)) != APR_SUCCESS) { +- loglevel = backend_addr->next ? 
APLOG_DEBUG : APLOG_ERR; +- ap_log_error(APLOG_MARK, loglevel, rv, s, APLOGNO(00952) +- "%s: error creating fam %d socket for target %s", +- proxy_function, +- backend_addr->family, +- worker->s->hostname); +- /* +- * this could be an IPv6 address from the DNS but the +- * local machine won't give us an IPv6 socket; hopefully the +- * DNS returned an additional address to try +- */ +- backend_addr = backend_addr->next; +- continue; +- } +- conn->connection = NULL; ++ while ((backend_addr || conn->uds_path) && !connected) { ++#if APR_HAVE_SYS_UN_H ++ if (conn->uds_path) ++ { ++ struct sockaddr_un sa; + +- if (worker->s->recv_buffer_size > 0 && +- (rv = apr_socket_opt_set(newsock, APR_SO_RCVBUF, +- worker->s->recv_buffer_size))) { +- ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, APLOGNO(00953) +- "apr_socket_opt_set(SO_RCVBUF): Failed to set " +- "ProxyReceiveBufferSize, using default"); +- } ++ rv = apr_socket_create(&newsock, AF_UNIX, SOCK_STREAM, 0, ++ conn->scpool); ++ if (rv != APR_SUCCESS) { ++ loglevel = APLOG_ERR; ++ ap_log_error(APLOG_MARK, loglevel, rv, s, APLOGNO(02453) ++ "%s: error creating Unix domain socket for " ++ "target %s", ++ proxy_function, ++ worker->s->hostname); ++ break; ++ } ++ conn->connection = NULL; + +- rv = apr_socket_opt_set(newsock, APR_TCP_NODELAY, 1); +- if (rv != APR_SUCCESS && rv != APR_ENOTIMPL) { +- ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, APLOGNO(00954) +- "apr_socket_opt_set(APR_TCP_NODELAY): " +- "Failed to set"); +- } ++ sa.sun_family = AF_UNIX; ++ apr_cpystrn(sa.sun_path, conn->uds_path, sizeof(sa.sun_path)); + +- /* Set a timeout for connecting to the backend on the socket */ +- if (worker->s->conn_timeout_set) { +- apr_socket_timeout_set(newsock, worker->s->conn_timeout); +- } +- else if (worker->s->timeout_set) { +- apr_socket_timeout_set(newsock, worker->s->timeout); +- } +- else if (conf->timeout_set) { +- apr_socket_timeout_set(newsock, conf->timeout); +- } +- else { +- apr_socket_timeout_set(newsock, s->timeout); +- } +- /* Set a keepalive option */ +- if (worker->s->keepalive) { +- if ((rv = apr_socket_opt_set(newsock, +- APR_SO_KEEPALIVE, 1)) != APR_SUCCESS) { +- ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, APLOGNO(00955) +- "apr_socket_opt_set(SO_KEEPALIVE): Failed to set" +- " Keepalive"); ++ rv = socket_connect_un(newsock, &sa); ++ if (rv != APR_SUCCESS) { ++ apr_socket_close(newsock); ++ ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, APLOGNO(02454) ++ "%s: attempt to connect to Unix domain socket " ++ "%s (%s) failed", ++ proxy_function, ++ conn->uds_path, ++ worker->s->hostname); ++ break; + } + } +- ap_log_error(APLOG_MARK, APLOG_TRACE2, 0, s, +- "%s: fam %d socket created to connect to %s", +- proxy_function, backend_addr->family, worker->s->hostname); ++ else ++#endif ++ { ++ if ((rv = apr_socket_create(&newsock, backend_addr->family, ++ SOCK_STREAM, APR_PROTO_TCP, ++ conn->scpool)) != APR_SUCCESS) { ++ loglevel = backend_addr->next ? 
APLOG_DEBUG : APLOG_ERR; ++ ap_log_error(APLOG_MARK, loglevel, rv, s, APLOGNO(00952) ++ "%s: error creating fam %d socket for " ++ "target %s", ++ proxy_function, ++ backend_addr->family, ++ worker->s->hostname); ++ /* ++ * this could be an IPv6 address from the DNS but the ++ * local machine won't give us an IPv6 socket; hopefully the ++ * DNS returned an additional address to try ++ */ ++ backend_addr = backend_addr->next; ++ continue; ++ } ++ conn->connection = NULL; ++ ++ if (worker->s->recv_buffer_size > 0 && ++ (rv = apr_socket_opt_set(newsock, APR_SO_RCVBUF, ++ worker->s->recv_buffer_size))) { ++ ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, APLOGNO(00953) ++ "apr_socket_opt_set(SO_RCVBUF): Failed to set " ++ "ProxyReceiveBufferSize, using default"); ++ } + +- if (conf->source_address_set) { +- local_addr = apr_pmemdup(conn->pool, conf->source_address, +- sizeof(apr_sockaddr_t)); +- local_addr->pool = conn->pool; +- rv = apr_socket_bind(newsock, local_addr); +- if (rv != APR_SUCCESS) { +- ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, APLOGNO(00956) +- "%s: failed to bind socket to local address", +- proxy_function); ++ rv = apr_socket_opt_set(newsock, APR_TCP_NODELAY, 1); ++ if (rv != APR_SUCCESS && rv != APR_ENOTIMPL) { ++ ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, APLOGNO(00954) ++ "apr_socket_opt_set(APR_TCP_NODELAY): " ++ "Failed to set"); + } +- } + +- /* make the connection out of the socket */ +- rv = apr_socket_connect(newsock, backend_addr); ++ /* Set a timeout for connecting to the backend on the socket */ ++ if (worker->s->conn_timeout_set) { ++ apr_socket_timeout_set(newsock, worker->s->conn_timeout); ++ } ++ else if (worker->s->timeout_set) { ++ apr_socket_timeout_set(newsock, worker->s->timeout); ++ } ++ else if (conf->timeout_set) { ++ apr_socket_timeout_set(newsock, conf->timeout); ++ } ++ else { ++ apr_socket_timeout_set(newsock, s->timeout); ++ } ++ /* Set a keepalive option */ ++ if (worker->s->keepalive) { ++ if ((rv = apr_socket_opt_set(newsock, ++ APR_SO_KEEPALIVE, 1)) != APR_SUCCESS) { ++ ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, APLOGNO(00955) ++ "apr_socket_opt_set(SO_KEEPALIVE): Failed to set" ++ " Keepalive"); ++ } ++ } ++ ap_log_error(APLOG_MARK, APLOG_TRACE2, 0, s, ++ "%s: fam %d socket created to connect to %s", ++ proxy_function, backend_addr->family, worker->s->hostname); ++ ++ if (conf->source_address_set) { ++ local_addr = apr_pmemdup(conn->pool, conf->source_address, ++ sizeof(apr_sockaddr_t)); ++ local_addr->pool = conn->pool; ++ rv = apr_socket_bind(newsock, local_addr); ++ if (rv != APR_SUCCESS) { ++ ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, APLOGNO(00956) ++ "%s: failed to bind socket to local address", ++ proxy_function); ++ } ++ } + +- /* if an error occurred, loop round and try again */ +- if (rv != APR_SUCCESS) { +- apr_socket_close(newsock); +- loglevel = backend_addr->next ? APLOG_DEBUG : APLOG_ERR; +- ap_log_error(APLOG_MARK, loglevel, rv, s, APLOGNO(00957) +- "%s: attempt to connect to %pI (%s) failed", +- proxy_function, +- backend_addr, +- worker->s->hostname); +- backend_addr = backend_addr->next; +- continue; ++ /* make the connection out of the socket */ ++ rv = apr_socket_connect(newsock, backend_addr); ++ ++ /* if an error occurred, loop round and try again */ ++ if (rv != APR_SUCCESS) { ++ apr_socket_close(newsock); ++ loglevel = backend_addr->next ? 
APLOG_DEBUG : APLOG_ERR; ++ ap_log_error(APLOG_MARK, loglevel, rv, s, APLOGNO(00957) ++ "%s: attempt to connect to %pI (%s) failed", ++ proxy_function, ++ backend_addr, ++ worker->s->hostname); ++ backend_addr = backend_addr->next; ++ continue; ++ } + } + + /* Set a timeout on the socket */ +@@ -2486,7 +2788,7 @@ PROXY_DECLARE(int) ap_proxy_connect_backend(const char *proxy_function, + + conn->sock = newsock; + +- if (conn->forward) { ++ if (!conn->uds_path && conn->forward) { + forward_info *forward = (forward_info *)conn->forward; + /* + * For HTTP CONNECT we need to prepend CONNECT request before +@@ -2767,7 +3069,7 @@ PROXY_DECLARE(apr_status_t) ap_proxy_sync_balancer(proxy_balancer *b, server_rec + found = 1; + ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(02402) + "re-grabbing shm[%d] (0x%pp) for worker: %s", i, (void *)shm, +- worker->s->name); ++ ap_proxy_worker_name(conf->pool, worker)); + break; + } + } +@@ -3201,6 +3503,39 @@ PROXY_DECLARE(int) ap_proxy_pass_brigade(apr_bucket_alloc_t *bucket_alloc, + return OK; + } + ++/* Fill in unknown schemes from apr_uri_port_of_scheme() */ ++ ++typedef struct proxy_schemes_t { ++ const char *name; ++ apr_port_t default_port; ++} proxy_schemes_t ; ++ ++static proxy_schemes_t pschemes[] = ++{ ++ {"fcgi", 8000}, ++ {"ajp", AJP13_DEF_PORT}, ++ {"scgi", 4000}, ++ { NULL, 0xFFFF } /* unknown port */ ++}; ++ ++PROXY_DECLARE(apr_port_t) ap_proxy_port_of_scheme(const char *scheme) ++{ ++ if (scheme) { ++ apr_port_t port; ++ if ((port = apr_uri_port_of_scheme(scheme)) != 0) { ++ return port; ++ } else { ++ proxy_schemes_t *pscheme; ++ for (pscheme = pschemes; pscheme->name != NULL; ++pscheme) { ++ if (strcasecmp(scheme, pscheme->name) == 0) { ++ return pscheme->default_port; ++ } ++ } ++ } ++ } ++ return 0; ++} ++ + void proxy_util_register_hooks(apr_pool_t *p) + { + APR_REGISTER_OPTIONAL_FN(ap_proxy_retry_worker); diff --git a/SOURCES/httpd-2.4.6-upn.patch b/SOURCES/httpd-2.4.6-upn.patch new file mode 100644 index 0000000..ce7f19c --- /dev/null +++ b/SOURCES/httpd-2.4.6-upn.patch @@ -0,0 +1,381 @@ +diff --git a/modules/ssl/mod_ssl.c b/modules/ssl/mod_ssl.c +index 926e05e..bbe1d20 100644 +--- a/modules/ssl/mod_ssl.c ++++ b/modules/ssl/mod_ssl.c +@@ -333,6 +333,11 @@ static int ssl_hook_pre_config(apr_pool_t *pconf, + OpenSSL_add_all_algorithms(); + OPENSSL_load_builtin_modules(); + ++ if (OBJ_txt2nid("id-on-dnsSRV") == NID_undef) { ++ (void)OBJ_create("1.3.6.1.5.5.7.8.7", "id-on-dnsSRV", ++ "SRVName otherName form"); ++ } ++ + /* + * Let us cleanup the ssl library when the module is unloaded + */ +diff --git a/modules/ssl/ssl_engine_kernel.c b/modules/ssl/ssl_engine_kernel.c +index eb11a38..27eaa5a 100644 +--- a/modules/ssl/ssl_engine_kernel.c ++++ b/modules/ssl/ssl_engine_kernel.c +@@ -1156,6 +1156,7 @@ int ssl_hook_Fixup(request_rec *r) + /* standard SSL environment variables */ + if (dc->nOptions & SSL_OPT_STDENVVARS) { + modssl_var_extract_dns(env, sslconn->ssl, r->pool); ++ modssl_var_extract_san_entries(env, sslconn->ssl, r->pool); + + for (i = 0; ssl_hook_Fixup_vars[i]; i++) { + var = (char *)ssl_hook_Fixup_vars[i]; +diff --git a/modules/ssl/ssl_engine_vars.c b/modules/ssl/ssl_engine_vars.c +index c508fff..2b7c9ba 100644 +--- a/modules/ssl/ssl_engine_vars.c ++++ b/modules/ssl/ssl_engine_vars.c +@@ -42,6 +42,7 @@ + static char *ssl_var_lookup_ssl(apr_pool_t *p, conn_rec *c, request_rec *r, char *var); + static char *ssl_var_lookup_ssl_cert(apr_pool_t *p, request_rec *r, X509 *xs, char *var); + static char 
*ssl_var_lookup_ssl_cert_dn(apr_pool_t *p, X509_NAME *xsname, char *var); ++static char *ssl_var_lookup_ssl_cert_san(apr_pool_t *p, X509 *xs, char *var); + static char *ssl_var_lookup_ssl_cert_valid(apr_pool_t *p, ASN1_TIME *tm); + static char *ssl_var_lookup_ssl_cert_remain(apr_pool_t *p, ASN1_TIME *tm); + static char *ssl_var_lookup_ssl_cert_serial(apr_pool_t *p, X509 *xs); +@@ -509,6 +510,10 @@ static char *ssl_var_lookup_ssl_cert(apr_pool_t *p, request_rec *r, X509 *xs, + result = ssl_var_lookup_ssl_cert_dn(p, xsname, var+5); + resdup = FALSE; + } ++ else if (strlen(var) > 4 && strcEQn(var, "SAN_", 4)) { ++ result = ssl_var_lookup_ssl_cert_san(p, xs, var+4); ++ resdup = FALSE; ++ } + else if (strcEQ(var, "A_SIG")) { + nid = OBJ_obj2nid((ASN1_OBJECT *)(xs->cert_info->signature->algorithm)); + result = apr_pstrdup(p, +@@ -597,6 +602,49 @@ static char *ssl_var_lookup_ssl_cert_dn(apr_pool_t *p, X509_NAME *xsname, char * + return result; + } + ++static char *ssl_var_lookup_ssl_cert_san(apr_pool_t *p, X509 *xs, char *var) ++{ ++ int type, numlen; ++ const char *onf = NULL; ++ apr_array_header_t *entries; ++ ++ if (strcEQn(var, "Email_", 6)) { ++ type = GEN_EMAIL; ++ var += 6; ++ } ++ else if (strcEQn(var, "DNS_", 4)) { ++ type = GEN_DNS; ++ var += 4; ++ } ++ else if (strcEQn(var, "OTHER_", 6)) { ++ type = GEN_OTHERNAME; ++ var += 6; ++ if (strEQn(var, "msUPN_", 6)) { ++ var += 6; ++ onf = "msUPN"; ++ } ++ else if (strEQn(var, "dnsSRV_", 7)) { ++ var += 7; ++ onf = "id-on-dnsSRV"; ++ } ++ else ++ return NULL; ++ } ++ else ++ return NULL; ++ ++ /* sanity check: number must be between 1 and 4 digits */ ++ numlen = strspn(var, "0123456789"); ++ if ((numlen < 1) || (numlen > 4) || (numlen != strlen(var))) ++ return NULL; ++ ++ if (SSL_X509_getSAN(p, xs, type, onf, atoi(var), &entries)) ++ /* return the first entry from this 1-element array */ ++ return APR_ARRAY_IDX(entries, 0, char *); ++ else ++ return NULL; ++} ++ + static char *ssl_var_lookup_ssl_cert_valid(apr_pool_t *p, ASN1_TIME *tm) + { + char *result; +@@ -890,6 +938,54 @@ void modssl_var_extract_dns(apr_table_t *t, SSL *ssl, apr_pool_t *p) + } + } + ++static void extract_san_array(apr_table_t *t, const char *pfx, ++ apr_array_header_t *entries, apr_pool_t *p) ++{ ++ int i; ++ ++ for (i = 0; i < entries->nelts; i++) { ++ const char *key = apr_psprintf(p, "%s_%d", pfx, i); ++ apr_table_setn(t, key, APR_ARRAY_IDX(entries, i, const char *)); ++ } ++} ++ ++void modssl_var_extract_san_entries(apr_table_t *t, SSL *ssl, apr_pool_t *p) ++{ ++ X509 *xs; ++ apr_array_header_t *entries; ++ ++ /* subjectAltName entries of the server certificate */ ++ xs = SSL_get_certificate(ssl); ++ if (xs) { ++ if (SSL_X509_getSAN(p, xs, GEN_EMAIL, NULL, -1, &entries)) { ++ extract_san_array(t, "SSL_SERVER_SAN_Email", entries, p); ++ } ++ if (SSL_X509_getSAN(p, xs, GEN_DNS, NULL, -1, &entries)) { ++ extract_san_array(t, "SSL_SERVER_SAN_DNS", entries, p); ++ } ++ if (SSL_X509_getSAN(p, xs, GEN_OTHERNAME, "id-on-dnsSRV", -1, ++ &entries)) { ++ extract_san_array(t, "SSL_SERVER_SAN_OTHER_dnsSRV", entries, p); ++ } ++ /* no need to free xs (refcount does not increase) */ ++ } ++ ++ /* subjectAltName entries of the client certificate */ ++ xs = SSL_get_peer_certificate(ssl); ++ if (xs) { ++ if (SSL_X509_getSAN(p, xs, GEN_EMAIL, NULL, -1, &entries)) { ++ extract_san_array(t, "SSL_CLIENT_SAN_Email", entries, p); ++ } ++ if (SSL_X509_getSAN(p, xs, GEN_DNS, NULL, -1, &entries)) { ++ extract_san_array(t, "SSL_CLIENT_SAN_DNS", entries, p); ++ } ++ if (SSL_X509_getSAN(p, xs, 
GEN_OTHERNAME, "msUPN", -1, &entries)) { ++ extract_san_array(t, "SSL_CLIENT_SAN_OTHER_msUPN", entries, p); ++ } ++ X509_free(xs); ++ } ++} ++ + /* For an extension type which OpenSSL does not recognize, attempt to + * parse the extension type as a primitive string. This will fail for + * any structured extension type per the docs. Returns non-zero on +diff --git a/modules/ssl/ssl_private.h b/modules/ssl/ssl_private.h +index a5ede6e..80e1e8e 100644 +--- a/modules/ssl/ssl_private.h ++++ b/modules/ssl/ssl_private.h +@@ -974,6 +974,10 @@ void ssl_var_log_config_register(apr_pool_t *p); + * allocating from 'p': */ + void modssl_var_extract_dns(apr_table_t *t, SSL *ssl, apr_pool_t *p); + ++/* Extract SSL_*_SAN_* variables (subjectAltName entries) into table 't' ++ * from SSL object 'ssl', allocating from 'p'. */ ++void modssl_var_extract_san_entries(apr_table_t *t, SSL *ssl, apr_pool_t *p); ++ + #ifndef OPENSSL_NO_OCSP + /* Perform OCSP validation of the current cert in the given context. + * Returns non-zero on success or zero on failure. On failure, the +diff --git a/modules/ssl/ssl_util_ssl.c b/modules/ssl/ssl_util_ssl.c +index 588ceba..09a9877 100644 +--- a/modules/ssl/ssl_util_ssl.c ++++ b/modules/ssl/ssl_util_ssl.c +@@ -236,22 +236,32 @@ BOOL SSL_X509_getBC(X509 *cert, int *ca, int *pathlen) + return TRUE; + } + +-/* convert a NAME_ENTRY to UTF8 string */ +-char *SSL_X509_NAME_ENTRY_to_string(apr_pool_t *p, X509_NAME_ENTRY *xsne) ++/* convert an ASN.1 string to a UTF-8 string (escaping control characters) */ ++char *SSL_ASN1_STRING_to_utf8(apr_pool_t *p, ASN1_STRING *asn1str) + { + char *result = NULL; +- BIO* bio; ++ BIO *bio; + int len; + + if ((bio = BIO_new(BIO_s_mem())) == NULL) + return NULL; +- ASN1_STRING_print_ex(bio, X509_NAME_ENTRY_get_data(xsne), +- ASN1_STRFLGS_ESC_CTRL|ASN1_STRFLGS_UTF8_CONVERT); ++ ++ ASN1_STRING_print_ex(bio, asn1str, ASN1_STRFLGS_ESC_CTRL| ++ ASN1_STRFLGS_UTF8_CONVERT); + len = BIO_pending(bio); +- result = apr_palloc(p, len+1); +- len = BIO_read(bio, result, len); +- result[len] = NUL; ++ if (len > 0) { ++ result = apr_palloc(p, len+1); ++ len = BIO_read(bio, result, len); ++ result[len] = NUL; ++ } + BIO_free(bio); ++ return result; ++} ++ ++/* convert a NAME_ENTRY to UTF8 string */ ++char *SSL_X509_NAME_ENTRY_to_string(apr_pool_t *p, X509_NAME_ENTRY *xsne) ++{ ++ char *result = SSL_ASN1_STRING_to_utf8(p, X509_NAME_ENTRY_get_data(xsne)); + ap_xlate_proto_from_ascii(result, len); + return result; + } +@@ -288,51 +298,123 @@ char *SSL_X509_NAME_to_string(apr_pool_t *p, X509_NAME *dn, int maxlen) + return result; + } + +-/* return an array of (RFC 6125 coined) DNS-IDs and CN-IDs in a certificate */ +-BOOL SSL_X509_getIDs(apr_pool_t *p, X509 *x509, apr_array_header_t **ids) ++static void parse_otherName_value(apr_pool_t *p, ASN1_TYPE *value, ++ const char *onf, apr_array_header_t **entries) ++{ ++ const char *str; ++ int nid = onf ? 
OBJ_txt2nid(onf) : NID_undef; ++ ++ if (!value || (nid == NID_undef) || !*entries) ++ return; ++ ++ /* ++ * Currently supported otherName forms (values for "onf"): ++ * "msUPN" (1.3.6.1.4.1.311.20.2.3): Microsoft User Principal Name ++ * "id-on-dnsSRV" (1.3.6.1.5.5.7.8.7): SRVName, as specified in RFC 4985 ++ */ ++ if ((nid == NID_ms_upn) && (value->type == V_ASN1_UTF8STRING) && ++ (str = SSL_ASN1_STRING_to_utf8(p, value->value.utf8string))) { ++ APR_ARRAY_PUSH(*entries, const char *) = str; ++ } else if (strEQ(onf, "id-on-dnsSRV") && ++ (value->type == V_ASN1_IA5STRING) && ++ (str = SSL_ASN1_STRING_to_utf8(p, value->value.ia5string))) { ++ APR_ARRAY_PUSH(*entries, const char *) = str; ++ } ++} ++ ++/* ++ * Return an array of subjectAltName entries of type "type". If idx is -1, ++ * return all entries of the given type, otherwise return an array consisting ++ * of the n-th occurrence of that type only. Currently supported types: ++ * GEN_EMAIL (rfc822Name) ++ * GEN_DNS (dNSName) ++ * GEN_OTHERNAME (requires the otherName form ["onf"] argument to be supplied, ++ * see parse_otherName_value for the currently supported forms) ++ */ ++BOOL SSL_X509_getSAN(apr_pool_t *p, X509 *x509, int type, const char *onf, ++ int idx, apr_array_header_t **entries) + { + STACK_OF(GENERAL_NAME) *names; +- BIO *bio; +- X509_NAME *subj; +- char **cpp; +- int i, n; ++ int nid = onf ? OBJ_txt2nid(onf) : NID_undef; + +- if (!x509 || !(*ids = apr_array_make(p, 0, sizeof(char *)))) { +- *ids = NULL; ++ if (!x509 || (type < GEN_OTHERNAME) || ++ ((type == GEN_OTHERNAME) && (nid == NID_undef)) || ++ (type > GEN_RID) || (idx < -1) || ++ !(*entries = apr_array_make(p, 0, sizeof(char *)))) { ++ *entries = NULL; + return FALSE; + } + +- /* First, the DNS-IDs (dNSName entries in the subjectAltName extension) */ +- if ((names = X509_get_ext_d2i(x509, NID_subject_alt_name, NULL, NULL)) && +- (bio = BIO_new(BIO_s_mem()))) { ++ if ((names = X509_get_ext_d2i(x509, NID_subject_alt_name, NULL, NULL))) { ++ int i, n = 0; + GENERAL_NAME *name; ++ const char *utf8str; + + for (i = 0; i < sk_GENERAL_NAME_num(names); i++) { + name = sk_GENERAL_NAME_value(names, i); +- if (name->type == GEN_DNS) { +- ASN1_STRING_print_ex(bio, name->d.ia5, ASN1_STRFLGS_ESC_CTRL| +- ASN1_STRFLGS_UTF8_CONVERT); +- n = BIO_pending(bio); +- if (n > 0) { +- cpp = (char **)apr_array_push(*ids); +- *cpp = apr_palloc(p, n+1); +- n = BIO_read(bio, *cpp, n); +- (*cpp)[n] = NUL; ++ ++ if (name->type != type) ++ continue; ++ ++ switch (type) { ++ case GEN_EMAIL: ++ case GEN_DNS: ++ if (((idx == -1) || (n == idx)) && ++ (utf8str = SSL_ASN1_STRING_to_utf8(p, name->d.ia5))) { ++ APR_ARRAY_PUSH(*entries, const char *) = utf8str; ++ } ++ n++; ++ break; ++ case GEN_OTHERNAME: ++ if (OBJ_obj2nid(name->d.otherName->type_id) == nid) { ++ if (((idx == -1) || (n == idx))) { ++ parse_otherName_value(p, name->d.otherName->value, ++ onf, entries); ++ } ++ n++; + } ++ break; ++ default: ++ /* ++ * Not implemented right now: ++ * GEN_X400 (x400Address) ++ * GEN_DIRNAME (directoryName) ++ * GEN_EDIPARTY (ediPartyName) ++ * GEN_URI (uniformResourceIdentifier) ++ * GEN_IPADD (iPAddress) ++ * GEN_RID (registeredID) ++ */ ++ break; + } ++ ++ if ((idx != -1) && (n > idx)) ++ break; + } +- BIO_free(bio); +- } + +- if (names) + sk_GENERAL_NAME_pop_free(names, GENERAL_NAME_free); ++ } ++ ++ return apr_is_empty_array(*entries) ? 
FALSE : TRUE; ++} ++ ++/* return an array of (RFC 6125 coined) DNS-IDs and CN-IDs in a certificate */ ++BOOL SSL_X509_getIDs(apr_pool_t *p, X509 *x509, apr_array_header_t **ids) ++{ ++ X509_NAME *subj; ++ int i = -1; ++ ++ /* First, the DNS-IDs (dNSName entries in the subjectAltName extension) */ ++ if (!x509 || ++ (SSL_X509_getSAN(p, x509, GEN_DNS, NULL, -1, ids) == FALSE && !*ids)) { ++ *ids = NULL; ++ return FALSE; ++ } + + /* Second, the CN-IDs (commonName attributes in the subject DN) */ + subj = X509_get_subject_name(x509); +- i = -1; + while ((i = X509_NAME_get_index_by_NID(subj, NID_commonName, i)) != -1) { +- cpp = (char **)apr_array_push(*ids); +- *cpp = SSL_X509_NAME_ENTRY_to_string(p, X509_NAME_get_entry(subj, i)); ++ APR_ARRAY_PUSH(*ids, const char *) = ++ SSL_X509_NAME_ENTRY_to_string(p, X509_NAME_get_entry(subj, i)); + } + + return apr_is_empty_array(*ids) ? FALSE : TRUE; +diff --git a/modules/ssl/ssl_util_ssl.h b/modules/ssl/ssl_util_ssl.h +index 4b882db..be07ab7 100644 +--- a/modules/ssl/ssl_util_ssl.h ++++ b/modules/ssl/ssl_util_ssl.h +@@ -65,8 +65,10 @@ EVP_PKEY *SSL_read_PrivateKey(char *, EVP_PKEY **, pem_password_cb *, void *); + int SSL_smart_shutdown(SSL *ssl); + BOOL SSL_X509_isSGC(X509 *); + BOOL SSL_X509_getBC(X509 *, int *, int *); ++char *SSL_ASN1_STRING_to_utf8(apr_pool_t *, ASN1_STRING *); + char *SSL_X509_NAME_ENTRY_to_string(apr_pool_t *p, X509_NAME_ENTRY *xsne); + char *SSL_X509_NAME_to_string(apr_pool_t *, X509_NAME *, int); ++BOOL SSL_X509_getSAN(apr_pool_t *, X509 *, int, const char *, int, apr_array_header_t **); + BOOL SSL_X509_getIDs(apr_pool_t *, X509 *, apr_array_header_t **); + BOOL SSL_X509_match_name(apr_pool_t *, X509 *, const char *, BOOL, server_rec *); + BOOL SSL_X509_INFO_load_file(apr_pool_t *, STACK_OF(X509_INFO) *, const char *); diff --git a/SOURCES/httpd-ssl-pass-dialog b/SOURCES/httpd-ssl-pass-dialog new file mode 100755 index 0000000..1e850cd --- /dev/null +++ b/SOURCES/httpd-ssl-pass-dialog @@ -0,0 +1,3 @@ +#!/bin/sh + +exec /bin/systemd-ask-password "Enter SSL pass phrase for $1 ($2) : " diff --git a/SOURCES/httpd.conf b/SOURCES/httpd.conf new file mode 100644 index 0000000..a7af0dc --- /dev/null +++ b/SOURCES/httpd.conf @@ -0,0 +1,353 @@ +# +# This is the main Apache HTTP server configuration file. It contains the +# configuration directives that give the server its instructions. +# See for detailed information. +# In particular, see +# +# for a discussion of each configuration directive. +# +# Do NOT simply read the instructions in here without understanding +# what they do. They're here only as hints or reminders. If you are unsure +# consult the online docs. You have been warned. +# +# Configuration and logfile names: If the filenames you specify for many +# of the server's control files begin with "/" (or "drive:/" for Win32), the +# server will use that explicit path. If the filenames do *not* begin +# with "/", the value of ServerRoot is prepended -- so 'log/access_log' +# with ServerRoot set to '/www' will be interpreted by the +# server as '/www/log/access_log', where as '/log/access_log' will be +# interpreted as '/log/access_log'. + +# +# ServerRoot: The top of the directory tree under which the server's +# configuration, error, and log files are kept. +# +# Do not add a slash at the end of the directory path. If you point +# ServerRoot at a non-local disk, be sure to specify a local disk on the +# Mutex directive, if file-based mutexes are used. 
If you wish to share the +# same ServerRoot for multiple httpd daemons, you will need to change at +# least PidFile. +# +ServerRoot "/etc/httpd" + +# +# Listen: Allows you to bind Apache to specific IP addresses and/or +# ports, instead of the default. See also the +# directive. +# +# Change this to Listen on specific IP addresses as shown below to +# prevent Apache from glomming onto all bound IP addresses. +# +#Listen 12.34.56.78:80 +Listen 80 + +# +# Dynamic Shared Object (DSO) Support +# +# To be able to use the functionality of a module which was built as a DSO you +# have to place corresponding `LoadModule' lines at this location so the +# directives contained in it are actually available _before_ they are used. +# Statically compiled modules (those listed by `httpd -l') do not need +# to be loaded here. +# +# Example: +# LoadModule foo_module modules/mod_foo.so +# +Include conf.modules.d/*.conf + +# +# If you wish httpd to run as a different user or group, you must run +# httpd as root initially and it will switch. +# +# User/Group: The name (or #number) of the user/group to run httpd as. +# It is usually good practice to create a dedicated user and group for +# running httpd, as with most system services. +# +User apache +Group apache + +# 'Main' server configuration +# +# The directives in this section set up the values used by the 'main' +# server, which responds to any requests that aren't handled by a +# definition. These values also provide defaults for +# any containers you may define later in the file. +# +# All of these directives may appear inside containers, +# in which case these default settings will be overridden for the +# virtual host being defined. +# + +# +# ServerAdmin: Your address, where problems with the server should be +# e-mailed. This address appears on some server-generated pages, such +# as error documents. e.g. admin@your-domain.com +# +ServerAdmin root@localhost + +# +# ServerName gives the name and port that the server uses to identify itself. +# This can often be determined automatically, but we recommend you specify +# it explicitly to prevent problems during startup. +# +# If your host doesn't have a registered DNS name, enter its IP address here. +# +#ServerName www.example.com:80 + +# +# Deny access to the entirety of your server's filesystem. You must +# explicitly permit access to web content directories in other +# blocks below. +# + + AllowOverride none + Require all denied + + +# +# Note that from this point forward you must specifically allow +# particular features to be enabled - so if something's not working as +# you might expect, make sure that you have specifically enabled it +# below. +# + +# +# DocumentRoot: The directory out of which you will serve your +# documents. By default, all requests are taken from this directory, but +# symbolic links and aliases may be used to point to other locations. +# +DocumentRoot "/var/www/html" + +# +# Relax access to content within /var/www. +# + + AllowOverride None + # Allow open access: + Require all granted + + +# Further relax access to the default document root: + + # + # Possible values for the Options directive are "None", "All", + # or any combination of: + # Indexes Includes FollowSymLinks SymLinksifOwnerMatch ExecCGI MultiViews + # + # Note that "MultiViews" must be named *explicitly* --- "Options All" + # doesn't give it to you. + # + # The Options directive is both complicated and important. Please see + # http://httpd.apache.org/docs/2.4/mod/core.html#options + # for more information. 
+ # + Options Indexes FollowSymLinks + + # + # AllowOverride controls what directives may be placed in .htaccess files. + # It can be "All", "None", or any combination of the keywords: + # Options FileInfo AuthConfig Limit + # + AllowOverride None + + # + # Controls who can get stuff from this server. + # + Require all granted + + +# +# DirectoryIndex: sets the file that Apache will serve if a directory +# is requested. +# + + DirectoryIndex index.html + + +# +# The following lines prevent .htaccess and .htpasswd files from being +# viewed by Web clients. +# + + Require all denied + + +# +# ErrorLog: The location of the error log file. +# If you do not specify an ErrorLog directive within a +# container, error messages relating to that virtual host will be +# logged here. If you *do* define an error logfile for a +# container, that host's errors will be logged there and not here. +# +ErrorLog "logs/error_log" + +# +# LogLevel: Control the number of messages logged to the error_log. +# Possible values include: debug, info, notice, warn, error, crit, +# alert, emerg. +# +LogLevel warn + + + # + # The following directives define some format nicknames for use with + # a CustomLog directive (see below). + # + LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"" combined + LogFormat "%h %l %u %t \"%r\" %>s %b" common + + + # You need to enable mod_logio.c to use %I and %O + LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\" %I %O" combinedio + + + # + # The location and format of the access logfile (Common Logfile Format). + # If you do not define any access logfiles within a + # container, they will be logged here. Contrariwise, if you *do* + # define per- access logfiles, transactions will be + # logged therein and *not* in this file. + # + #CustomLog "logs/access_log" common + + # + # If you prefer a logfile with access, agent, and referer information + # (Combined Logfile Format) you can use the following directive. + # + CustomLog "logs/access_log" combined + + + + # + # Redirect: Allows you to tell clients about documents that used to + # exist in your server's namespace, but do not anymore. The client + # will make a new request for the document at its new location. + # Example: + # Redirect permanent /foo http://www.example.com/bar + + # + # Alias: Maps web paths into filesystem paths and is used to + # access content that does not live under the DocumentRoot. + # Example: + # Alias /webpath /full/filesystem/path + # + # If you include a trailing / on /webpath then the server will + # require it to be present in the URL. You will also likely + # need to provide a section to allow access to + # the filesystem path. + + # + # ScriptAlias: This controls which directories contain server scripts. + # ScriptAliases are essentially the same as Aliases, except that + # documents in the target directory are treated as applications and + # run by the server when requested rather than as documents sent to the + # client. The same rules about trailing "/" apply to ScriptAlias + # directives as to Alias. + # + ScriptAlias /cgi-bin/ "/var/www/cgi-bin/" + + + +# +# "/var/www/cgi-bin" should be changed to whatever your ScriptAliased +# CGI directory exists, if you have that configured. +# + + AllowOverride None + Options None + Require all granted + + + + # + # TypesConfig points to the file containing the list of mappings from + # filename extension to MIME-type. 
+ # + TypesConfig /etc/mime.types + + # + # AddType allows you to add to or override the MIME configuration + # file specified in TypesConfig for specific file types. + # + #AddType application/x-gzip .tgz + # + # AddEncoding allows you to have certain browsers uncompress + # information on the fly. Note: Not all browsers support this. + # + #AddEncoding x-compress .Z + #AddEncoding x-gzip .gz .tgz + # + # If the AddEncoding directives above are commented-out, then you + # probably should define those extensions to indicate media types: + # + AddType application/x-compress .Z + AddType application/x-gzip .gz .tgz + + # + # AddHandler allows you to map certain file extensions to "handlers": + # actions unrelated to filetype. These can be either built into the server + # or added with the Action directive (see below) + # + # To use CGI scripts outside of ScriptAliased directories: + # (You will also need to add "ExecCGI" to the "Options" directive.) + # + #AddHandler cgi-script .cgi + + # For type maps (negotiated resources): + #AddHandler type-map var + + # + # Filters allow you to process content before it is sent to the client. + # + # To parse .shtml files for server-side includes (SSI): + # (You will also need to add "Includes" to the "Options" directive.) + # + AddType text/html .shtml + AddOutputFilter INCLUDES .shtml + + +# +# Specify a default charset for all content served; this enables +# interpretation of all content as UTF-8 by default. To use the +# default browser choice (ISO-8859-1), or to allow the META tags +# in HTML content to override this choice, comment out this +# directive: +# +AddDefaultCharset UTF-8 + + + # + # The mod_mime_magic module allows the server to use various hints from the + # contents of the file itself to determine its type. The MIMEMagicFile + # directive tells the module where the hint definitions are located. + # + MIMEMagicFile conf/magic + + +# +# Customizable error responses come in three flavors: +# 1) plain text 2) local redirects 3) external redirects +# +# Some examples: +#ErrorDocument 500 "The server made a boo boo." +#ErrorDocument 404 /missing.html +#ErrorDocument 404 "/cgi-bin/missing_handler.pl" +#ErrorDocument 402 http://www.example.com/subscription_info.html +# + +# +# EnableMMAP and EnableSendfile: On systems that support it, +# memory-mapping or the sendfile syscall may be used to deliver +# files. This usually improves server performance, but must +# be turned off when serving from networked-mounted +# filesystems or if support for these functions is otherwise +# broken on your system. +# Defaults if commented: EnableMMAP On, EnableSendfile Off +# +#EnableMMAP off +EnableSendfile on + +# Supplemental configuration +# +# Load config files in the "/etc/httpd/conf.d" directory, if any. 
+IncludeOptional conf.d/*.conf diff --git a/SOURCES/httpd.logrotate b/SOURCES/httpd.logrotate new file mode 100644 index 0000000..28c9730 --- /dev/null +++ b/SOURCES/httpd.logrotate @@ -0,0 +1,9 @@ +/var/log/httpd/*log { + missingok + notifempty + sharedscripts + delaycompress + postrotate + /bin/systemctl reload httpd.service > /dev/null 2>/dev/null || true + endscript +} diff --git a/SOURCES/httpd.service b/SOURCES/httpd.service new file mode 100644 index 0000000..e5538ee --- /dev/null +++ b/SOURCES/httpd.service @@ -0,0 +1,22 @@ +[Unit] +Description=The Apache HTTP Server +After=network.target remote-fs.target nss-lookup.target +Documentation=man:httpd(8) +Documentation=man:apachectl(8) + +[Service] +Type=notify +EnvironmentFile=/etc/sysconfig/httpd +ExecStart=/usr/sbin/httpd $OPTIONS -DFOREGROUND +ExecReload=/usr/sbin/httpd $OPTIONS -k graceful +ExecStop=/bin/kill -WINCH ${MAINPID} +# We want systemd to give httpd some time to finish gracefully, but still want +# it to kill httpd after TimeoutStopSec if something went wrong during the +# graceful stop. Normally, Systemd sends SIGTERM signal right after the +# ExecStop, which would kill httpd. We are sending useless SIGCONT here to give +# httpd time to finish. +KillSignal=SIGCONT +PrivateTmp=true + +[Install] +WantedBy=multi-user.target diff --git a/SOURCES/httpd.sysconf b/SOURCES/httpd.sysconf new file mode 100644 index 0000000..1b38411 --- /dev/null +++ b/SOURCES/httpd.sysconf @@ -0,0 +1,25 @@ +# +# This file can be used to set additional environment variables for +# the httpd process, or pass additional options to the httpd +# executable. +# +# Note: With previous versions of httpd, the MPM could be changed by +# editing an "HTTPD" variable here. With the current version, that +# variable is now ignored. The MPM is a loadable module, and the +# choice of MPM can be changed by editing the configuration file +# /etc/httpd/conf.modules.d/00-mpm.conf. +# + +# +# To pass additional options (for instance, -D definitions) to the +# httpd binary at startup, set OPTIONS here. +# +#OPTIONS= + +# +# This setting ensures the httpd process is started in the "C" locale +# by default. (Some modules will not behave correctly if +# case-sensitive string comparisons are performed in a different +# locale.) +# +LANG=C diff --git a/SOURCES/httpd.tmpfiles b/SOURCES/httpd.tmpfiles new file mode 100644 index 0000000..f148886 --- /dev/null +++ b/SOURCES/httpd.tmpfiles @@ -0,0 +1,2 @@ +d /run/httpd 710 root apache +d /run/httpd/htcacheclean 700 apache apache diff --git a/SOURCES/index.html b/SOURCES/index.html new file mode 100644 index 0000000..06ad3fc --- /dev/null +++ b/SOURCES/index.html @@ -0,0 +1,123 @@ + + + + + Test Page for the Apache HTTP Server on Red Hat Enterprise Linux + + + + + +

    Red Hat Enterprise Linux Test Page

    + +
    +
    +

    This page is used to test the proper operation of the Apache HTTP server after it has been installed. If you can read this page, it means that the Apache HTTP server installed at this site is working properly.

    +
    +
    + +
    +
    +

    If you are a member of the general public:

    + +

    The fact that you are seeing this page indicates that the website you just visited is either experiencing problems, or is undergoing routine maintenance.

    + +

    If you would like to let the administrators of this website know that you've seen this page instead of the page you expected, you should send them e-mail. In general, mail sent to the name "webmaster" and directed to the website's domain should reach the appropriate person.

    + +

    For example, if you experienced problems while visiting www.example.com, you should send e-mail to "webmaster@example.com".

    + +

    For information on Red Hat Enterprise Linux, please visit the Red Hat, Inc. website. The documentation for Red Hat Enterprise Linux is available on the Red Hat, Inc. website.

    +
    +
    + +
    +

    If you are the website administrator:

    + +

    You may now add content to the directory /var/www/html/. Note that until you do so, people visiting your website will see this page, and not your content. To prevent this page from ever being used, follow the instructions in the file /etc/httpd/conf.d/welcome.conf.

    + +

    You are free to use the image below on web sites powered by the Apache HTTP Server:

    + +

    [ Powered by Apache ]

    + +
    +
    +
    + + diff --git a/SOURCES/manual.conf b/SOURCES/manual.conf new file mode 100644 index 0000000..cf626ac --- /dev/null +++ b/SOURCES/manual.conf @@ -0,0 +1,13 @@ +# +# This configuration file allows the manual to be accessed at +# http://localhost/manual/ +# +Alias /manual /usr/share/httpd/manual + + + Options Indexes + AllowOverride None + Require all granted + + RedirectMatch 301 ^/manual/(?:de|en|fr|ja|ko|ru)(/.*)$ "/manual$1" + diff --git a/SOURCES/ssl.conf b/SOURCES/ssl.conf new file mode 100644 index 0000000..5283a93 --- /dev/null +++ b/SOURCES/ssl.conf @@ -0,0 +1,217 @@ +# +# When we also provide SSL we have to listen to the +# the HTTPS port in addition. +# +Listen 443 https + +## +## SSL Global Context +## +## All SSL configuration in this context applies both to +## the main server and all SSL-enabled virtual hosts. +## + +# Pass Phrase Dialog: +# Configure the pass phrase gathering process. +# The filtering dialog program (`builtin' is a internal +# terminal dialog) has to provide the pass phrase on stdout. +SSLPassPhraseDialog exec:/usr/libexec/httpd-ssl-pass-dialog + +# Inter-Process Session Cache: +# Configure the SSL Session Cache: First the mechanism +# to use and second the expiring timeout (in seconds). +SSLSessionCache shmcb:/run/httpd/sslcache(512000) +SSLSessionCacheTimeout 300 + +# Pseudo Random Number Generator (PRNG): +# Configure one or more sources to seed the PRNG of the +# SSL library. The seed data should be of good random quality. +# WARNING! On some platforms /dev/random blocks if not enough entropy +# is available. This means you then cannot use the /dev/random device +# because it would lead to very long connection times (as long as +# it requires to make more entropy available). But usually those +# platforms additionally provide a /dev/urandom device which doesn't +# block. So, if available, use this one instead. Read the mod_ssl User +# Manual for more details. +SSLRandomSeed startup file:/dev/urandom 256 +SSLRandomSeed connect builtin +#SSLRandomSeed startup file:/dev/random 512 +#SSLRandomSeed connect file:/dev/random 512 +#SSLRandomSeed connect file:/dev/urandom 512 + +# +# Use "SSLCryptoDevice" to enable any supported hardware +# accelerators. Use "openssl engine -v" to list supported +# engine names. NOTE: If you enable an accelerator and the +# server does not start, consult the error logs and ensure +# your accelerator is functioning properly. +# +SSLCryptoDevice builtin +#SSLCryptoDevice ubsec + +## +## SSL Virtual Host Context +## + + + +# General setup for the virtual host, inherited from global configuration +#DocumentRoot "/var/www/html" +#ServerName www.example.com:443 + +# Use separate log files for the SSL virtual host; note that LogLevel +# is not inherited from httpd.conf. +ErrorLog logs/ssl_error_log +TransferLog logs/ssl_access_log +LogLevel warn + +# SSL Engine Switch: +# Enable/Disable SSL for this virtual host. +SSLEngine on + +# SSL Protocol support: +# List the enable protocol levels with which clients will be able to +# connect. Disable SSLv2 access by default: +SSLProtocol all -SSLv2 -SSLv3 + +# SSL Cipher Suite: +# List the ciphers that the client is permitted to negotiate. +# See the mod_ssl documentation for a complete list. +SSLCipherSuite HIGH:3DES:!aNULL:!MD5:!SEED:!IDEA + +# Speed-optimized SSL Cipher configuration: +# If speed is your main concern (on busy HTTPS servers e.g.), +# you might want to force clients to specific, performance +# optimized ciphers. 
In this case, prepend those ciphers +# to the SSLCipherSuite list, and enable SSLHonorCipherOrder. +# Caveat: by giving precedence to RC4-SHA and AES128-SHA +# (as in the example below), most connections will no longer +# have perfect forward secrecy - if the server's key is +# compromised, captures of past or future traffic must be +# considered compromised, too. +#SSLCipherSuite RC4-SHA:AES128-SHA:HIGH:MEDIUM:!aNULL:!MD5 +#SSLHonorCipherOrder on + +# Server Certificate: +# Point SSLCertificateFile at a PEM encoded certificate. If +# the certificate is encrypted, then you will be prompted for a +# pass phrase. Note that a kill -HUP will prompt again. A new +# certificate can be generated using the genkey(1) command. +SSLCertificateFile /etc/pki/tls/certs/localhost.crt + +# Server Private Key: +# If the key is not combined with the certificate, use this +# directive to point at the key file. Keep in mind that if +# you have both an RSA and a DSA private key you can configure +# both in parallel (to also allow the use of DSA ciphers, etc.) +SSLCertificateKeyFile /etc/pki/tls/private/localhost.key + +# Server Certificate Chain: +# Point SSLCertificateChainFile at a file containing the +# concatenation of PEM encoded CA certificates which form the +# certificate chain for the server certificate. Alternatively +# the referenced file can be the same as SSLCertificateFile +# when the CA certificates are directly appended to the server +# certificate for convenience. +#SSLCertificateChainFile /etc/pki/tls/certs/server-chain.crt + +# Certificate Authority (CA): +# Set the CA certificate verification path where to find CA +# certificates for client authentication or alternatively one +# huge file containing all of them (file must be PEM encoded) +#SSLCACertificateFile /etc/pki/tls/certs/ca-bundle.crt + +# Client Authentication (Type): +# Client certificate verification type and depth. Types are +# none, optional, require and optional_no_ca. Depth is a +# number which specifies how deeply to verify the certificate +# issuer chain before deciding the certificate is not valid. +#SSLVerifyClient require +#SSLVerifyDepth 10 + +# Access Control: +# With SSLRequire you can do per-directory access control based +# on arbitrarily complex boolean expressions containing server +# variable checks and other lookup directives. The syntax is a +# mixture between C and Perl. See the mod_ssl documentation +# for more details. +# +#SSLRequire ( %{SSL_CIPHER} !~ m/^(EXP|NULL)/ \ +# and %{SSL_CLIENT_S_DN_O} eq "Snake Oil, Ltd." \ +# and %{SSL_CLIENT_S_DN_OU} in {"Staff", "CA", "Dev"} \ +# and %{TIME_WDAY} >= 1 and %{TIME_WDAY} <= 5 \ +# and %{TIME_HOUR} >= 8 and %{TIME_HOUR} <= 20 ) \ +# or %{REMOTE_ADDR} =~ m/^192\.76\.162\.[0-9]+$/ +# + +# SSL Engine Options: +# Set various options for the SSL engine. +# o FakeBasicAuth: +# Translate the client X.509 into a Basic Authorisation. This means that +# the standard Auth/DBMAuth methods can be used for access control. The +# user name is the `one line' version of the client's X.509 certificate. +# Note that no password is obtained from the user. Every entry in the user +# file needs this password: `xxj31ZMTZzkVA'. +# o ExportCertData: +# This exports two additional environment variables: SSL_CLIENT_CERT and +# SSL_SERVER_CERT. These contain the PEM-encoded certificates of the +# server (always existing) and the client (only existing when client +# authentication is used). This can be used to import the certificates +# into CGI scripts. 
+# o StdEnvVars: +# This exports the standard SSL/TLS related `SSL_*' environment variables. +# By default this export is switched off for performance reasons, +# because the extraction step is an expensive operation and is usually +# useless for serving static content. So one usually enables the +# export for CGI and SSI requests only. +# o StrictRequire: +# This denies access when "SSLRequireSSL" or "SSLRequire" is applied even +# under a "Satisfy any" situation, i.e. when it applies access is denied +# and no other module can change it. +# o OptRenegotiate: +# This enables optimized SSL connection renegotiation handling when SSL +# directives are used in per-directory context. +#SSLOptions +FakeBasicAuth +ExportCertData +StrictRequire + + SSLOptions +StdEnvVars + + + SSLOptions +StdEnvVars + + +# SSL Protocol Adjustments: +# The safe and default but still SSL/TLS standard compliant shutdown +# approach is that mod_ssl sends the close notify alert but doesn't wait for +# the close notify alert from the client. When you need a different shutdown +# approach you can use one of the following variables: +# o ssl-unclean-shutdown: +# This forces an unclean shutdown when the connection is closed, i.e. no +# SSL close notify alert is sent or allowed to be received. This violates +# the SSL/TLS standard but is needed for some brain-dead browsers. Use +# this when you receive I/O errors because of the standard approach where +# mod_ssl sends the close notify alert. +# o ssl-accurate-shutdown: +# This forces an accurate shutdown when the connection is closed, i.e. an +# SSL close notify alert is sent and mod_ssl waits for the close notify +# alert of the client. This is 100% SSL/TLS standard compliant, but in +# practice often causes hanging connections with brain-dead browsers. Use +# this only for browsers where you know that their SSL implementation +# works correctly. +# Notice: Most problems of broken clients are also related to the HTTP +# keep-alive facility, so you usually additionally want to disable +# keep-alive for those clients, too. Use variable "nokeepalive" for this. +# Similarly, one has to force some clients to use HTTP/1.0 to work around +# their broken HTTP/1.1 implementation. Use variables "downgrade-1.0" and +# "force-response-1.0" for this. +BrowserMatch "MSIE [2-5]" \ + nokeepalive ssl-unclean-shutdown \ + downgrade-1.0 force-response-1.0 + +# Per-Server Logging: +# The home of a custom SSL log file. Use this when you want a +# compact non-error SSL logfile on a virtual host basis. +CustomLog logs/ssl_request_log \ + "%t %h %{SSL_PROTOCOL}x %{SSL_CIPHER}x \"%r\" %b" + + + diff --git a/SOURCES/userdir.conf b/SOURCES/userdir.conf new file mode 100644 index 0000000..b5d7a49 --- /dev/null +++ b/SOURCES/userdir.conf @@ -0,0 +1,36 @@ +# +# UserDir: The name of the directory that is appended onto a user's home +# directory if a ~user request is received. +# +# The path to the end user account 'public_html' directory must be +# accessible to the webserver userid. This usually means that ~userid +# must have permissions of 711, ~userid/public_html must have permissions +# of 755, and documents contained therein must be world-readable. +# Otherwise, the client will only receive a "403 Forbidden" message. +# + + # + # UserDir is disabled by default since it can confirm the presence + # of a username on the system (depending on home directory + # permissions). 
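#
# Editor's note (illustrative, not part of the packaged file): besides the
# all-or-nothing settings below, UserDir also accepts per-user lists, so the
# feature can stay disabled globally and be opened only for selected accounts.
# The user names in this sketch are assumptions:
#
#   UserDir disabled
#   UserDir enabled alice bob
#   UserDir public_html
#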
+ # + UserDir disabled + + # + # To enable requests to /~user/ to serve the user's public_html + # directory, remove the "UserDir disabled" line above, and uncomment + # the following line instead: + # + #UserDir public_html + + +# +# Control access to UserDir directories. The following is an example +# for a site where these directories are restricted to read-only. +# + + AllowOverride FileInfo AuthConfig Limit Indexes + Options MultiViews Indexes SymLinksIfOwnerMatch IncludesNoExec + Require method GET POST OPTIONS + + diff --git a/SOURCES/welcome.conf b/SOURCES/welcome.conf new file mode 100644 index 0000000..5d1e452 --- /dev/null +++ b/SOURCES/welcome.conf @@ -0,0 +1,18 @@ +# +# This configuration file enables the default "Welcome" page if there +# is no default index page present for the root URL. To disable the +# Welcome page, comment out all the lines below. +# +# NOTE: if this file is removed, it will be restored on upgrades. +# + + Options -Indexes + ErrorDocument 403 /.noindex.html + + + + AllowOverride None + Require all granted + + +Alias /.noindex.html /usr/share/httpd/noindex/index.html diff --git a/SPECS/httpd.spec b/SPECS/httpd.spec new file mode 100644 index 0000000..52a032a --- /dev/null +++ b/SPECS/httpd.spec @@ -0,0 +1,1620 @@ +%define contentdir %{_datadir}/httpd +%define docroot /var/www +%define suexec_caller apache +%define mmn 20120211 +%define oldmmnisa %{mmn}-%{__isa_name}-%{__isa_bits} +%define mmnisa %{mmn}%{__isa_name}%{__isa_bits} +%define vstring %(source /etc/os-release; echo ${REDHAT_SUPPORT_PRODUCT}) + +# Drop automatic provides for module DSOs +%{?filter_setup: +%filter_provides_in %{_libdir}/httpd/modules/.*\.so$ +%filter_setup +} + +Summary: Apache HTTP Server +Name: httpd +Version: 2.4.6 +Release: 92%{?dist} +URL: http://httpd.apache.org/ +Source0: http://www.apache.org/dist/httpd/httpd-%{version}.tar.bz2 +Source1: index.html +Source2: httpd.logrotate +Source3: httpd.sysconf +Source4: httpd-ssl-pass-dialog +Source5: httpd.tmpfiles +Source6: httpd.service +Source7: action-graceful.sh +Source8: action-configtest.sh +Source10: httpd.conf +Source11: 00-base.conf +Source12: 00-mpm.conf +Source13: 00-lua.conf +Source14: 01-cgi.conf +Source15: 00-dav.conf +Source16: 00-proxy.conf +Source17: 00-ssl.conf +Source18: 01-ldap.conf +Source19: 00-proxyhtml.conf +Source20: userdir.conf +Source21: ssl.conf +Source22: welcome.conf +Source23: manual.conf +Source24: 00-systemd.conf +Source25: 01-session.conf +# Documentation +Source30: README.confd +Source40: htcacheclean.service +Source41: htcacheclean.sysconf +# build/scripts patches +Patch1: httpd-2.4.1-apctl.patch +Patch2: httpd-2.4.3-apxs.patch +Patch3: httpd-2.4.1-deplibs.patch +Patch5: httpd-2.4.3-layout.patch +Patch6: httpd-2.4.3-apctl-systemd.patch +# Features/functional changes +Patch21: httpd-2.4.6-full-release.patch +Patch23: httpd-2.4.4-export.patch +Patch24: httpd-2.4.1-corelimit.patch +Patch25: httpd-2.4.1-selinux.patch +Patch26: httpd-2.4.4-r1337344+.patch +Patch27: httpd-2.4.2-icons.patch +Patch28: httpd-2.4.6-r1332643+.patch +Patch29: httpd-2.4.3-mod_systemd.patch +Patch30: httpd-2.4.4-cachehardmax.patch +Patch31: httpd-2.4.6-sslmultiproxy.patch +Patch32: httpd-2.4.6-r1537535.patch +Patch33: httpd-2.4.6-r1542327.patch +Patch34: httpd-2.4.6-ssl-large-keys.patch +Patch35: httpd-2.4.6-pre_htaccess.patch +Patch36: httpd-2.4.6-r1573626.patch +Patch37: httpd-2.4.6-uds.patch +Patch38: httpd-2.4.6-upn.patch +Patch39: httpd-2.4.6-r1664565.patch +Patch40: httpd-2.4.6-r1861793+.patch +# Bug fixes +Patch51: 
httpd-2.4.3-sslsninotreq.patch +Patch55: httpd-2.4.4-malformed-host.patch +Patch56: httpd-2.4.4-mod_unique_id.patch +Patch57: httpd-2.4.6-ldaprefer.patch +Patch58: httpd-2.4.6-r1507681+.patch +Patch59: httpd-2.4.6-r1556473.patch +Patch60: httpd-2.4.6-r1553540.patch +Patch61: httpd-2.4.6-rewrite-clientaddr.patch +Patch62: httpd-2.4.6-ab-overflow.patch +Patch63: httpd-2.4.6-sigint.patch +Patch64: httpd-2.4.6-ssl-ecdh-auto.patch +Patch65: httpd-2.4.6-r1556818.patch +Patch66: httpd-2.4.6-r1618851.patch +Patch67: httpd-2.4.6-r1526189.patch +Patch68: httpd-2.4.6-r1663647.patch +Patch69: httpd-2.4.6-r1569006.patch +Patch70: httpd-2.4.6-r1506474.patch +Patch71: httpd-2.4.6-bomb.patch +Patch72: httpd-2.4.6-r1604460.patch +Patch73: httpd-2.4.6-r1624349.patch +Patch74: httpd-2.4.6-ap-ipv6.patch +Patch75: httpd-2.4.6-r1530280.patch +Patch76: httpd-2.4.6-r1633085.patch +Patch78: httpd-2.4.6-ssl-error-free.patch +Patch79: httpd-2.4.6-r1528556.patch +Patch80: httpd-2.4.6-r1594625.patch +Patch81: httpd-2.4.6-r1674222.patch +Patch82: httpd-2.4.6-apachectl-httpd-env.patch +Patch83: httpd-2.4.6-rewrite-dir.patch +Patch84: httpd-2.4.6-r1420184.patch +Patch85: httpd-2.4.6-r1524368.patch +Patch86: httpd-2.4.6-r1528958.patch +Patch87: httpd-2.4.6-r1651083.patch +Patch88: httpd-2.4.6-r1688399.patch +Patch89: httpd-2.4.6-r1527509.patch +Patch90: httpd-2.4.6-apachectl-status.patch +Patch91: httpd-2.4.6-r1650655.patch +Patch92: httpd-2.4.6-r1533448.patch +Patch93: httpd-2.4.6-r1610013.patch +Patch94: httpd-2.4.6-r1705528.patch +Patch95: httpd-2.4.6-r1684462.patch +Patch96: httpd-2.4.6-r1650677.patch +Patch97: httpd-2.4.6-r1621601.patch +Patch98: httpd-2.4.6-r1610396.patch +Patch99: httpd-2.4.6-rotatelog-timezone.patch +Patch100: httpd-2.4.6-ab-ssl-error.patch +Patch101: httpd-2.4.6-r1723522.patch +Patch102: httpd-2.4.6-r1681107.patch +Patch103: httpd-2.4.6-dhparams-free.patch +Patch104: httpd-2.4.6-r1651658.patch +Patch105: httpd-2.4.6-r1560093.patch +Patch106: httpd-2.4.6-r1748212.patch +Patch107: httpd-2.4.6-r1570327.patch +Patch108: httpd-2.4.6-r1631119.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1406184 +Patch109: httpd-2.4.6-r1593002.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1389535 +Patch110: httpd-2.4.6-r1662640.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1348019 +Patch111: httpd-2.4.6-r1348019.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1396197 +Patch112: httpd-2.4.6-r1587053.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1376835 +# https://bugzilla.redhat.com/show_bug.cgi?id=1527295 +Patch113: httpd-2.4.6-mpm-segfault.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1372692 +Patch114: httpd-2.4.6-r1681114.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1371876 +Patch115: httpd-2.4.6-r1775832.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1353740 +Patch116: httpd-2.4.6-r1726019.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1364604 +Patch117: httpd-2.4.6-r1683112.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1378946 +Patch118: httpd-2.4.6-r1651653.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1414258 +Patch119: httpd-2.4.6-r1634529.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1397241 +Patch120: httpd-2.4.6-r1738878.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1445885 +Patch121: httpd-2.4.6-http-protocol-options-define.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1332242 +Patch122: httpd-2.4.6-statements-comment.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1451333 +Patch123: 
httpd-2.4.6-rotatelogs-zombie.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1368491 +Patch124: httpd-2.4.6-mod_authz_dbd-missing-query.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1288395 +Patch125: httpd-2.4.6-r1668532.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1499253 +Patch126: httpd-2.4.6-r1681289.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1430640 +Patch127: httpd-2.4.6-r1805099.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1448892 +Patch128: httpd-2.4.6-r1811831.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1464406 +Patch129: httpd-2.4.6-r1811746.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1440590 +Patch130: httpd-2.4.6-r1811976.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1506392 +Patch131: httpd-2.4.6-r1650310.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1557785 +Patch132: httpd-2.4.6-r1530999.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1533793 +Patch133: httpd-2.4.6-r1555539.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1523536 +Patch134: httpd-2.4.6-r1737363.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1548501 +Patch135: httpd-2.4.6-r1826995.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1556761 +Patch136: httpd-2.4.6-default-port-worker.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1493181 +Patch137: httpd-2.4.6-r1825120.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1458364 +Patch138: httpd-2.4.6-r1515372.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1458364 +Patch139: httpd-2.4.6-r1824872.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1583218 +Patch140: httpd-2.4.6-r1833014.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1673457 +Patch141: httpd-2.4.6-r1583175.patch +# https://bugzilla.redhat.com/show_bug.cgi?id=1649470 +Patch142: httpd-2.4.6-r1862604.patch + +# Security fixes +Patch200: httpd-2.4.6-CVE-2013-6438.patch +Patch201: httpd-2.4.6-CVE-2014-0098.patch +Patch202: httpd-2.4.6-CVE-2014-0231.patch +Patch203: httpd-2.4.6-CVE-2014-0117.patch +Patch204: httpd-2.4.6-CVE-2014-0118.patch +Patch205: httpd-2.4.6-CVE-2014-0226.patch +Patch206: httpd-2.4.6-CVE-2013-4352.patch +Patch207: httpd-2.4.6-CVE-2013-5704.patch +Patch208: httpd-2.4.6-CVE-2014-3581.patch +Patch209: httpd-2.4.6-CVE-2015-3185.patch +Patch210: httpd-2.4.6-CVE-2015-3183.patch +Patch211: httpd-2.4.6-CVE-2016-5387.patch +Patch212: httpd-2.4.6-CVE-2016-8743.patch +Patch213: httpd-2.4.6-CVE-2016-0736.patch +Patch214: httpd-2.4.6-CVE-2016-2161.patch +Patch215: httpd-2.4.6-CVE-2017-3167.patch +Patch216: httpd-2.4.6-CVE-2017-3169.patch +Patch217: httpd-2.4.6-CVE-2017-7668.patch +Patch218: httpd-2.4.6-CVE-2017-7679.patch +Patch219: httpd-2.4.6-CVE-2017-9788.patch +Patch220: httpd-2.4.6-CVE-2017-9798.patch +Patch221: httpd-2.4.6-CVE-2018-1312.patch +Patch222: httpd-2.4.6-CVE-2019-0217.patch +Patch223: httpd-2.4.6-CVE-2019-0220.patch +Patch224: httpd-2.4.6-CVE-2017-15710.patch +Patch225: httpd-2.4.6-CVE-2018-1301.patch + +License: ASL 2.0 +Group: System Environment/Daemons +BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root +BuildRequires: autoconf, perl, pkgconfig, findutils, xmlto +BuildRequires: zlib-devel, libselinux-devel, lua-devel +BuildRequires: apr-devel >= 1.4.0, apr-util-devel >= 1.2.0, pcre-devel >= 5.0 +BuildRequires: systemd-devel +Requires: /etc/mime.types, system-logos >= 7.92.1-1 +Obsoletes: httpd-suexec +Provides: webserver +Provides: mod_dav = %{version}-%{release}, httpd-suexec = %{version}-%{release} +Provides: httpd-mmn = %{mmn}, httpd-mmn = %{mmnisa}, httpd-mmn 
= %{oldmmnisa} +Requires: httpd-tools = %{version}-%{release} +Requires(pre): /usr/sbin/useradd +Requires(pre): /usr/sbin/groupadd +Requires(preun): systemd-units +Requires(postun): systemd-units +Requires(post): systemd-units + +%description +The Apache HTTP Server is a powerful, efficient, and extensible +web server. + +%package devel +Group: Development/Libraries +Summary: Development interfaces for the Apache HTTP server +Obsoletes: secureweb-devel, apache-devel, stronghold-apache-devel +Requires: apr-devel, apr-util-devel, pkgconfig +Requires: httpd = %{version}-%{release} + +%description devel +The httpd-devel package contains the APXS binary and other files +that you need to build Dynamic Shared Objects (DSOs) for the +Apache HTTP Server. + +If you are installing the Apache HTTP server and you want to be +able to compile or develop additional modules for Apache, you need +to install this package. + +%package manual +Group: Documentation +Summary: Documentation for the Apache HTTP server +Requires: httpd = %{version}-%{release} +Obsoletes: secureweb-manual, apache-manual +BuildArch: noarch + +%description manual +The httpd-manual package contains the complete manual and +reference guide for the Apache HTTP server. The information can +also be found at http://httpd.apache.org/docs/2.2/. + +%package tools +Group: System Environment/Daemons +Summary: Tools for use with the Apache HTTP Server + +%description tools +The httpd-tools package contains tools which can be used with +the Apache HTTP Server. + +%package -n mod_ssl +Group: System Environment/Daemons +Summary: SSL/TLS module for the Apache HTTP Server +Epoch: 1 +BuildRequires: openssl-devel >= 1:1.0.1e-37 +Requires: openssl-libs >= 1:1.0.1e-37 +Requires(post): openssl, /bin/cat, hostname +Requires(pre): httpd +Requires: httpd = 0:%{version}-%{release}, httpd-mmn = %{mmnisa} +Obsoletes: stronghold-mod_ssl + +%description -n mod_ssl +The mod_ssl module provides strong cryptography for the Apache Web +server via the Secure Sockets Layer (SSL) and Transport Layer +Security (TLS) protocols. + +%package -n mod_proxy_html +Group: System Environment/Daemons +Summary: HTML and XML content filters for the Apache HTTP Server +Requires: httpd = 0:%{version}-%{release}, httpd-mmn = %{mmnisa} +BuildRequires: libxml2-devel +Epoch: 1 +Obsoletes: mod_proxy_html < 1:2.4.1-2 + +%description -n mod_proxy_html +The mod_proxy_html and mod_xml2enc modules provide filters which can +transform and modify HTML and XML content. + +%package -n mod_ldap +Group: System Environment/Daemons +Summary: LDAP authentication modules for the Apache HTTP Server +Requires: httpd = 0:%{version}-%{release}, httpd-mmn = %{mmnisa} +Requires: apr-util-ldap + +%description -n mod_ldap +The mod_ldap and mod_authnz_ldap modules add support for LDAP +authentication to the Apache HTTP Server. + +%package -n mod_session +Group: System Environment/Daemons +Summary: Session interface for the Apache HTTP Server +Requires: httpd = 0:%{version}-%{release}, httpd-mmn = %{mmnisa} +Requires: apr-util-openssl + +%description -n mod_session +The mod_session module and associated backends provide an abstract +interface for storing and accessing per-user session data. 
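# Editor's note (illustrative sketch, not part of this spec): the httpd-mmn
# provides declared above, together with the /etc/rpm/macros.httpd file
# generated in %install below, are what out-of-tree module packages use to
# bind to a compatible httpd ABI. A hypothetical mod_foo.spec might carry:
#
#   BuildRequires: httpd-devel
#   Requires: httpd-mmn = %{_httpd_mmn}
#   %build
#   %{_httpd_apxs} -c mod_foo.c
#
# The %{_httpd_mmn} and %{_httpd_apxs} macro names follow the convention
# shipped in macros.httpd; treat them as assumptions here, since the contents
# of the generated file are not reproduced in this diff.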
+ +%prep +%setup -q +%patch1 -p1 -b .apctl +%patch2 -p1 -b .apxs +%patch3 -p1 -b .deplibs +%patch5 -p1 -b .layout +%patch6 -p1 -b .apctlsystemd + +%patch21 -p1 -b .fullrelease +%patch23 -p1 -b .export +%patch24 -p1 -b .corelimit +%patch25 -p1 -b .selinux +%patch26 -p1 -b .r1337344+ +%patch27 -p1 -b .icons +%patch28 -p1 -b .r1332643+ +%patch29 -p1 -b .systemd +%patch30 -p1 -b .cachehardmax +%patch31 -p1 -b .sslmultiproxy +%patch32 -p1 -b .r1537535 +%patch33 -p1 -b .r1542327 +rm modules/ssl/ssl_engine_dh.c +%patch34 -p1 -b .ssllargekeys +%patch35 -p1 -b .prehtaccess +%patch36 -p1 -b .r1573626 +%patch37 -p1 -b .uds +%patch38 -p1 -b .upn +%patch39 -p1 -b .r1664565 +%patch40 -p1 -b .r1861793+ + +%patch51 -p1 -b .sninotreq +%patch55 -p1 -b .malformedhost +%patch56 -p1 -b .uniqueid +%patch57 -p1 -b .ldaprefer +%patch58 -p1 -b .r1507681+ +%patch59 -p1 -b .r1556473 +%patch60 -p1 -b .r1553540 +%patch61 -p1 -b .clientaddr +%patch62 -p1 -b .aboverflow +%patch63 -p1 -b .sigint +%patch64 -p1 -b .sslecdhauto +%patch65 -p1 -b .r1556818 +%patch66 -p1 -b .r1618851 +%patch67 -p1 -b .r1526189 +%patch68 -p1 -b .r1663647 +%patch69 -p1 -b .1569006 +%patch70 -p1 -b .r1506474 +%patch71 -p1 -b .bomb +%patch72 -p1 -b .r1604460 +%patch73 -p1 -b .r1624349 +%patch74 -p1 -b .abipv6 +%patch75 -p1 -b .r1530280 +%patch76 -p1 -b .r1633085 +%patch78 -p1 -b .sslerrorfree +%patch79 -p1 -b .r1528556 +%patch80 -p1 -b .r1594625 +%patch81 -p1 -b .r1674222 +%patch82 -p1 -b .envhttpd +%patch83 -p1 -b .rewritedir +%patch84 -p1 -b .r1420184 +%patch85 -p1 -b .r1524368 +%patch86 -p1 -b .r1528958 +%patch87 -p1 -b .r1651083 +%patch88 -p1 -b .r1688399 +%patch89 -p1 -b .r1527509 +%patch90 -p1 -b .apachectlstatus +%patch91 -p1 -b .r1650655 +%patch92 -p1 -b .r1533448 +%patch93 -p1 -b .r1610013 +%patch94 -p1 -b .r1705528 +%patch95 -p1 -b .r1684462 +%patch96 -p1 -b .r1650677 +%patch97 -p1 -b .r1621601 +%patch98 -p1 -b .r1610396 +%patch99 -p1 -b .rotatelogtimezone +%patch100 -p1 -b .absslerror +%patch101 -p1 -b .r1723522 +%patch102 -p1 -b .r1681107 +%patch103 -p1 -b .dhparamsfree +%patch104 -p1 -b .r1651658 +%patch105 -p1 -b .r1560093 +%patch106 -p1 -b .r1748212 +%patch107 -p1 -b .r1570327 +%patch108 -p1 -b .r1631119 +%patch109 -p1 -b .r1593002 +%patch110 -p1 -b .r1662640 +%patch111 -p1 -b .r1348019 +%patch112 -p1 -b .r1587053 +%patch113 -p1 -b .mpmsegfault +%patch114 -p1 -b .r1681114 +%patch115 -p1 -b .r1371876 +%patch116 -p1 -b .r1726019 +%patch117 -p1 -b .r1683112 +%patch118 -p1 -b .r1651653 +%patch119 -p1 -b .r1634529 +%patch120 -p1 -b .r1738878 +%patch121 -p1 -b .httpprotdefine +%patch122 -p1 -b .statement-comment +%patch123 -p1 -b .logrotate-zombie +%patch124 -p1 -b .modauthzdbd-segfault +%patch125 -p1 -b .r1668532 +%patch126 -p1 -b .r1681289 +%patch127 -p1 -b .r1805099 +%patch128 -p1 -b .r1811831 +%patch129 -p1 -b .r1811746 +%patch130 -p1 -b .r1811976 +%patch131 -p1 -b .r1650310 +%patch132 -p1 -b .r1530999 +%patch133 -p1 -b .r1555539 +%patch134 -p1 -b .r1523536 +%patch135 -p1 -b .r1826995 +%patch136 -p1 -b .defaultport-proxy +%patch137 -p1 -b .r1825120 +%patch138 -p1 -b .r1515372 +%patch139 -p1 -b .r1824872 +%patch140 -p1 -b .r1833014 +%patch141 -p1 -b .r1583175 +%patch142 -p1 -b .1862604 + + +%patch200 -p1 -b .cve6438 +%patch201 -p1 -b .cve0098 +%patch202 -p1 -b .cve0231 +%patch203 -p1 -b .cve0117 +%patch204 -p1 -b .cve0118 +%patch205 -p1 -b .cve0226 +%patch206 -p1 -b .cve4352 +%patch207 -p1 -b .cve5704 +%patch208 -p1 -b .cve3581 +%patch209 -p1 -b .cve3185 +%patch210 -p1 -b .cve3183 +%patch211 -p1 -b .cve5387 +%patch212 -p1 -b .cve8743 
+%patch213 -p1 -b .cve0736 +%patch214 -p1 -b .cve2161 +%patch215 -p1 -b .cve3167 +%patch216 -p1 -b .cve3169 +%patch217 -p1 -b .cve7668 +%patch218 -p1 -b .cve7679 +%patch219 -p1 -b .cve9788 +%patch220 -p1 -b .cve9798 +%patch221 -p1 -b .cve1312 +%patch222 -p1 -b .cve0217 +%patch223 -p1 -b .cve0220 +%patch224 -p1 -b .cve15710 +%patch225 -p1 -b .cve1301 + +# Patch in the vendor string and the release string +sed -i '/^#define PLATFORM/s/Unix/%{vstring}/' os/unix/os.h +sed -i 's/@RELEASE@/%{release}/' server/core.c + +# Prevent use of setcap in "install-suexec-caps" target. +sed -i '/suexec/s,setcap ,echo Skipping setcap for ,' Makefile.in + +# Safety check: prevent build if defined MMN does not equal upstream MMN. +vmmn=`echo MODULE_MAGIC_NUMBER_MAJOR | cpp -include include/ap_mmn.h | sed -n '/^2/p'` +if test "x${vmmn}" != "x%{mmn}"; then + : Error: Upstream MMN is now ${vmmn}, packaged MMN is %{mmn} + : Update the mmn macro and rebuild. + exit 1 +fi + +: Building with MMN %{mmn}, MMN-ISA %{mmnisa} and vendor string '%{vstring}' + +%build +# forcibly prevent use of bundled apr, apr-util, pcre +rm -rf srclib/{apr,apr-util,pcre} + +# regenerate configure scripts +autoheader && autoconf || exit 1 + +# Before configure; fix location of build dir in generated apxs +%{__perl} -pi -e "s:\@exp_installbuilddir\@:%{_libdir}/httpd/build:g" \ + support/apxs.in + +export CFLAGS=$RPM_OPT_FLAGS +export LDFLAGS="-Wl,-z,relro,-z,now" + +%ifarch ppc64 ppc64le +%global _performance_build 1 +%endif + +# Hard-code path to links to avoid unnecessary builddep +export LYNX_PATH=/usr/bin/links + +# Build the daemon +%configure \ + --prefix=%{_sysconfdir}/httpd \ + --exec-prefix=%{_prefix} \ + --bindir=%{_bindir} \ + --sbindir=%{_sbindir} \ + --mandir=%{_mandir} \ + --libdir=%{_libdir} \ + --sysconfdir=%{_sysconfdir}/httpd/conf \ + --includedir=%{_includedir}/httpd \ + --libexecdir=%{_libdir}/httpd/modules \ + --datadir=%{contentdir} \ + --enable-layout=Fedora \ + --with-installbuilddir=%{_libdir}/httpd/build \ + --enable-mpms-shared=all \ + --with-apr=%{_prefix} --with-apr-util=%{_prefix} \ + --enable-suexec --with-suexec \ + --enable-suexec-capabilities \ + --with-suexec-caller=%{suexec_caller} \ + --with-suexec-docroot=%{docroot} \ + --without-suexec-logfile \ + --with-suexec-syslog \ + --with-suexec-bin=%{_sbindir}/suexec \ + --with-suexec-uidmin=500 --with-suexec-gidmin=100 \ + --enable-pie \ + --with-pcre \ + --enable-mods-shared=all \ + --enable-ssl --with-ssl --disable-distcache \ + --enable-proxy \ + --enable-cache \ + --enable-disk-cache \ + --enable-ldap --enable-authnz-ldap \ + --enable-cgid --enable-cgi \ + --enable-authn-anon --enable-authn-alias \ + --disable-imagemap \ + $* +make %{?_smp_mflags} + +%install +rm -rf $RPM_BUILD_ROOT + +make DESTDIR=$RPM_BUILD_ROOT install + +# Install systemd service files +mkdir -p $RPM_BUILD_ROOT%{_unitdir} +for s in httpd htcacheclean; do + install -p -m 644 $RPM_SOURCE_DIR/${s}.service \ + $RPM_BUILD_ROOT%{_unitdir}/${s}.service +done + +# install conf file/directory +mkdir $RPM_BUILD_ROOT%{_sysconfdir}/httpd/conf.d \ + $RPM_BUILD_ROOT%{_sysconfdir}/httpd/conf.modules.d +install -m 644 $RPM_SOURCE_DIR/README.confd \ + $RPM_BUILD_ROOT%{_sysconfdir}/httpd/conf.d/README +for f in 00-base.conf 00-mpm.conf 00-lua.conf 01-cgi.conf 00-dav.conf \ + 00-proxy.conf 00-ssl.conf 01-ldap.conf 00-proxyhtml.conf \ + 01-ldap.conf 00-systemd.conf 01-session.conf; do + install -m 644 -p $RPM_SOURCE_DIR/$f \ + $RPM_BUILD_ROOT%{_sysconfdir}/httpd/conf.modules.d/$f +done + +for f in 
welcome.conf ssl.conf manual.conf userdir.conf; do + install -m 644 -p $RPM_SOURCE_DIR/$f \ + $RPM_BUILD_ROOT%{_sysconfdir}/httpd/conf.d/$f +done + +# Split-out extra config shipped as default in conf.d: +for f in autoindex; do + mv docs/conf/extra/httpd-${f}.conf \ + $RPM_BUILD_ROOT%{_sysconfdir}/httpd/conf.d/${f}.conf +done + +# Extra config trimmed: +rm -v docs/conf/extra/httpd-{ssl,userdir}.conf + +rm $RPM_BUILD_ROOT%{_sysconfdir}/httpd/conf/*.conf +install -m 644 -p $RPM_SOURCE_DIR/httpd.conf \ + $RPM_BUILD_ROOT%{_sysconfdir}/httpd/conf/httpd.conf + +mkdir $RPM_BUILD_ROOT%{_sysconfdir}/sysconfig +for s in httpd htcacheclean; do + install -m 644 -p $RPM_SOURCE_DIR/${s}.sysconf \ + $RPM_BUILD_ROOT%{_sysconfdir}/sysconfig/${s} +done + +# tmpfiles.d configuration +mkdir -p $RPM_BUILD_ROOT%{_prefix}/lib/tmpfiles.d +install -m 644 -p $RPM_SOURCE_DIR/httpd.tmpfiles \ + $RPM_BUILD_ROOT%{_prefix}/lib/tmpfiles.d/httpd.conf + +# Other directories +mkdir -p $RPM_BUILD_ROOT%{_localstatedir}/lib/dav \ + $RPM_BUILD_ROOT/run/httpd/htcacheclean + +# Create cache directory +mkdir -p $RPM_BUILD_ROOT%{_localstatedir}/cache/httpd \ + $RPM_BUILD_ROOT%{_localstatedir}/cache/httpd/proxy \ + $RPM_BUILD_ROOT%{_localstatedir}/cache/httpd/ssl + +# Make the MMN accessible to module packages +echo %{mmnisa} > $RPM_BUILD_ROOT%{_includedir}/httpd/.mmn +mkdir -p $RPM_BUILD_ROOT%{_sysconfdir}/rpm +cat > $RPM_BUILD_ROOT%{_sysconfdir}/rpm/macros.httpd < $RPM_BUILD_ROOT%{_mandir}/man8/httpd.8 + +# Make ap_config_layout.h libdir-agnostic +sed -i '/.*DEFAULT_..._LIBEXECDIR/d;/DEFAULT_..._INSTALLBUILDDIR/d' \ + $RPM_BUILD_ROOT%{_includedir}/httpd/ap_config_layout.h + +# Fix path to instdso in special.mk +sed -i '/instdso/s,top_srcdir,top_builddir,' \ + $RPM_BUILD_ROOT%{_libdir}/httpd/build/special.mk + +# Remove unpackaged files +rm -vf \ + $RPM_BUILD_ROOT%{_libdir}/*.exp \ + $RPM_BUILD_ROOT/etc/httpd/conf/mime.types \ + $RPM_BUILD_ROOT%{_libdir}/httpd/modules/*.exp \ + $RPM_BUILD_ROOT%{_libdir}/httpd/build/config.nice \ + $RPM_BUILD_ROOT%{_bindir}/{ap?-config,dbmmanage} \ + $RPM_BUILD_ROOT%{_sbindir}/{checkgid,envvars*} \ + $RPM_BUILD_ROOT%{contentdir}/htdocs/* \ + $RPM_BUILD_ROOT%{_mandir}/man1/dbmmanage.* \ + $RPM_BUILD_ROOT%{contentdir}/cgi-bin/* + +rm -rf $RPM_BUILD_ROOT/etc/httpd/conf/{original,extra} + +%pre +# Add the "apache" group and user +/usr/sbin/groupadd -g 48 -r apache 2> /dev/null || : +/usr/sbin/useradd -c "Apache" -u 48 -g apache \ + -s /sbin/nologin -r -d %{contentdir} apache 2> /dev/null || : + +%post +%systemd_post httpd.service htcacheclean.service + +%preun +%systemd_preun httpd.service htcacheclean.service + +%postun +%systemd_postun + +# Trigger for conversion from SysV, per guidelines at: +# https://fedoraproject.org/wiki/Packaging:ScriptletSnippets#Systemd +%triggerun -- httpd < 2.2.21-5 +# Save the current service runlevel info +# User must manually run systemd-sysv-convert --apply httpd +# to migrate them to systemd targets +/usr/bin/systemd-sysv-convert --save httpd.service >/dev/null 2>&1 ||: + +# Run these because the SysV package being removed won't do them +/sbin/chkconfig --del httpd >/dev/null 2>&1 || : + +%posttrans +test -f /etc/sysconfig/httpd-disable-posttrans || \ + /bin/systemctl try-restart httpd.service htcacheclean.service >/dev/null 2>&1 || : + +%define sslcert %{_sysconfdir}/pki/tls/certs/localhost.crt +%define sslkey %{_sysconfdir}/pki/tls/private/localhost.key + +%post -n mod_ssl +umask 077 + +if [ -f %{sslkey} -o -f %{sslcert} ]; then + exit 0 +fi + +%{_bindir}/openssl 
genrsa -rand /proc/apm:/proc/cpuinfo:/proc/dma:/proc/filesystems:/proc/interrupts:/proc/ioports:/proc/pci:/proc/rtc:/proc/uptime 2048 > %{sslkey} 2> /dev/null + +FQDN=`hostname` +if [ "x${FQDN}" = "x" -o ${#FQDN} -gt 59 ]; then + FQDN=localhost.localdomain +fi + +cat << EOF | %{_bindir}/openssl req -new -key %{sslkey} \ + -x509 -sha256 -days 365 -set_serial $RANDOM -extensions v3_req \ + -out %{sslcert} 2>/dev/null +-- +SomeState +SomeCity +SomeOrganization +SomeOrganizationalUnit +${FQDN} +root@${FQDN} +EOF + +%check +# Check the built modules are all PIC +if readelf -d $RPM_BUILD_ROOT%{_libdir}/httpd/modules/*.so | grep TEXTREL; then + : modules contain non-relocatable code + exit 1 +fi + +%clean +rm -rf $RPM_BUILD_ROOT + +%files +%defattr(-,root,root) + +%doc ABOUT_APACHE README CHANGES LICENSE VERSIONING NOTICE +%doc docs/conf/extra/*.conf + +%dir %{_sysconfdir}/httpd +%{_sysconfdir}/httpd/modules +%{_sysconfdir}/httpd/logs +%{_sysconfdir}/httpd/run +%dir %{_sysconfdir}/httpd/conf +%config(noreplace) %{_sysconfdir}/httpd/conf/httpd.conf +%config(noreplace) %{_sysconfdir}/httpd/conf/magic + +%config(noreplace) %{_sysconfdir}/logrotate.d/httpd + +%dir %{_sysconfdir}/httpd/conf.d +%{_sysconfdir}/httpd/conf.d/README +%config(noreplace) %{_sysconfdir}/httpd/conf.d/*.conf +%exclude %{_sysconfdir}/httpd/conf.d/ssl.conf +%exclude %{_sysconfdir}/httpd/conf.d/manual.conf + +%dir %{_sysconfdir}/httpd/conf.modules.d +%config(noreplace) %{_sysconfdir}/httpd/conf.modules.d/*.conf +%exclude %{_sysconfdir}/httpd/conf.modules.d/00-ssl.conf +%exclude %{_sysconfdir}/httpd/conf.modules.d/00-proxyhtml.conf +%exclude %{_sysconfdir}/httpd/conf.modules.d/01-ldap.conf +%exclude %{_sysconfdir}/httpd/conf.modules.d/01-session.conf + +%config(noreplace) %{_sysconfdir}/sysconfig/ht* +%{_prefix}/lib/tmpfiles.d/httpd.conf + +%dir %{_libexecdir}/initscripts/legacy-actions/httpd +%{_libexecdir}/initscripts/legacy-actions/httpd/* + +%{_sbindir}/ht* +%{_sbindir}/fcgistarter +%{_sbindir}/apachectl +%{_sbindir}/rotatelogs +%caps(cap_setuid,cap_setgid+pe) %attr(510,root,%{suexec_caller}) %{_sbindir}/suexec + +%dir %{_libdir}/httpd +%dir %{_libdir}/httpd/modules +%{_libdir}/httpd/modules/mod*.so +%exclude %{_libdir}/httpd/modules/mod_auth_form.so +%exclude %{_libdir}/httpd/modules/mod_ssl.so +%exclude %{_libdir}/httpd/modules/mod_*ldap.so +%exclude %{_libdir}/httpd/modules/mod_proxy_html.so +%exclude %{_libdir}/httpd/modules/mod_xml2enc.so +%exclude %{_libdir}/httpd/modules/mod_session*.so + +%dir %{contentdir} +%dir %{contentdir}/icons +%dir %{contentdir}/error +%dir %{contentdir}/error/include +%dir %{contentdir}/noindex +%{contentdir}/icons/* +%{contentdir}/error/README +%{contentdir}/error/*.var +%{contentdir}/error/include/*.html +%{contentdir}/noindex/index.html + +%dir %{docroot} +%dir %{docroot}/cgi-bin +%dir %{docroot}/html + +%attr(0710,root,apache) %dir /run/httpd +%attr(0700,apache,apache) %dir /run/httpd/htcacheclean +%attr(0700,root,root) %dir %{_localstatedir}/log/httpd +%attr(0700,apache,apache) %dir %{_localstatedir}/lib/dav +%attr(0700,apache,apache) %dir %{_localstatedir}/cache/httpd +%attr(0700,apache,apache) %dir %{_localstatedir}/cache/httpd/proxy + +%{_mandir}/man8/* + +%{_unitdir}/*.service + +%files tools +%defattr(-,root,root) +%{_bindir}/* +%{_mandir}/man1/* +%doc LICENSE NOTICE +%exclude %{_bindir}/apxs +%exclude %{_mandir}/man1/apxs.1* + +%files manual +%defattr(-,root,root) +%{contentdir}/manual +%config(noreplace) %{_sysconfdir}/httpd/conf.d/manual.conf + +%files -n mod_ssl 
+%defattr(-,root,root) +%{_libdir}/httpd/modules/mod_ssl.so +%config(noreplace) %{_sysconfdir}/httpd/conf.modules.d/00-ssl.conf +%config(noreplace) %{_sysconfdir}/httpd/conf.d/ssl.conf +%attr(0700,apache,root) %dir %{_localstatedir}/cache/httpd/ssl +%{_libexecdir}/httpd-ssl-pass-dialog + +%files -n mod_proxy_html +%defattr(-,root,root) +%{_libdir}/httpd/modules/mod_proxy_html.so +%{_libdir}/httpd/modules/mod_xml2enc.so +%config(noreplace) %{_sysconfdir}/httpd/conf.modules.d/00-proxyhtml.conf + +%files -n mod_ldap +%defattr(-,root,root) +%{_libdir}/httpd/modules/mod_*ldap.so +%config(noreplace) %{_sysconfdir}/httpd/conf.modules.d/01-ldap.conf + +%files -n mod_session +%defattr(-,root,root) +%{_libdir}/httpd/modules/mod_session*.so +%{_libdir}/httpd/modules/mod_auth_form.so +%config(noreplace) %{_sysconfdir}/httpd/conf.modules.d/01-session.conf + +%files devel +%defattr(-,root,root) +%{_includedir}/httpd +%{_bindir}/apxs +%{_mandir}/man1/apxs.1* +%dir %{_libdir}/httpd/build +%{_libdir}/httpd/build/*.mk +%{_libdir}/httpd/build/*.sh +%{_sysconfdir}/rpm/macros.httpd + +%changelog +* Thu Aug 22 2019 Joe Orton - 2.4.6-92 +- htpasswd: add SHA-2 crypt() support (#1486889) + +* Wed Jul 31 2019 Lubos Uhliarik - 2.4.6-91 +- Resolves: #1630886 - scriptlet can fail if hostname is not installed +- Resolves: #1565465 - CVE-2017-15710 httpd: Out of bound write in + mod_authnz_ldap when using too small Accept-Language values +- Resolves: #1568298 - CVE-2018-1301 httpd: Out of bounds access after + failure in reading the HTTP request +- Resolves: #1673457 - Apache child process crashes because ScriptAliasMatch + directive +- Resolves: #1633152 - mod_session missing apr-util-openssl +- Resolves: #1649470 - httpd response contains garbage in Content-Type header +- Resolves: #1724034 - Unexpected OCSP in proxy SSL connection + +* Sat Jun 08 2019 Lubos Uhliarik - 2.4.6-90 +- Resolves: #1566317 - CVE-2018-1312 httpd: Weak Digest auth nonce generation + in mod_auth_digest +- Resolves: #1696141 - CVE-2019-0217 httpd: mod_auth_digest: access control + bypass due to race condition +- Resolves: #1696096 - CVE-2019-0220 httpd: URL normalization inconsistency + +* Fri Mar 15 2019 Joe Orton - 2.4.6-89 +- fix per-request leak of bucket brigade structure (#1583218) + +* Thu Jun 21 2018 Luboš Uhliarik - 2.4.6-88 +- Resolves: #1527295 - httpd with worker/event mpm segfaults after multiple + SIGUSR1 + +* Thu Jun 21 2018 Luboš Uhliarik - 2.4.6-87 +- Resolves: #1458364 - RMM list corruption in ldap module results in server hang + +* Thu Jun 21 2018 Luboš Uhliarik - 2.4.6-86 +- Resolves: #1493181 - RFE: mod_ssl: allow sending multiple CA names which + differ only in case + +* Wed Jun 20 2018 Luboš Uhliarik - 2.4.6-85 +- Resolves: #1556761 - mod_proxy_wstunned config needs the default port number + +* Mon Jun 18 2018 Luboš Uhliarik - 2.4.6-84 +- Resolves: #1548501 - Make OCSP more configurable (like CRL) + +* Mon Jun 11 2018 Luboš Uhliarik - 2.4.6-83 +- Resolves: #1523536 - Backport Apache BZ#59230 mod_proxy_express uses db + after close + +* Mon Jun 11 2018 Luboš Uhliarik - 2.4.6-82 +- Resolves: #1533793 - Use Variable with mod_authnz_ldap + +* Mon Mar 26 2018 Joe Orton - 2.4.6-81 +- don't terminate connections during graceful stop/restart (#1557785) + +* Mon Jan 08 2018 Luboš Uhliarik - 2.4.6-80 +- Related: #1288395 - httpd segfault when logrotate invoked + +* Wed Nov 01 2017 Luboš Uhliarik - 2.4.6-79 +- Resolves: #1274890 - mod_ssl config: tighten defaults + +* Tue Oct 31 2017 Luboš Uhliarik - 2.4.6-78 +- Resolves: #1506392 - 
Backport: SSLSessionTickets directive support + +* Mon Oct 16 2017 Luboš Uhliarik - 2.4.6-77 +- Resolves: #1440590 - Need an option to disable UTF8-conversion + of certificate DN + +* Thu Oct 12 2017 Luboš Uhliarik - 2.4.6-76 +- Resolves: #1464406 - Apache consumes too much memory for CGI output + +* Thu Oct 12 2017 Luboš Uhliarik - 2.4.6-75 +- Resolves: #1448892 - Cannot override LD_LIBARY_PATH in Apache HTTPD + using SetEnv or PassEnv. Needs documentation. + +* Mon Oct 09 2017 Luboš Uhliarik - 2.4.6-74 +- Resolves: #1430640 - "ProxyAddHeaders Off" does not become effective + when it's defined outside setting + +* Fri Oct 06 2017 Luboš Uhliarik - 2.4.6-73 +- Resolves: #1499253 - ProxyRemote with HTTPS backend sends requests + with absoluteURI instead of abs_path + +* Tue Oct 03 2017 Luboš Uhliarik - 2.4.6-72 +- Resolves: #1288395 - httpd segfault when logrotate invoked + +* Tue Oct 03 2017 Luboš Uhliarik - 2.4.6-71 +- Resolves: #1368491 - mod_authz_dbd segfaults when AuthzDBDQuery missing + +* Mon Oct 02 2017 Luboš Uhliarik - 2.4.6-70 +- Resolves: #1467402 - rotatelogs: creation of zombie processes when -p is used + +* Tue Sep 19 2017 Luboš Uhliarik - 2.4.6-69 +- Resolves: #1493065 - CVE-2017-9798 httpd: Use-after-free by limiting + unregistered HTTP method + +* Tue Jul 25 2017 Luboš Uhliarik - 2.4.6-68 +- Resolves: #1463194 - CVE-2017-3167 httpd: ap_get_basic_auth_pw() + authentication bypass +- Resolves: #1463197 - CVE-2017-3169 httpd: mod_ssl NULL pointer dereference +- Resolves: #1463207 - CVE-2017-7679 httpd: mod_mime buffer overread +- Resolves: #1463205 - CVE-2017-7668 httpd: ap_find_token() buffer overread +- Resolves: #1470748 - CVE-2017-9788 httpd: Uninitialized memory reflection + in mod_auth_digest + +* Tue May 09 2017 Luboš Uhliarik - 2.4.6-67 +- Related: #1332242 - Explicitly disallow the '#' character in allow,deny + directives + +* Tue May 09 2017 Luboš Uhliarik - 2.4.6-66 +- Related: #1332242 - Explicitly disallow the '#' character in allow,deny + directives + +* Thu Apr 27 2017 Luboš Uhliarik - 2.4.6-65 +- Resolves: #1445885 - define _RH_HAS_HTTPPROTOCOLOPTIONS + +* Tue Apr 18 2017 Luboš Uhliarik - 2.4.6-64 +- Resolves: #1442872 - apache user is not created during httpd installation + when apache group already exist with GID other than 48 + +* Wed Mar 22 2017 Luboš Uhliarik - 2.4.6-63 +- Related: #1412976 - CVE-2016-0736 CVE-2016-2161 CVE-2016-8743 + httpd: various flaws + +* Wed Mar 15 2017 Luboš Uhliarik - 2.4.6-62 +- Resolves: #1397241 - Backport Apache Bug 53098 - mod_proxy_ajp: + patch to set worker secret passed to tomcat + +* Wed Mar 15 2017 Luboš Uhliarik - 2.4.6-61 +- Related: #1414258 - Crash during restart or at startup in mod_ssl, + in certinfo_free() function registered by ssl_stapling_ex_init() + +* Tue Mar 14 2017 Luboš Uhliarik - 2.4.6-60 +- Resolves: #1414258 - Crash during restart or at startup in mod_ssl, + in certinfo_free() function registered by ssl_stapling_ex_init() + +* Mon Mar 13 2017 Luboš Uhliarik - 2.4.6-59 +- Resolves: #1378946 - Backport of apache bug 55910: Continuation lines + are broken during buffer resize + +* Fri Mar 10 2017 Luboš Uhliarik - 2.4.6-58 +- Resolves: #1364604 - Upstream Bug 56925 - ErrorDocument directive misbehaves + with mod_proxy_http and mod_proxy_ajp + +* Thu Mar 09 2017 Luboš Uhliarik - 2.4.6-57 +- Resolves: #1324416 - Error 404 when switching language in HTML manual + more than once + +* Wed Mar 08 2017 Luboš Uhliarik - 2.4.6-56 +- Resolves: #1353740 - Backport Apache PR58118 to fix mod_proxy_fcgi + spamming 
non-errors: AH01075: Error dispatching request to : (passing + brigade to output filters) + +* Wed Mar 08 2017 Luboš Uhliarik - 2.4.6-55 +- Resolves: #1371876 - Apache httpd returns "200 OK" for a request + exceeding LimitRequestBody when enabling mod_ext_filter + +* Tue Mar 07 2017 Luboš Uhliarik - 2.4.6-54 +- Resolves: #1372692 - Apache httpd does not log status code "413" in + access_log when exceeding LimitRequestBody + +* Tue Mar 07 2017 Luboš Uhliarik - 2.4.6-53 +- Resolves: #1376835 - httpd with worker/event mpm segfaults after multiple + successive graceful reloads + +* Tue Mar 07 2017 Luboš Uhliarik - 2.4.6-52 +- Resolves: #1332242 - Explicitly disallow the '#' character in allow,deny + directives + +* Mon Mar 06 2017 Luboš Uhliarik - 2.4.6-51 +- Resolves: #1396197 - Backport: mod_proxy_wstunnel - AH02447: err/hup + on backconn + +* Mon Mar 06 2017 Luboš Uhliarik - 2.4.6-50 +- Resolves: #1348019 - mod_proxy: Fix a race condition that caused a failed + worker to be retried before the retry period is over + +* Mon Mar 06 2017 Luboš Uhliarik - 2.4.6-49 +- Resolves: #1389535 - Segmentation fault in SSL_renegotiate + +* Mon Mar 06 2017 Luboš Uhliarik - 2.4.6-48 +- Resolves: #1406184 - stapling_renew_response: abort early + (before apr_uri_parse) if ocspuri is empty + +* Tue Feb 7 2017 Joe Orton - 2.4.6-47 +- prefork: fix delay completing graceful restart (#1327624) +- mod_ldap: fix authz regression, failing to rebind (#1415257) + +* Thu Jan 26 2017 Luboš Uhliarik - 2.4.6-46 +- Resolves: #1412976 - CVE-2016-0736 CVE-2016-2161 CVE-2016-8743 + httpd: various flaws + +* Wed Aug 03 2016 Luboš Uhliarik - 2.4.6-45 +- RFE: run mod_rewrite external mapping program as non-root (#1316900) + +* Tue Jul 12 2016 Joe Orton - 2.4.6-44 +- add security fix for CVE-2016-5387 + +* Tue Jul 5 2016 Joe Orton - 2.4.6-43 +- add 451 (Unavailable For Legal Reasons) response status-code (#1343582) + +* Fri Jun 17 2016 Joe Orton - 2.4.6-42 +- mod_cache: treat cache as valid with changed Expires in 304 (#1331341) + +* Wed Feb 24 2016 Jan Kaluza - 2.4.6-41 +- mod_cache: merge r->err_headers_out into r->headers when the response + is cached for the first time (#1264989) +- mod_ssl: Do not send SSL warning when SNI hostname is not found as per + RFC 6066 (#1298148) +- mod_proxy_fcgi: Ignore body data from backend for 304 responses (#1263038) +- fix apache user creation when apache group already exists (#1299889) +- fix apache user creation when USERGROUPS_ENAB is set to 'no' (#1288757) +- mod_proxy: fix slow response time for reponses with error status code + when using ProxyErrorOverride (#1283653) +- mod_ldap: Respect LDAPConnectionPoolTTL for authn connections (#1300149) +- mod_ssl: use "localhost" in the dummy SSL cert for long FQDNs (#1240495) +- rotatelogs: improve support for localtime (#1244545) +- ab: fix read failure when targeting SSL server (#1255331) +- mod_log_debug: fix LogMessage example in documentation (#1279465) +- mod_authz_dbd, mod_authn_dbd, mod_session_dbd, mod_rewrite: Fix lifetime + of DB lookup entries independently of the selected DB engine (#1287844) +- mod_ssl: fix hardware crypto support with custom DH parms (#1291865) +- mod_proxy_fcgi: fix SCRIPT_FILENAME when a balancer is used (#1302797) + +* Thu Sep 17 2015 Jan Kaluza - 2.4.6-40 +- mod_dav: follow up fix for previous commit (#1263975) + +* Wed Aug 26 2015 Jan Kaluza - 2.4.6-39 +- mod_dav: treat dav_resource uri as escaped (#1255480) + +* Wed Aug 19 2015 Jan Kaluza - 2.4.6-38 +- mod_ssl: add support for User Principal Name in 
SSLUserName (#1242503) + +* Mon Aug 10 2015 Jan Kaluza - 2.4.6-37 +- core: fix chunk header parsing defect (CVE-2015-3183) +- core: replace of ap_some_auth_required with ap_some_authn_required + and ap_force_authn hook (CVE-2015-3185) + +* Tue Jul 14 2015 Jan Kaluza - 2.4.6-36 +- Revert fix for #1162152, it is not needed in RHEL7 +- mod_proxy_ajp: fix settings ProxyPass parameters for AJP backends (#1242416) + +* Wed Jul 01 2015 Jan Kaluza - 2.4.6-35 +- mod_remoteip: correct the trusted proxy match test (#1179306) +- mod_dav: send complete response when resource is created (#1235383) +- apachectl: correct the apachectl status man page (#1231924) + +* Wed Jun 03 2015 Jan Kaluza - 2.4.6-34 +- mod_proxy_fcgi: honor Timeout / ProxyTimeout (#1222328) +- do not show all vhosts twice in httpd -D DUMP_VHOSTS output (#1225820) +- fix -D[efined] or [d] variables lifetime accross restarts (#1227219) +- mod_ssl: do not send NPN extension with not configured (#1226015) + +* Mon May 18 2015 Jan Kaluza - 2.4.6-33 +- mod_authz_dbm: fix crash when using "Require dbm-file-group" (#1221575) + +* Wed Apr 15 2015 Jan Kaluza - 2.4.6-32 +- mod_authn_dbd: fix use-after-free bug with postgresql (#1188779) +- mod_remoteip: correct the trusted proxy match test (#1179306) +- mod_status: honor remote_ip as documented (#1169081) +- mod_deflate: fix decompression of files larger than 4GB (#1170214) +- core: improve error message for inaccessible DocumentRoot (#1170220) +- ab: try all addresses instead of failing on first one when not available (#1125276) +- mod_proxy_wstunnel: add support for SSL (#1180745) +- mod_proxy_wstunnel: load this module by default (#1180745) +- mod_rewrite: add support for WebSockets (#1180745) +- mod_rewrite: do not search for directory if a URL will be rewritten (#1210091) +- mod_ssl: Fix SSL_CLIENT_VERIFY value when optional_no_ca and SSLSessionCache + are used and SSL session is resumed (#1170206) +- mod_ssl: fix memory leak on httpd reloads (#1181690) +- mod_ssl: use SSLCipherSuite HIGH:MEDIUM:!aNULL:!MD5:!SEED:!IDEA (#1118476) +- mod_cgi: return error code 408 on timeout (#1162152) +- mod_dav_fs: set default value of DAVLockDB (#1176449) +- add Documentation= to the httpd.service and htcacheclean.service (#1184118) +- do not display "bomb" icon for files ending with "core" (#1170215) +- add missing Reason-Phrase in HTTP response headers (#1162159) +- fix BuildRequires to require openssl-devel >= 1:1.0.1e-37 (#1160625) +- apachectl: ignore HTTPD variable from sysconfig (#1214401) +- apachectl: fix "graceful" documentation (#1214398) +- apachectl: fix "graceful" behaviour when httpd is not running (#1214430) + +* Tue Dec 02 2014 Jan Kaluza - 2.4.6-31 +- mod_proxy_fcgi: determine if FCGI_CONN_CLOSE should be enabled + instead of hardcoding it (#1168050) +- mod_proxy: support Unix Domain Sockets (#1168081) + +* Tue Nov 25 2014 Jan Kaluza - 2.4.6-30 +- core: fix bypassing of mod_headers rules via chunked requests (CVE-2013-5704) +- mod_cache: fix NULL pointer dereference on empty Content-Type (CVE-2014-3581) + +* Tue Nov 04 2014 Jan Kaluza - 2.4.6-29 +- rebuild against proper version of OpenSSL (#1080125) + +* Wed Oct 22 2014 Jan Kaluza - 2.4.6-28 +- set vstring based on /etc/os-release (#1114123) + +* Mon Oct 06 2014 Jan Kaluza - 2.4.6-27 +- fix the dependency on openssl-libs to match the fix for #1080125 + +* Mon Sep 22 2014 Jan Kaluza - 2.4.6-26 +- allow 'es to be seen under virtual hosts (#1131847) + +* Fri Sep 19 2014 Jan Kaluza - 2.4.6-25 +- do not use hardcoded curve for ECDHE suites 
+
+* Wed Sep 03 2014 Jan Kaluza - 2.4.6-24
+- allow reverse-proxy to be set via SetHandler (#1136290)
+
+* Thu Aug 21 2014 Jan Kaluza - 2.4.6-23
+- fix possible crash in SIGINT handling (#1131006)
+
+* Mon Aug 18 2014 Jan Kaluza - 2.4.6-22
+- ab: fix integer overflow when printing stats with a lot of requests (#1092420)
+
+* Mon Aug 11 2014 Jan Kaluza - 2.4.6-21
+- add pre_htaccess so mpm-itk can be built as a separate module (#1059143)
+
+* Tue Aug 05 2014 Jan Kaluza - 2.4.6-20
+- mod_ssl: prefer larger keys and support up to 8192-bit keys (#1073078)
+
+* Mon Aug 04 2014 Jan Kaluza - 2.4.6-19
+- fix build on ppc64le by using configure macro (#1125545)
+- compile httpd with -O3 on ppc64le (#1123490)
+- mod_rewrite: expose CONN_REMOTE_ADDR (#1060536)
+
+* Thu Jul 17 2014 Jan Kaluza - 2.4.6-18
+- mod_cgid: add security fix for CVE-2014-0231 (#1120608)
+- mod_proxy: add security fix for CVE-2014-0117 (#1120608)
+- mod_deflate: add security fix for CVE-2014-0118 (#1120608)
+- mod_status: add security fix for CVE-2014-0226 (#1120608)
+- mod_cache: add security fix for CVE-2013-4352 (#1120608)
+
+* Thu Mar 20 2014 Jan Kaluza - 2.4.6-17
+- mod_dav: add security fix for CVE-2013-6438 (#1077907)
+- mod_log_config: add security fix for CVE-2014-0098 (#1077907)
+
+* Wed Mar 5 2014 Joe Orton - 2.4.6-16
+- mod_ssl: improve DH temp key handling (#1057687)
+
+* Wed Mar 5 2014 Joe Orton - 2.4.6-15
+- mod_ssl: use 2048-bit RSA key with SHA-256 signature in dummy certificate (#1071276)
+
+* Fri Jan 24 2014 Daniel Mach - 2.4.6-14
+- Mass rebuild 2014-01-24
+
+* Mon Jan 13 2014 Joe Orton - 2.4.6-13
+- mod_ssl: sanity-check use of "SSLCompression" (#1036666)
+- mod_proxy_http: fix brigade memory usage (#1040447)
+
+* Fri Jan 10 2014 Joe Orton - 2.4.6-12
+- rebuild
+
+* Thu Jan 9 2014 Joe Orton - 2.4.6-11
+- build with -O3 on ppc64 (#1051066)
+
+* Tue Jan 7 2014 Joe Orton - 2.4.6-10
+- mod_dav: fix locktoken handling (#1004046)
+
+* Fri Dec 27 2013 Daniel Mach - 2.4.6-9
+- Mass rebuild 2013-12-27
+
+* Fri Dec 20 2013 Joe Orton - 2.4.6-8
+- use unambiguous httpd-mmn (#1029360)
+
+* Fri Nov 1 2013 Jan Kaluza - 2.4.6-7
+- mod_ssl: allow SSLEngine to override Listen-based default (#1023168)
+
+* Thu Oct 31 2013 Jan Kaluza - 2.4.6-6
+- systemd: Use {MAINPID} notation in service file (#969972)
+
+* Thu Oct 24 2013 Jan Kaluza - 2.4.6-5
+- systemd: send SIGWINCH signal without httpd -k in ExecStop (#969972)
+
+* Thu Oct 03 2013 Jan Kaluza - 2.4.6-4
+- expand macros in macros.httpd (#1011393)
+
+* Mon Aug 26 2013 Jan Kaluza - 2.4.6-3
+- fix "LDAPReferrals off" to really disable LDAP Referrals
+
+* Wed Jul 31 2013 Jan Kaluza - 2.4.6-2
+- revert fix for dumping vhosts twice
+
+* Mon Jul 22 2013 Joe Orton - 2.4.6-1
+- update to 2.4.6
+- mod_ssl: use revised NPN API (r1487772)
+
+* Thu Jul 11 2013 Jan Kaluza - 2.4.4-12
+- mod_unique_id: replace use of hostname + pid with PRNG output (#976666)
+- apxs: mention -p option in manpage
+
+* Tue Jul 2 2013 Joe Orton - 2.4.4-11
+- add patch for aarch64 (Dennis Gilmore, #925558)
+
+* Mon Jul 1 2013 Joe Orton - 2.4.4-10
+- remove duplicate apxs man page from httpd-tools
+
+* Mon Jun 17 2013 Joe Orton - 2.4.4-9
+- remove zombie dbmmanage script
+
+* Fri May 31 2013 Jan Kaluza - 2.4.4-8
+- return 400 Bad Request on malformed Host header
+
+* Mon May 20 2013 Jan Kaluza - 2.4.4-6
+- htpasswd/htdbm: fix hash generation bug (#956344)
+- do not dump vhosts twice in httpd -S output (#928761)
+- mod_cache: fix potential crash caused by uninitialized variable (#954109)
+
+* Thu Apr 18 2013 Jan Kaluza - 2.4.4-5
+- execute systemctl reload as a result of apachectl graceful
+- mod_ssl: ignore SNI hints unless required by config
+- mod_cache: forward-port CacheMaxExpire "hard" option
+- mod_ssl: fall back on another module's proxy hook if mod_ssl proxy
+  is not configured.
+
+* Tue Apr 16 2013 Jan Kaluza - 2.4.4-4
+- fix service file to not send SIGTERM after ExecStop (#906321, #912288)
+
+* Tue Mar 26 2013 Jan Kaluza - 2.4.4-3
+- protect MIMEMagicFile with IfModule (#893949)
+
+* Tue Feb 26 2013 Joe Orton - 2.4.4-2
+- really package mod_auth_form in mod_session (#915438)
+
+* Tue Feb 26 2013 Joe Orton - 2.4.4-1
+- update to 2.4.4
+- fix duplicate ownership of mod_session config (#914901)
+
+* Fri Feb 22 2013 Joe Orton - 2.4.3-17
+- add mod_session subpackage, move mod_auth_form there (#894500)
+
+* Thu Feb 14 2013 Fedora Release Engineering - 2.4.3-16
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_19_Mass_Rebuild
+
+* Tue Jan 8 2013 Joe Orton - 2.4.3-15
+- add systemd service for htcacheclean
+
+* Tue Nov 13 2012 Joe Orton - 2.4.3-14
+- drop patch for r1344712
+
+* Tue Nov 13 2012 Joe Orton - 2.4.3-13
+- filter mod_*.so auto-provides (thanks to rcollet)
+- pull in syslog logging fix from upstream (r1344712)
+
+* Fri Oct 26 2012 Joe Orton - 2.4.3-12
+- rebuild to pick up new apr-util-ldap
+
+* Tue Oct 23 2012 Joe Orton - 2.4.3-11
+- rebuild
+
+* Wed Oct 3 2012 Joe Orton - 2.4.3-10
+- pull upstream patch r1392850 in addition to r1387633
+
+* Mon Oct 1 2012 Joe Orton - 2.4.3-9.1
+- restore "ServerTokens Full-Release" support (#811714)
+
+* Mon Oct 1 2012 Joe Orton - 2.4.3-9
+- define PLATFORM in os.h using vendor string
+
+* Mon Oct 1 2012 Joe Orton - 2.4.3-8
+- use systemd script unconditionally (#850149)
+
+* Mon Oct 1 2012 Joe Orton - 2.4.3-7
+- use systemd scriptlets if available (#850149)
+- don't run posttrans restart if /etc/sysconfig/httpd-disable-posttrans exists
+
+* Mon Oct 01 2012 Jan Kaluza - 2.4.3-6
+- use systemctl from apachectl (#842736)
+
+* Wed Sep 19 2012 Joe Orton - 2.4.3-5
+- fix some error log spam with graceful-stop (r1387633)
+- minor mod_systemd tweaks
+
+* Thu Sep 13 2012 Joe Orton - 2.4.3-4
+- use IncludeOptional for conf.d/*.conf inclusion
+
+* Fri Sep 07 2012 Jan Kaluza - 2.4.3-3
+- add mod_systemd to integrate better with systemd
+
+* Tue Aug 21 2012 Joe Orton - 2.4.3-2
+- mod_ssl: add check for proxy keypair match (upstream r1374214)
+
+* Tue Aug 21 2012 Joe Orton - 2.4.3-1
+- update to 2.4.3 (#849883)
+- own the docroot (#848121)
+
+* Mon Aug 6 2012 Joe Orton - 2.4.2-23
+- add mod_proxy fixes from upstream (r1366693, r1365604)
+
+* Thu Jul 19 2012 Fedora Release Engineering - 2.4.2-22
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_18_Mass_Rebuild
+
+* Fri Jul 6 2012 Joe Orton - 2.4.2-21
+- drop explicit version requirement on initscripts
+
+* Thu Jul 5 2012 Joe Orton - 2.4.2-20
+- mod_ext_filter: fix error_log warnings
+
+* Mon Jul 2 2012 Joe Orton - 2.4.2-19
+- support "configtest" and "graceful" as initscripts "legacy actions"
+
+* Fri Jun 8 2012 Joe Orton - 2.4.2-18
+- avoid use of "core" GIF for a "core" directory (#168776)
+- drop use of "syslog.target" in systemd unit file
+
+* Thu Jun 7 2012 Joe Orton - 2.4.2-17
+- use _unitdir for systemd unit file
+- use /run in unit file, ssl.conf
+
+* Thu Jun 7 2012 Joe Orton - 2.4.2-16
+- mod_ssl: fix NPN patch merge
+
+* Wed Jun 6 2012 Joe Orton - 2.4.2-15
+- move tmpfiles.d fragment into /usr/lib per new guidelines
+- package /run/httpd not /var/run/httpd
+- set runtimedir to /run/httpd likewise
+
+* Wed Jun 6 2012 Joe Orton - 2.4.2-14
+- fix htdbm/htpasswd crash on crypt() failure (#818684)
+
+* Wed Jun 6 2012 Joe Orton - 2.4.2-13
+- pull fix for NPN patch from upstream (r1345599)
+
+* Thu May 31 2012 Joe Orton - 2.4.2-12
+- update suexec patch to use LOG_AUTHPRIV facility
+
+* Thu May 24 2012 Joe Orton - 2.4.2-11
+- really fix autoindex.conf (thanks to remi@)
+
+* Thu May 24 2012 Joe Orton - 2.4.2-10
+- fix autoindex.conf to allow symlink to poweredby.png
+
+* Wed May 23 2012 Joe Orton - 2.4.2-9
+- suexec: use upstream version of patch for capability bit support
+
+* Wed May 23 2012 Joe Orton - 2.4.2-8
+- suexec: use syslog rather than suexec.log, drop dac_override capability
+
+* Tue May 1 2012 Joe Orton - 2.4.2-7
+- mod_ssl: add TLS NPN support (r1332643, #809599)
+
+* Tue May 1 2012 Joe Orton - 2.4.2-6
+- add BR on APR >= 1.4.0
+
+* Fri Apr 27 2012 Joe Orton - 2.4.2-5
+- use systemctl from logrotate (#221073)
+
+* Fri Apr 27 2012 Joe Orton - 2.4.2-4
+- pull from upstream:
+  * use TLS close_notify alert for dummy_connection (r1326980+)
+  * cleanup symbol exports (r1327036+)
+
+* Fri Apr 27 2012 Joe Orton - 2.4.2-3.2
+- rebuild
+
+* Fri Apr 20 2012 Joe Orton - 2.4.2-3
+- really fix restart
+
+* Fri Apr 20 2012 Joe Orton - 2.4.2-2
+- tweak default ssl.conf
+- fix restart handling (#814645)
+- use graceful restart by default
+
+* Wed Apr 18 2012 Jan Kaluza - 2.4.2-1
+- update to 2.4.2
+
+* Fri Mar 23 2012 Joe Orton - 2.4.1-6
+- fix macros
+
+* Fri Mar 23 2012 Joe Orton - 2.4.1-5
+- add _httpd_moddir to macros
+
+* Tue Mar 13 2012 Joe Orton - 2.4.1-4
+- fix symlink for poweredby.png
+- fix manual.conf
+
+* Tue Mar 13 2012 Joe Orton - 2.4.1-3
+- add mod_proxy_html subpackage (w/mod_proxy_html + mod_xml2enc)
+- move mod_ldap, mod_authnz_ldap to mod_ldap subpackage
+
+* Tue Mar 13 2012 Joe Orton - 2.4.1-2
+- clean docroot better
+- ship proxy, ssl directories within /var/cache/httpd
+- default config:
+  * unrestricted access to (only) /var/www
+  * remove (commented) Mutex, MaxRanges, ScriptSock
+  * split autoindex config to conf.d/autoindex.conf
+- ship additional example configs in docdir
+
+* Tue Mar 6 2012 Joe Orton - 2.4.1-1
+- update to 2.4.1
+- adopt upstream default httpd.conf (almost verbatim)
+- split all LoadModules to conf.modules.d/*.conf
+- include conf.d/*.conf at end of httpd.conf
+- trim %%changelog
+
+* Mon Feb 13 2012 Joe Orton - 2.2.22-2
+- fix build against PCRE 8.30
+
+* Mon Feb 13 2012 Joe Orton - 2.2.22-1
+- update to 2.2.22
+
+* Fri Feb 10 2012 Petr Pisar - 2.2.21-8
+- Rebuild against PCRE 8.30
+
+* Mon Jan 23 2012 Jan Kaluza - 2.2.21-7
+- fix #783629 - start httpd after named
+
+* Mon Jan 16 2012 Joe Orton - 2.2.21-6
+- complete conversion to systemd, drop init script (#770311)
+- fix comments in /etc/sysconfig/httpd (#771024)
+- enable PrivateTmp in service file (#781440)
+- set LANG=C in /etc/sysconfig/httpd
+
+* Fri Jan 13 2012 Fedora Release Engineering - 2.2.21-5
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_17_Mass_Rebuild
+
+* Tue Dec 06 2011 Jan Kaluza - 2.2.21-4
+- fix #751591 - start httpd after remote-fs
+
+* Mon Oct 24 2011 Jan Kaluza - 2.2.21-3
+- allow changing the state of BalancerMember in mod_proxy_balancer web interface
+
+* Thu Sep 22 2011 Ville Skyttä - 2.2.21-2
+- Make mmn available as %%{_httpd_mmn}.
+- Add .svgz to AddEncoding x-gzip example in httpd.conf.
+
+* Tue Sep 13 2011 Joe Orton - 2.2.21-1
+- update to 2.2.21
+
+* Mon Sep 5 2011 Joe Orton - 2.2.20-1
+- update to 2.2.20
+- fix MPM stub man page generation
+
+* Wed Aug 10 2011 Jan Kaluza - 2.2.19-5
+- fix #707917 - add httpd-ssl-pass-dialog to ask for SSL password using systemd
+
+* Fri Jul 22 2011 Iain Arnell 1:2.2.19-4
+- rebuild while rpm-4.9.1 is untagged to remove trailing slash in provided
+  directory names
+
+* Wed Jul 20 2011 Jan Kaluza - 2.2.19-3
+- fix #716621 - suexec now works without setuid bit
+
+* Thu Jul 14 2011 Jan Kaluza - 2.2.19-2
+- fix #689091 - backported patch from 2.3 branch to support IPv6 in logresolve
+
+* Fri Jul 1 2011 Joe Orton - 2.2.19-1
+- update to 2.2.19
+- enable dbd, authn_dbd in default config
+
+* Thu Apr 14 2011 Joe Orton - 2.2.17-13
+- fix path expansion in service files
+
+* Tue Apr 12 2011 Joe Orton - 2.2.17-12
+- add systemd service files (#684175, thanks to Jóhann B. Guðmundsson)
+
+* Wed Mar 23 2011 Joe Orton - 2.2.17-11
+- minor updates to httpd.conf
+- drop old patches
+
+* Wed Mar 2 2011 Joe Orton - 2.2.17-10
+- rebuild
+
+* Wed Feb 23 2011 Joe Orton - 2.2.17-9
+- use arch-specific mmn
+
+* Wed Feb 09 2011 Fedora Release Engineering - 2.2.17-8
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_15_Mass_Rebuild
+
+* Mon Jan 31 2011 Joe Orton - 2.2.17-7
+- generate dummy mod_ssl cert with CA:FALSE constraint (#667841)
+- add man page stubs for httpd.event, httpd.worker
+- drop distcache support
+- add STOP_TIMEOUT support to init script
+
+* Sat Jan 8 2011 Joe Orton - 2.2.17-6
+- update default SSLCipherSuite per upstream trunk
+
+* Wed Jan 5 2011 Joe Orton - 2.2.17-5
+- fix requires (#667397)
+
+* Wed Jan 5 2011 Joe Orton - 2.2.17-4
+- de-ghost /var/run/httpd
+
+* Tue Jan 4 2011 Joe Orton - 2.2.17-3
+- add tmpfiles.d configuration, ghost /var/run/httpd (#656600)
+
+* Sat Nov 20 2010 Joe Orton - 2.2.17-2
+- drop setuid bit, use capabilities for suexec binary
+
+* Wed Oct 27 2010 Joe Orton - 2.2.17-1
+- update to 2.2.17
+
+* Fri Sep 10 2010 Joe Orton - 2.2.16-2
+- link everything using -z relro and -z now
+
+* Mon Jul 26 2010 Joe Orton - 2.2.16-1
+- update to 2.2.16
+
+* Fri Jul 9 2010 Joe Orton - 2.2.15-3
+- default config tweaks:
+  * harden httpd.conf w.r.t. .htaccess restriction (#591293)
+  * load mod_substitute, mod_version by default
+  * drop proxy_ajp.conf, load mod_proxy_ajp in httpd.conf
+  * add commented list of shipped-but-unloaded modules
+  * bump up worker defaults a little
+  * drop KeepAliveTimeout to 5 secs per upstream
+- fix LSB compliance in init script (#522074)
+- bundle NOTICE in -tools
+- use init script in logrotate postrotate to pick up PIDFILE
+- drop some old Obsoletes/Conflicts
+
+* Sun Apr 04 2010 Robert Scheck - 2.2.15-1
+- update to 2.2.15 (#572404, #579311)
+