diff --git a/modules/proxy/mod_proxy_http.c b/modules/proxy/mod_proxy_http.c
index ec1e042..2c0500f 100644
--- a/modules/proxy/mod_proxy_http.c
+++ b/modules/proxy/mod_proxy_http.c
@@ -310,16 +310,18 @@ static int stream_reqbody_read(proxy_http_req_t *req, apr_bucket_brigade *bb,
     return OK;
 }
 
-static int stream_reqbody(proxy_http_req_t *req, rb_methods rb_method)
+static int stream_reqbody(proxy_http_req_t *req)
 {
     request_rec *r = req->r;
     int seen_eos = 0, rv = OK;
     apr_size_t hdr_len;
     char chunk_hdr[20]; /* must be here due to transient bucket. */
+    conn_rec *origin = req->origin;
     proxy_conn_rec *p_conn = req->backend;
     apr_bucket_alloc_t *bucket_alloc = req->bucket_alloc;
     apr_bucket_brigade *header_brigade = req->header_brigade;
     apr_bucket_brigade *input_brigade = req->input_brigade;
+    rb_methods rb_method = req->rb_method;
     apr_off_t bytes, bytes_streamed = 0;
     apr_bucket *e;
 
@@ -333,7 +335,7 @@ static int stream_reqbody(proxy_http_req_t *req, rb_methods rb_method)
         }
 
         if (!APR_BRIGADE_EMPTY(input_brigade)) {
-            /* If this brigade contains EOS, either stop or remove it. */
+            /* If this brigade contains EOS, remove it and be done. */
             if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) {
                 seen_eos = 1;
 
@@ -375,7 +377,8 @@ static int stream_reqbody(proxy_http_req_t *req, rb_methods rb_method)
                     APR_BRIGADE_INSERT_TAIL(input_brigade, e);
                 }
             }
-            else if (bytes_streamed > req->cl_val) {
+            else if (rb_method == RB_STREAM_CL
+                     && bytes_streamed > req->cl_val) {
                 /* C-L < bytes streamed?!?
                  * We will error out after the body is completely
                  * consumed, but we can't stream more bytes at the
@@ -407,7 +410,7 @@ static int stream_reqbody(proxy_http_req_t *req, rb_methods rb_method)
         APR_BRIGADE_PREPEND(input_brigade, header_brigade);
 
         /* Flush here on EOS because we won't stream_reqbody_read() again */
-        rv = ap_proxy_pass_brigade(bucket_alloc, r, p_conn, req->origin,
+        rv = ap_proxy_pass_brigade(bucket_alloc, r, p_conn, origin,
                                    input_brigade, seen_eos);
         if (rv != OK) {
             return rv;
@@ -454,10 +457,6 @@ static int spool_reqbody_cl(proxy_http_req_t *req, apr_off_t *bytes_spooled)
         /* If this brigade contains EOS, either stop or remove it. */
         if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) {
             seen_eos = 1;
-
-            /* We can't pass this EOS to the output_filters. */
-            e = APR_BRIGADE_LAST(input_brigade);
-            apr_bucket_delete(e);
         }
 
         apr_brigade_length(input_brigade, 1, &bytes);
@@ -644,7 +643,18 @@ static int ap_proxy_http_prefetch(proxy_http_req_t *req,
      */
     temp_brigade = apr_brigade_create(p, bucket_alloc);
     block = req->prefetch_nonblocking ? APR_NONBLOCK_READ : APR_BLOCK_READ;
-    do {
+
+    /* Account for saved input, if any. */
+    apr_brigade_length(input_brigade, 0, &bytes_read);
+
+    /* Ensure we don't hit a wall where we have a buffer too small
+     * for ap_get_brigade's filters to fetch us another bucket,
+     * surrender once we hit 80 bytes less than MAX_MEM_SPOOL
+     * (an arbitrary value).
+     */
+    while (bytes_read < MAX_MEM_SPOOL - 80
+           && (APR_BRIGADE_EMPTY(input_brigade)
+               || !APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade)))) {
         status = ap_get_brigade(r->input_filters, temp_brigade,
                                 AP_MODE_READBYTES, block,
                                 MAX_MEM_SPOOL - bytes_read);
@@ -686,15 +696,7 @@ static int ap_proxy_http_prefetch(proxy_http_req_t *req,
                           c->client_ip, c->remote_host ? c->remote_host: "");
             return HTTP_INTERNAL_SERVER_ERROR;
         }
-
-        /* Ensure we don't hit a wall where we have a buffer too small
-         * for ap_get_brigade's filters to fetch us another bucket,
-         * surrender once we hit 80 bytes less than MAX_MEM_SPOOL
-         * (an arbitrary value.)
-         */
-    } while ((bytes_read < MAX_MEM_SPOOL - 80)
-             && !APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))
-             && !req->prefetch_nonblocking);
+    }
 
     /* Use chunked request body encoding or send a content-length body?
      *
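Note: the two hunks above turn the old do/while prefetch into a pre-checked while loop, so input saved by a previous attempt counts toward the MAX_MEM_SPOOL budget and an already-seen EOS stops the loop before any further read. Below is a minimal sketch of that pattern, outside the patch, with hypothetical names (prefetch_some_body, spool, tmp); only MAX_MEM_SPOOL and the 80-byte headroom are taken from the code above.

/* Sketch only (not part of the patch): a bounded, EOS-aware prefetch loop in
 * the style of the new while loop above. prefetch_some_body(), spool and tmp
 * are hypothetical; MAX_MEM_SPOOL mirrors mod_proxy_http.c.
 */
static apr_status_t prefetch_some_body(request_rec *r,
                                       apr_bucket_brigade *spool,
                                       apr_bucket_brigade *tmp,
                                       apr_read_type_e block)
{
    apr_off_t spooled = 0;
    apr_status_t rv = APR_SUCCESS;

    /* Count whatever a previous attempt already left in the spool. */
    apr_brigade_length(spool, 0, &spooled);

    while (spooled < MAX_MEM_SPOOL - 80
           && (APR_BRIGADE_EMPTY(spool)
               || !APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(spool)))) {
        rv = ap_get_brigade(r->input_filters, tmp, AP_MODE_READBYTES,
                            block, MAX_MEM_SPOOL - spooled);
        if (rv != APR_SUCCESS) {
            break; /* APR_EAGAIN is expected for nonblocking reads */
        }
        /* The real loop body sets transient buckets aside with
         * ap_save_brigade(); a plain concat (which moves, not copies,
         * the buckets) is enough for this sketch.
         */
        APR_BRIGADE_CONCAT(spool, tmp);
        apr_brigade_length(spool, 0, &spooled);
    }
    return rv;
}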
@@ -838,35 +840,21 @@ static int ap_proxy_http_request(proxy_http_req_t *req)
 {
     int rv;
     request_rec *r = req->r;
-    apr_bucket_alloc_t *bucket_alloc = req->bucket_alloc;
-    apr_bucket_brigade *header_brigade = req->header_brigade;
-    apr_bucket_brigade *input_brigade = req->input_brigade;
 
     /* send the request header/body, if any. */
     switch (req->rb_method) {
+    case RB_SPOOL_CL:
     case RB_STREAM_CL:
     case RB_STREAM_CHUNKED:
         if (req->do_100_continue) {
-            rv = ap_proxy_pass_brigade(bucket_alloc, r, req->backend,
-                                       req->origin, header_brigade, 1);
+            rv = ap_proxy_pass_brigade(req->bucket_alloc, r, req->backend,
+                                       req->origin, req->header_brigade, 1);
         }
         else {
-            rv = stream_reqbody(req, req->rb_method);
+            rv = stream_reqbody(req);
         }
         break;
 
-    case RB_SPOOL_CL:
-        /* Prefetch has built the header and spooled the whole body;
-         * if we don't expect 100-continue we can flush both all at once,
-         * otherwise flush the header only.
-         */
-        if (!req->do_100_continue) {
-            APR_BRIGADE_CONCAT(header_brigade, input_brigade);
-        }
-        rv = ap_proxy_pass_brigade(bucket_alloc, r, req->backend,
-                                   req->origin, header_brigade, 1);
-        break;
-
     default:
         /* shouldn't be possible */
         rv = HTTP_INTERNAL_SERVER_ERROR;
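Note: with RB_SPOOL_CL folded into the streaming cases, the send path of ap_proxy_http_request() reduces to the shape below. This is only a reconstruction from the context and added lines of the hunk above, not a verbatim copy of the resulting file; the logging in the default branch is elided.

/* Reconstructed shape of the post-patch send path; anything beyond what the
 * hunk above shows is an assumption.
 */
switch (req->rb_method) {
case RB_SPOOL_CL:
case RB_STREAM_CL:
case RB_STREAM_CHUNKED:
    if (req->do_100_continue) {
        /* Flush the header only; the body follows once the backend has
         * answered the 100-continue (see the process_response hunk below).
         */
        rv = ap_proxy_pass_brigade(req->bucket_alloc, r, req->backend,
                                   req->origin, req->header_brigade, 1);
    }
    else {
        /* stream_reqbody() now reads rb_method from req and also covers
         * the spooled (RB_SPOOL_CL) body.
         */
        rv = stream_reqbody(req);
    }
    break;

default:
    /* shouldn't be possible */
    rv = HTTP_INTERNAL_SERVER_ERROR;
    break;
}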
@@ -1577,15 +1565,10 @@ int ap_proxy_http_process_response(proxy_http_req_t *req)
 
         /* Send the request body (fully). */
         switch(req->rb_method) {
+        case RB_SPOOL_CL:
         case RB_STREAM_CL:
         case RB_STREAM_CHUNKED:
-            status = stream_reqbody(req, req->rb_method);
-            break;
-        case RB_SPOOL_CL:
-            /* Prefetch has spooled the whole body, flush it. */
-            status = ap_proxy_pass_brigade(req->bucket_alloc, r,
-                                           backend, origin,
-                                           req->input_brigade, 1);
+            status = stream_reqbody(req);
             break;
         default:
             /* Shouldn't happen */
@@ -1940,6 +1923,7 @@ static int proxy_http_handler(request_rec *r, proxy_worker *worker,
     const char *u;
     proxy_http_req_t *req = NULL;
     proxy_conn_rec *backend = NULL;
+    apr_bucket_brigade *input_brigade = NULL;
     int is_ssl = 0;
     conn_rec *c = r->connection;
     proxy_dir_conf *dconf;
@@ -2005,8 +1989,20 @@ static int proxy_http_handler(request_rec *r, proxy_worker *worker,
 
     dconf = ap_get_module_config(r->per_dir_config, &proxy_module);
 
+    /* We possibly reuse input data prefetched in previous call(s), e.g. for a
+     * balancer fallback scenario, and in this case the 100 continue settings
+     * should be consistent between balancer members. If not, we need to ignore
+     * Proxy100Continue on=>off once we tried to prefetch already, otherwise
+     * the HTTP_IN filter won't send 100 Continue for us anymore, and we might
+     * deadlock with the client waiting for each other. Note that off=>on is
+     * not an issue because in this case r->expecting_100 is false (the 100
+     * Continue is out already), but we make sure that prefetch will be
+     * nonblocking to avoid passing more time there.
+     */
+    apr_pool_userdata_get((void **)&input_brigade, "proxy-req-input", p);
+
     /* Should we handle end-to-end or ping 100-continue? */
-    if ((r->expecting_100 && dconf->forward_100_continue)
+    if ((r->expecting_100 && (dconf->forward_100_continue || input_brigade))
         || PROXY_DO_100_CONTINUE(worker, r)) {
         /* We need to reset r->expecting_100 or prefetching will cause
          * ap_http_filter() to send "100 Continue" response by itself. So
@@ -2023,7 +2019,8 @@ static int proxy_http_handler(request_rec *r, proxy_worker *worker,
     /* Should we block while prefetching the body or try nonblocking and flush
      * data to the backend ASAP?
      */
-    else if (apr_table_get(r->subprocess_env, "proxy-prefetch-nonblocking")) {
+    else if (input_brigade || apr_table_get(r->subprocess_env,
+                                            "proxy-prefetch-nonblocking")) {
         req->prefetch_nonblocking = 1;
     }
 
@@ -2048,6 +2045,17 @@ static int proxy_http_handler(request_rec *r, proxy_worker *worker,
                                                 sizeof(req->server_portstr))))
         goto cleanup;
 
+    /* The header is always (re-)built since it depends on worker settings,
+     * but the body can be fetched only once (even partially), so it's saved
+     * in between proxy_http_handler() calls should we come back here.
+     */
+    req->header_brigade = apr_brigade_create(p, req->bucket_alloc);
+    if (input_brigade == NULL) {
+        input_brigade = apr_brigade_create(p, req->bucket_alloc);
+        apr_pool_userdata_setn(input_brigade, "proxy-req-input", NULL, p);
+    }
+    req->input_brigade = input_brigade;
+
     /* Prefetch (nonlocking) the request body so to increase the chance to get
      * the whole (or enough) body and determine Content-Length vs chunked or
      * spooled. By doing this before connecting or reusing the backend, we want
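Note: the brigade created here is registered in the request pool under the "proxy-req-input" key, so a later pass through proxy_http_handler() (e.g. a balancer retry after a failed backend) finds the already-prefetched body instead of trying to read it from the client again. A standalone sketch of that save/restore pattern, with a hypothetical helper name; the key and the calls match the hunk above:

/* Sketch of the save/restore pattern used above: fetch a brigade stashed in
 * the request pool by a previous attempt, or create and register one.
 * get_or_create_input_brigade() is a hypothetical helper, not part of the patch.
 */
static apr_bucket_brigade *get_or_create_input_brigade(apr_pool_t *p,
                                                       apr_bucket_alloc_t *ba)
{
    apr_bucket_brigade *bb = NULL;

    apr_pool_userdata_get((void **)&bb, "proxy-req-input", p);
    if (bb == NULL) {
        bb = apr_brigade_create(p, ba);
        /* _setn() stores the pointer without duplicating the key and adds
         * no extra cleanup, so the brigade simply lives as long as the
         * request pool.
         */
        apr_pool_userdata_setn(bb, "proxy-req-input", NULL, p);
    }
    return bb;
}

This is also why the header brigade is recreated unconditionally while the input brigade is created only when no saved one exists: headers depend on the worker being tried, the body does not.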
@@ -2058,8 +2066,6 @@ static int proxy_http_handler(request_rec *r, proxy_worker *worker,
      * to reduce to the minimum the unavoidable local is_socket_connected() vs
      * remote keepalive race condition.
      */
-    req->input_brigade = apr_brigade_create(p, req->bucket_alloc);
-    req->header_brigade = apr_brigade_create(p, req->bucket_alloc);
     if ((status = ap_proxy_http_prefetch(req, uri, locurl)) != OK)
         goto cleanup;
 