diff --git a/httpd-2.4.37-CVE-2022-29404.patch b/httpd-2.4.37-CVE-2022-29404.patch index 3d706be6d355157ee686aa2f2f80947818ae5883..08d0b7b1ddf4049f97b8fd4022d1a77093dc2469 100644 --- a/httpd-2.4.37-CVE-2022-29404.patch +++ b/httpd-2.4.37-CVE-2022-29404.patch @@ -75,44 +75,8 @@ index 6bedcac..393343a 100644 #ifdef AP_DEBUG { /* Make sure ap_getline() didn't leave any droppings. */ -diff --git a/modules/proxy/mod_proxy_http.c b/modules/proxy/mod_proxy_http.c -index 7da9bde..1b7bb81 100644 ---- a/modules/proxy/mod_proxy_http.c -+++ b/modules/proxy/mod_proxy_http.c -@@ -439,13 +439,10 @@ static int spool_reqbody_cl(proxy_http_req_t *req, apr_off_t *bytes_spooled) - apr_bucket *e; - apr_off_t bytes, fsize = 0; - apr_file_t *tmpfile = NULL; -- apr_off_t limit; - - body_brigade = apr_brigade_create(p, bucket_alloc); - *bytes_spooled = 0; - -- limit = ap_get_limit_req_body(r); -- - do { - if (APR_BRIGADE_EMPTY(input_brigade)) { - rv = stream_reqbody_read(req, input_brigade, 0); -@@ -462,17 +459,6 @@ static int spool_reqbody_cl(proxy_http_req_t *req, apr_off_t *bytes_spooled) - apr_brigade_length(input_brigade, 1, &bytes); - - if (*bytes_spooled + bytes > MAX_MEM_SPOOL) { -- /* -- * LimitRequestBody does not affect Proxy requests (Should it?). -- * Let it take effect if we decide to store the body in a -- * temporary file on disk. 
-- */ -- if (limit && (*bytes_spooled + bytes > limit)) { -- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01088) -- "Request body is larger than the configured " -- "limit of %" APR_OFF_T_FMT, limit); -- return HTTP_REQUEST_ENTITY_TOO_LARGE; -- } - /* can't spool any more in memory; write latest brigade to disk */ - if (tmpfile == NULL) { - const char *temp_dir; diff --git a/server/core.c b/server/core.c -index 09664fc..084e243 100644 +index a0bfaad..6556f20 100644 --- a/server/core.c +++ b/server/core.c @@ -65,7 +65,7 @@ diff --git a/httpd-2.4.37-CVE-2022-31813.patch b/httpd-2.4.37-CVE-2022-31813.patch index 7fe91d1accaab30eefe15cd12b6c60891be91287..bc0e2320703789250e54e11119b8d25146ec9748 100644 --- a/httpd-2.4.37-CVE-2022-31813.patch +++ b/httpd-2.4.37-CVE-2022-31813.patch @@ -1,8 +1,8 @@ diff --git a/modules/proxy/proxy_util.c b/modules/proxy/proxy_util.c -index 3d5b220..ec9a414 100644 +index efcc6ca..6626ea0 100644 --- a/modules/proxy/proxy_util.c +++ b/modules/proxy/proxy_util.c -@@ -3621,12 +3621,14 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p, +@@ -3631,12 +3631,14 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p, char **old_cl_val, char **old_te_val) { @@ -18,7 +18,7 @@ index 3d5b220..ec9a414 100644 apr_bucket *e; int do_100_continue; conn_rec *origin = p_conn->connection; -@@ -3662,6 +3664,52 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p, +@@ -3672,6 +3674,52 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p, ap_xlate_proto_to_ascii(buf, strlen(buf)); e = apr_bucket_pool_create(buf, strlen(buf), p, c->bucket_alloc); APR_BRIGADE_INSERT_TAIL(header_brigade, e); @@ -71,7 +71,7 @@ index 3d5b220..ec9a414 100644 if (dconf->preserve_host == 0) { if (ap_strchr_c(uri->hostname, ':')) { /* if literal IPv6 address */ if (uri->port_str && uri->port != DEFAULT_HTTP_PORT) { -@@ -3683,7 +3731,7 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p, +@@ -3693,7 +3741,7 @@ PROXY_DECLARE(int) 
ap_proxy_create_hdrbrgd(apr_pool_t *p, /* don't want to use r->hostname, as the incoming header might have a * port attached */ @@ -80,7 +80,7 @@ index 3d5b220..ec9a414 100644 if (!hostname) { hostname = r->server->server_hostname; ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(01092) -@@ -3697,21 +3745,7 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p, +@@ -3707,21 +3755,7 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p, ap_xlate_proto_to_ascii(buf, strlen(buf)); e = apr_bucket_pool_create(buf, strlen(buf), p, c->bucket_alloc); APR_BRIGADE_INSERT_TAIL(header_brigade, e); @@ -103,7 +103,7 @@ index 3d5b220..ec9a414 100644 /* handle Via */ if (conf->viaopt == via_block) { -@@ -3778,8 +3812,6 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p, +@@ -3788,8 +3822,6 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p, */ if (dconf->add_forwarded_headers) { if (PROXYREQ_REVERSE == r->proxyreq) { @@ -112,7 +112,7 @@ index 3d5b220..ec9a414 100644 /* Add X-Forwarded-For: so that the upstream has a chance to * determine, where the original request came from. */ -@@ -3789,8 +3821,9 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p, +@@ -3799,8 +3831,9 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p, /* Add X-Forwarded-Host: so that upstream knows what the * original request hostname was. 
*/ @@ -124,7 +124,7 @@ index 3d5b220..ec9a414 100644 } /* Add X-Forwarded-Server: so that upstream knows what the -@@ -3802,10 +3835,27 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p, +@@ -3812,10 +3845,27 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p, } } @@ -155,7 +155,7 @@ index 3d5b220..ec9a414 100644 creds = apr_table_get(r->notes, "proxy-basic-creds"); if (creds) { -@@ -3817,55 +3867,8 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p, +@@ -3827,55 +3877,8 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p, headers_in = (const apr_table_entry_t *) headers_in_array->elts; for (counter = 0; counter < headers_in_array->nelts; counter++) { if (headers_in[counter].key == NULL @@ -213,7 +213,7 @@ index 3d5b220..ec9a414 100644 } buf = apr_pstrcat(p, headers_in[counter].key, ": ", -@@ -3876,11 +3879,9 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p, +@@ -3886,11 +3889,9 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p, APR_BRIGADE_INSERT_TAIL(header_brigade, e); } @@ -226,4 +226,4 @@ index 3d5b220..ec9a414 100644 + return rc; } - PROXY_DECLARE(int) ap_proxy_pass_brigade(apr_bucket_alloc_t *bucket_alloc, + PROXY_DECLARE(int) ap_proxy_prefetch_input(request_rec *r, diff --git a/httpd-2.4.37-CVE-2023-25690.patch b/httpd-2.4.37-CVE-2023-25690.patch index 5b24a06b70f423fcab83ddfd4af9a97ada7aa683..bb856b76a3801e4ca891baed071e891e9a328282 100644 --- a/httpd-2.4.37-CVE-2023-25690.patch +++ b/httpd-2.4.37-CVE-2023-25690.patch @@ -22,7 +22,7 @@ index 815ec72..2b8ed35 100644
-+# Escape spaces to %20 in the path instead of + as used in form submission via -+# the query string -+RewriteRule "^search/(.*)$" "/search.php/$1" "[B,BNP]" -+-+ -
This flag is available in version 2.4.26 and later.
- -+The [BCTLS] flag is similar to the [B] flag, but only escapes -+control characters and the space character. This is the same set of -+characters rejected when they are copied into the query string unencoded. -+
-+ -+-+# Escape control characters and spaces -+RewriteRule "^search/(.*)$" "/search.php/$1" "[BCTLS]" -+-+ -+
The list of characters in [BNE=...] are treated as exclusions to the -+characters of the [B] or [BCTLS] flags. The listed characters will not be -+escaped. -+
-+ -+-+# Escape the default characters, but leave / -+RewriteRule "^search/(.*)$" "/search.php?term=$1" "[B,BNE=/]" -+-+ -
Consider this example:
- -RewriteEngine On --RewriteRule "^/index\.html" "-" [CO=frontdoor:yes:.example.com:1440:/]-+RewriteRule "^/index\.html" "-" [CO=frontdoor:yes:.example.com:1440:/] - - -
In the example give, the rule doesn't rewrite the request.
-@@ -410,8 +452,8 @@ argument to index.php, however, the index.php, the RewriteRule will be skipped.
RewriteBase "/"
--RewriteCond "%{REQUEST_URI}" "!=/index.php"
--RewriteRule "^(.*)" "/index.php?req=$1" [L,PT]
-+RewriteCond "%{REQUEST_URI}" !=/index.php
-+RewriteRule "^(.*)" "/index.php?req=$1" [L,PT]
-
- A), perform this substitution (i.e., replace the
- A with a B).
-
--In 2.4.8 and later, this module returns an error after 32,000 iterations to -+
In 2.4.8 and later, this module returns an error after 10,000 iterations to - protect against unintended looping. An alternative maximum number of - iterations can be specified by adding to the N flag.
-# Be willing to replace 1 character in each pass of the loop --RewriteRule "(.+)[><;]$" "$1" [N=64000] -+RewriteRule "(.+)[><;]$" "$1" [N=32000] - # ... or, give up if after 10 loops - RewriteRule "(.+)[><;]$" "$1" [N=10]- -@@ -681,19 +723,21 @@ URI in request' warnings. -
The [S] flag is used to skip rules that you don't want to run. The
- syntax of the skip flag is [S=N], where N signifies
- the number of rules to skip (provided the
--RewriteRule matches). This can be thought of as a goto
--statement in your rewrite ruleset. In the following example, we only want
--to run the RewriteRule if the
--requested URI doesn't correspond with an actual file.
-+RewriteCond directives match). This can be thought of as a
-+goto statement in your rewrite ruleset. In the following
-+example, we only want to run the
-+RewriteRule if the requested URI doesn't correspond with an
-+actual file.
-
- # Is the request for a non-existent file?
--RewriteCond "%{REQUEST_FILENAME}" "!-f"
--RewriteCond "%{REQUEST_FILENAME}" "!-d"
-+RewriteCond "%{REQUEST_FILENAME}" !-f
-+RewriteCond "%{REQUEST_FILENAME}" !-d
- # If so, skip these two RewriteRules
--RewriteRule ".?" "-" [S=2]
-+RewriteRule ".?" "-" [S=2]
-
--RewriteRule "(.*\.gif)" "images.php?$1"
--RewriteRule "(.*\.html)" "docs.php?$1"
-+RewriteRule "(.*\.gif)" "images.php?$1"
-+RewriteRule "(.*\.html)" "docs.php?$1"
-
-
- This technique is useful because a RewriteCond only applies to the
-@@ -705,18 +749,18 @@ use this to make pseudo if-then-else constructs: The last rule of
- the then-clause becomes skip=N, where N is the
- number of rules in the else-clause:
# Does the file exist?
--RewriteCond "%{REQUEST_FILENAME}" "!-f"
--RewriteCond "%{REQUEST_FILENAME}" "!-d"
-+RewriteCond "%{REQUEST_FILENAME}" !-f
-+RewriteCond "%{REQUEST_FILENAME}" !-d
- # Create an if-then-else construct by skipping 3 lines if we meant to go to the "else" stanza.
--RewriteRule ".?" "-" [S=3]
-+RewriteRule ".?" "-" [S=3]
-
- # IF the file exists, then:
-- RewriteRule "(.*\.gif)" "images.php?$1"
-+ RewriteRule "(.*\.gif)" "images.php?$1"
- RewriteRule "(.*\.html)" "docs.php?$1"
- # Skip past the "else" stanza.
-- RewriteRule ".?" "-" [S=1]
-+ RewriteRule ".?" "-" [S=1]
- # ELSE...
-- RewriteRule "(.*)" "404.php?file=$1"
-+ RewriteRule "(.*)" "404.php?file=$1"
- # END
-
-
-@@ -733,7 +777,7 @@ sent. This has the same effect as the # Serve .pl files as plain text
--RewriteRule "\.pl$" "-" [T=text/plain]
-+RewriteRule "\.pl$" "-" [T=text/plain]
-
-
- Or, perhaps, if you have a camera that produces jpeg images without
-@@ -741,7 +785,7 @@ file extensions, you could force those images to be served with the
- correct MIME type by virtue of their file names:
-
- # Files with 'IMG' in the name are jpg images.
--RewriteRule "IMG" "-" [T=image/jpg]
-+RewriteRule "IMG" "-" [T=image/jpg]
-
-
- Please note that this is a trivial example, and could be better done
diff --git a/modules/mappers/mod_rewrite.c b/modules/mappers/mod_rewrite.c
index 38dbb24..b71c67c 100644
--- a/modules/mappers/mod_rewrite.c
@@ -612,7 +463,7 @@ index 38dbb24..b71c67c 100644
/* append the QUERY_STRING part */
diff --git a/modules/proxy/mod_proxy_ajp.c b/modules/proxy/mod_proxy_ajp.c
-index 6faabea..59396a8 100644
+index cbb0872..873ccf1 100644
--- a/modules/proxy/mod_proxy_ajp.c
+++ b/modules/proxy/mod_proxy_ajp.c
@@ -69,6 +69,16 @@ static int proxy_ajp_canon(request_rec *r, char *url)
@@ -654,7 +505,7 @@ index 3a28038..c599e1a 100644
if (path == NULL)
return HTTP_BAD_REQUEST;
diff --git a/modules/proxy/mod_proxy_http.c b/modules/proxy/mod_proxy_http.c
-index 7da9bde..2cdc61e 100644
+index 7573638..fe7b322 100644
--- a/modules/proxy/mod_proxy_http.c
+++ b/modules/proxy/mod_proxy_http.c
@@ -90,6 +90,16 @@ static int proxy_http_canon(request_rec *r, char *url)
@@ -716,7 +567,7 @@ index 48ae6f4..6a153a3 100644
return 0;
}
diff --git a/server/util.c b/server/util.c
-index 2a5dd04..1d82fd8 100644
+index 45051b7..9d897d4 100644
--- a/server/util.c
+++ b/server/util.c
@@ -74,13 +74,6 @@
diff --git a/httpd-2.4.37-CVE-2023-27522.patch b/httpd-2.4.37-CVE-2023-27522.patch
index 68caa96b9c7476c76bd7d5aedb44e4383539f900..8a8f7bd4bb4fbe4870dcf0231aee2b8dcb2b8802 100644
--- a/httpd-2.4.37-CVE-2023-27522.patch
+++ b/httpd-2.4.37-CVE-2023-27522.patch
@@ -1,41 +1,42 @@
-diff -aruN httpd-2.4.37/modules/proxy/mod_proxy_uwsgi.c httpd-2.4.37-new/modules/proxy/mod_proxy_uwsgi.c
---- httpd-2.4.37/modules/proxy/mod_proxy_uwsgi.c 2023-03-23 17:17:42.778867787 +0800
-+++ httpd-2.4.37-new/modules/proxy/mod_proxy_uwsgi.c 2023-03-24 00:25:57.265336276 +0800
-@@ -304,18 +304,18 @@
+diff --git a/modules/proxy/mod_proxy_uwsgi.c b/modules/proxy/mod_proxy_uwsgi.c
+index 9dcbed1..a1b564d 100644
+--- a/modules/proxy/mod_proxy_uwsgi.c
++++ b/modules/proxy/mod_proxy_uwsgi.c
+@@ -304,18 +304,16 @@ static int uwsgi_response(request_rec *r, proxy_conn_rec * backend,
pass_bb = apr_brigade_create(r->pool, c->bucket_alloc);
len = ap_getline(buffer, sizeof(buffer), rp, 1);
-
if (len <= 0) {
- /* oops */
-- return HTTP_INTERNAL_SERVER_ERROR;
+ /* invalid or empty */
-+ return HTTP_INTERNAL_SERVER_ERROR;
+ return HTTP_INTERNAL_SERVER_ERROR;
}
-
+-
backend->worker->s->read += len;
-
+-
- if (len >= sizeof(buffer) - 1) {
+- /* oops */
+ if ((apr_size_t)len >= sizeof(buffer)) {
- /* oops */
++ /* too long */
return HTTP_INTERNAL_SERVER_ERROR;
}
+
/* Position of http status code */
if (apr_date_checkmask(buffer, "HTTP/#.# ###*")) {
status_start = 9;
-@@ -324,8 +324,8 @@
+@@ -324,8 +322,8 @@ static int uwsgi_response(request_rec *r, proxy_conn_rec * backend,
status_start = 7;
}
else {
- /* oops */
- return HTTP_INTERNAL_SERVER_ERROR;
-+ /* not HTTP */
++ /* not HTTP */
+ return HTTP_BAD_GATEWAY;
}
status_end = status_start + 3;
-@@ -345,21 +345,46 @@
+@@ -345,21 +343,44 @@ static int uwsgi_response(request_rec *r, proxy_conn_rec * backend,
}
r->status_line = apr_pstrdup(r->pool, &buffer[status_start]);
@@ -47,14 +48,13 @@ diff -aruN httpd-2.4.37/modules/proxy/mod_proxy_uwsgi.c httpd-2.4.37-new/modules
+ len = -1;
+ break;
+ }
-+
value = strchr(buffer, ':');
- /* invalid header skip */
- if (!value)
- continue;
- *value = '\0';
- ++value;
-+ if (!value) {
++ if (!value) {
+ /* invalid header */
+ len = -1;
+ break;
@@ -70,12 +70,11 @@ diff -aruN httpd-2.4.37/modules/proxy/mod_proxy_uwsgi.c httpd-2.4.37-new/modules
for (end = &value[strlen(value) - 1];
end > value && apr_isspace(*end); --end)
*end = '\0';
-+ if (*ap_scan_http_field_content(value)) {
++ if (*ap_scan_http_field_content(value)) {
+ /* invalid value */
+ len = -1;
+ break;
+ }
-+
apr_table_add(r->headers_out, buffer, value);
}
+ if (len < 0) {
@@ -88,11 +87,3 @@ diff -aruN httpd-2.4.37/modules/proxy/mod_proxy_uwsgi.c httpd-2.4.37-new/modules
if ((buf = apr_table_get(r->headers_out, "Content-Type"))) {
ap_set_content_type(r, apr_pstrdup(r->pool, buf));
-diff -aruN httpd-2.4.37/proxy_uwsgi_response_validation.txt httpd-2.4.37-new/proxy_uwsgi_response_validation.txt
---- httpd-2.4.37/proxy_uwsgi_response_validation.txt 1970-01-01 08:00:00.000000000 +0800
-+++ httpd-2.4.37-new/proxy_uwsgi_response_validation.txt 2023-03-24 00:06:18.077406561 +0800
-@@ -0,0 +1,4 @@
-+ *) mod_proxy_uwsgi: Stricter backend HTTP response parsing/validation.
-+ [Yann Ylavic]
-+ *) mod_proxy_uwsgi: Stricter backend HTTP response parsing/validation.
-+ [Yann Ylavic]
diff --git a/httpd-2.4.37-mod_status-duplicate-key.patch b/httpd-2.4.37-mod_status-duplicate-key.patch
new file mode 100644
index 0000000000000000000000000000000000000000..a48fad6785be518ba25f108309fb4fae75600e3c
--- /dev/null
+++ b/httpd-2.4.37-mod_status-duplicate-key.patch
@@ -0,0 +1,170 @@
+commit 84e6f25f67de9a9bddefdcdbfee3f251fead647e
+Author: Tomas Korbar
+Date: Thu Jul 20 14:41:33 2023 +0200
+
+ Fix duplicate presence of keys printed by mod_status
+
+diff --git a/modules/generators/mod_status.c b/modules/generators/mod_status.c
+index 5917953..5bada07 100644
+--- a/modules/generators/mod_status.c
++++ b/modules/generators/mod_status.c
+@@ -186,7 +186,8 @@ static int status_handler(request_rec *r)
+ apr_uint32_t up_time;
+ ap_loadavg_t t;
+ int j, i, res, written;
+- int ready;
++ int idle;
++ int graceful;
+ int busy;
+ unsigned long count;
+ unsigned long lres, my_lres, conn_lres;
+@@ -203,6 +204,7 @@ static int status_handler(request_rec *r)
+ char *stat_buffer;
+ pid_t *pid_buffer, worker_pid;
+ int *thread_idle_buffer = NULL;
++ int *thread_graceful_buffer = NULL;
+ int *thread_busy_buffer = NULL;
+ clock_t tu, ts, tcu, tcs;
+ clock_t gu, gs, gcu, gcs;
+@@ -231,7 +233,8 @@ static int status_handler(request_rec *r)
+ #endif
+ #endif
+
+- ready = 0;
++ idle = 0;
++ graceful = 0;
+ busy = 0;
+ count = 0;
+ bcount = 0;
+@@ -250,6 +253,7 @@ static int status_handler(request_rec *r)
+ stat_buffer = apr_palloc(r->pool, server_limit * thread_limit * sizeof(char));
+ if (is_async) {
+ thread_idle_buffer = apr_palloc(r->pool, server_limit * sizeof(int));
++ thread_graceful_buffer = apr_palloc(r->pool, server_limit * sizeof(int));
+ thread_busy_buffer = apr_palloc(r->pool, server_limit * sizeof(int));
+ }
+
+@@ -318,6 +322,7 @@ static int status_handler(request_rec *r)
+ ps_record = ap_get_scoreboard_process(i);
+ if (is_async) {
+ thread_idle_buffer[i] = 0;
++ thread_graceful_buffer[i] = 0;
+ thread_busy_buffer[i] = 0;
+ }
+ for (j = 0; j < thread_limit; ++j) {
+@@ -336,18 +341,20 @@ static int status_handler(request_rec *r)
+ && ps_record->pid) {
+ if (res == SERVER_READY) {
+ if (ps_record->generation == mpm_generation)
+- ready++;
++ idle++;
+ if (is_async)
+ thread_idle_buffer[i]++;
+ }
+ else if (res != SERVER_DEAD &&
+ res != SERVER_STARTING &&
+ res != SERVER_IDLE_KILL) {
+- busy++;
+- if (is_async) {
+- if (res == SERVER_GRACEFUL)
+- thread_idle_buffer[i]++;
+- else
++ if (res == SERVER_GRACEFUL) {
++ graceful++;
++ if (is_async)
++ thread_graceful_buffer[i]++;
++ } else {
++ busy++;
++ if (is_async)
+ thread_busy_buffer[i]++;
+ }
+ }
+@@ -548,10 +555,10 @@ static int status_handler(request_rec *r)
+ } /* ap_extended_status */
+
+ if (!short_report)
+- ap_rprintf(r, "%d requests currently being processed, "
+- "%d idle workers \n", busy, ready);
++ ap_rprintf(r, "%d requests currently being processed, %d workers gracefully restarting, "
++ "%d idle workers \n", busy, graceful, idle);
+ else
+- ap_rprintf(r, "BusyWorkers: %d\nIdleWorkers: %d\n", busy, ready);
++ ap_rprintf(r, "BusyWorkers: %d\nGracefulWorkers: %d\nIdleWorkers: %d\n", busy, graceful, idle);
+
+ if (!short_report)
+ ap_rputs("", r);
+@@ -559,11 +566,6 @@ static int status_handler(request_rec *r)
+ if (is_async) {
+ int write_completion = 0, lingering_close = 0, keep_alive = 0,
+ connections = 0, stopping = 0, procs = 0;
+- /*
+- * These differ from 'busy' and 'ready' in how gracefully finishing
+- * threads are counted. XXX: How to make this clear in the html?
+- */
+- int busy_workers = 0, idle_workers = 0;
+ if (!short_report)
+ ap_rputs("\n\n\n"
+ "Slot "
+@@ -573,7 +575,7 @@ static int status_handler(request_rec *r)
+ "Threads "
+ "Async connections \n"
+ "total accepting "
+- "busy idle "
++ "busy graceful idle "
+ "writing keep-alive closing \n", r);
+ for (i = 0; i < server_limit; ++i) {
+ ps_record = ap_get_scoreboard_process(i);
+@@ -582,8 +584,6 @@ static int status_handler(request_rec *r)
+ write_completion += ps_record->write_completion;
+ keep_alive += ps_record->keep_alive;
+ lingering_close += ps_record->lingering_close;
+- busy_workers += thread_busy_buffer[i];
+- idle_workers += thread_idle_buffer[i];
+ procs++;
+ if (ps_record->quiescing) {
+ stopping++;
+@@ -599,7 +599,7 @@ static int status_handler(request_rec *r)
+ ap_rprintf(r, "%u %" APR_PID_T_FMT " "
+ "%s%s "
+ "%u %s "
+- "%u %u "
++ "%u %u %u "
+ "%u %u %u "
+ " \n",
+ i, ps_record->pid,
+@@ -607,6 +607,7 @@ static int status_handler(request_rec *r)
+ ps_record->connections,
+ ps_record->not_accepting ? "no" : "yes",
+ thread_busy_buffer[i],
++ thread_graceful_buffer[i],
+ thread_idle_buffer[i],
+ ps_record->write_completion,
+ ps_record->keep_alive,
+@@ -618,25 +619,22 @@ static int status_handler(request_rec *r)
+ ap_rprintf(r, "Sum "
+ "%d %d "
+ "%d "
+- "%d %d "
++ "%d %d %d "
+ "%d %d %d "
+ " \n
\n",
+ procs, stopping,
+ connections,
+- busy_workers, idle_workers,
++ busy, graceful, idle,
+ write_completion, keep_alive, lingering_close);
+ }
+ else {
+ ap_rprintf(r, "Processes: %d\n"
+ "Stopping: %d\n"
+- "BusyWorkers: %d\n"
+- "IdleWorkers: %d\n"
+ "ConnsTotal: %d\n"
+ "ConnsAsyncWriting: %d\n"
+ "ConnsAsyncKeepAlive: %d\n"
+ "ConnsAsyncClosing: %d\n",
+ procs, stopping,
+- busy_workers, idle_workers,
+ connections,
+ write_completion, keep_alive, lingering_close);
+ }
diff --git a/httpd-2.4.37-r1885607.patch b/httpd-2.4.37-r1885607.patch
new file mode 100644
index 0000000000000000000000000000000000000000..124d06ccaa6987a40d54b4d66714d3a6d35d9f08
--- /dev/null
+++ b/httpd-2.4.37-r1885607.patch
@@ -0,0 +1,849 @@
+diff --git a/modules/proxy/mod_proxy.h b/modules/proxy/mod_proxy.h
+index fbbd508..8fcd26d 100644
+--- a/modules/proxy/mod_proxy.h
++++ b/modules/proxy/mod_proxy.h
+@@ -1168,6 +1168,55 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p,
+ char **old_cl_val,
+ char **old_te_val);
+
++/**
++ * Prefetch the client request body (in memory), up to a limit.
++ * Read what's in the client pipe. If nonblocking is set and read is EAGAIN,
++ * pass a FLUSH bucket to the backend and read again in blocking mode.
++ * @param r client request
++ * @param backend backend connection
++ * @param input_brigade input brigade to use/fill
++ * @param block blocking or non-blocking mode
++ * @param bytes_read number of bytes read
++ * @param max_read maximum number of bytes to read
++ * @return OK or HTTP_* error code
++ * @note max_read is rounded up to APR_BUCKET_BUFF_SIZE
++ */
++PROXY_DECLARE(int) ap_proxy_prefetch_input(request_rec *r,
++ proxy_conn_rec *backend,
++ apr_bucket_brigade *input_brigade,
++ apr_read_type_e block,
++ apr_off_t *bytes_read,
++ apr_off_t max_read);
++
++/**
++ * Spool the client request body to memory, or disk above given limit.
++ * @param r client request
++ * @param backend backend connection
++ * @param input_brigade input brigade to use/fill
++ * @param bytes_spooled number of bytes spooled
++ * @param max_mem_spool maximum number of in-memory bytes
++ * @return OK or HTTP_* error code
++ */
++PROXY_DECLARE(int) ap_proxy_spool_input(request_rec *r,
++ proxy_conn_rec *backend,
++ apr_bucket_brigade *input_brigade,
++ apr_off_t *bytes_spooled,
++ apr_off_t max_mem_spool);
++
++/**
++ * Read what's in the client pipe. If the read would block (EAGAIN),
++ * pass a FLUSH bucket to the backend and read again in blocking mode.
++ * @param r client request
++ * @param backend backend connection
++ * @param input_brigade brigade to use/fill
++ * @param max_read maximum number of bytes to read
++ * @return OK or HTTP_* error code
++ */
++PROXY_DECLARE(int) ap_proxy_read_input(request_rec *r,
++ proxy_conn_rec *backend,
++ apr_bucket_brigade *input_brigade,
++ apr_off_t max_read);
++
+ /**
+ * @param bucket_alloc bucket allocator
+ * @param r request
+diff --git a/modules/proxy/mod_proxy_fcgi.c b/modules/proxy/mod_proxy_fcgi.c
+index 2e97408..f9cf716 100644
+--- a/modules/proxy/mod_proxy_fcgi.c
++++ b/modules/proxy/mod_proxy_fcgi.c
+@@ -521,7 +521,8 @@ static int handle_headers(request_rec *r, int *state,
+ static apr_status_t dispatch(proxy_conn_rec *conn, proxy_dir_conf *conf,
+ request_rec *r, apr_pool_t *setaside_pool,
+ apr_uint16_t request_id, const char **err,
+- int *bad_request, int *has_responded)
++ int *bad_request, int *has_responded,
++ apr_bucket_brigade *input_brigade)
+ {
+ apr_bucket_brigade *ib, *ob;
+ int seen_end_of_headers = 0, done = 0, ignore_body = 0;
+@@ -583,9 +584,26 @@ static apr_status_t dispatch(proxy_conn_rec *conn, proxy_dir_conf *conf,
+ int last_stdin = 0;
+ char *iobuf_cursor;
+
+- rv = ap_get_brigade(r->input_filters, ib,
+- AP_MODE_READBYTES, APR_BLOCK_READ,
+- iobuf_size);
++ if (APR_BRIGADE_EMPTY(input_brigade)) {
++ rv = ap_get_brigade(r->input_filters, ib,
++ AP_MODE_READBYTES, APR_BLOCK_READ,
++ iobuf_size);
++ }
++ else {
++ apr_bucket *e;
++ APR_BRIGADE_CONCAT(ib, input_brigade);
++ rv = apr_brigade_partition(ib, iobuf_size, &e);
++ if (rv == APR_SUCCESS) {
++ while (e != APR_BRIGADE_SENTINEL(ib)
++ && APR_BUCKET_IS_METADATA(e)) {
++ e = APR_BUCKET_NEXT(e);
++ }
++ apr_brigade_split_ex(ib, e, input_brigade);
++ }
++ else if (rv == APR_INCOMPLETE) {
++ rv = APR_SUCCESS;
++ }
++ }
+ if (rv != APR_SUCCESS) {
+ *err = "reading input brigade";
+ *bad_request = 1;
+@@ -924,7 +942,8 @@ static int fcgi_do_request(apr_pool_t *p, request_rec *r,
+ conn_rec *origin,
+ proxy_dir_conf *conf,
+ apr_uri_t *uri,
+- char *url, char *server_portstr)
++ char *url, char *server_portstr,
++ apr_bucket_brigade *input_brigade)
+ {
+ /* Request IDs are arbitrary numbers that we assign to a
+ * single request. This would allow multiplex/pipelining of
+@@ -960,7 +979,8 @@ static int fcgi_do_request(apr_pool_t *p, request_rec *r,
+
+ /* Step 3: Read records from the back end server and handle them. */
+ rv = dispatch(conn, conf, r, temp_pool, request_id,
+- &err, &bad_request, &has_responded);
++ &err, &bad_request, &has_responded,
++ input_brigade);
+ if (rv != APR_SUCCESS) {
+ /* If the client aborted the connection during retrieval or (partially)
+ * sending the response, don't return a HTTP_SERVICE_UNAVAILABLE, since
+@@ -996,6 +1016,8 @@ static int fcgi_do_request(apr_pool_t *p, request_rec *r,
+
+ #define FCGI_SCHEME "FCGI"
+
++#define MAX_MEM_SPOOL 16384
++
+ /*
+ * This handles fcgi:(dest) URLs
+ */
+@@ -1008,6 +1030,8 @@ static int proxy_fcgi_handler(request_rec *r, proxy_worker *worker,
+ char server_portstr[32];
+ conn_rec *origin = NULL;
+ proxy_conn_rec *backend = NULL;
++ apr_bucket_brigade *input_brigade;
++ apr_off_t input_bytes = 0;
+ apr_uri_t *uri;
+
+ proxy_dir_conf *dconf = ap_get_module_config(r->per_dir_config,
+@@ -1050,6 +1074,101 @@ static int proxy_fcgi_handler(request_rec *r, proxy_worker *worker,
+ goto cleanup;
+ }
+
++ /* We possibly reuse input data prefetched in previous call(s), e.g. for a
++ * balancer fallback scenario.
++ */
++ apr_pool_userdata_get((void **)&input_brigade, "proxy-fcgi-input", p);
++ if (input_brigade == NULL) {
++ const char *old_te = apr_table_get(r->headers_in, "Transfer-Encoding");
++ const char *old_cl = NULL;
++ if (old_te) {
++ apr_table_unset(r->headers_in, "Content-Length");
++ }
++ else {
++ old_cl = apr_table_get(r->headers_in, "Content-Length");
++ }
++
++ input_brigade = apr_brigade_create(p, r->connection->bucket_alloc);
++ apr_pool_userdata_setn(input_brigade, "proxy-fcgi-input", NULL, p);
++
++ /* Prefetch (nonlocking) the request body so to increase the chance
++ * to get the whole (or enough) body and determine Content-Length vs
++ * chunked or spooled. By doing this before connecting or reusing the
++ * backend, we want to minimize the delay between this connection is
++ * considered alive and the first bytes sent (should the client's link
++ * be slow or some input filter retain the data). This is a best effort
++ * to prevent the backend from closing (from under us) what it thinks is
++ * an idle connection, hence to reduce to the minimum the unavoidable
++ * local is_socket_connected() vs remote keepalive race condition.
++ */
++ status = ap_proxy_prefetch_input(r, backend, input_brigade,
++ APR_NONBLOCK_READ, &input_bytes,
++ MAX_MEM_SPOOL);
++ if (status != OK) {
++ goto cleanup;
++ }
++
++ /*
++ * The request body is streamed by default, using either C-L or
++ * chunked T-E, like this:
++ *
++ * The whole body (including no body) was received on prefetch, i.e.
++ * the input brigade ends with EOS => C-L = input_bytes.
++ *
++ * C-L is known and reliable, i.e. only protocol filters in the input
++ * chain thus none should change the body => use C-L from client.
++ *
++ * The administrator has not "proxy-sendcl" which prevents T-E => use
++ * T-E and chunks.
++ *
++ * Otherwise we need to determine and set a content-length, so spool
++ * the entire request body to memory/temporary file (MAX_MEM_SPOOL),
++ * such that we finally know its length => C-L = input_bytes.
++ */
++ if (!APR_BRIGADE_EMPTY(input_brigade)
++ && APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) {
++ /* The whole thing fit, so our decision is trivial, use the input
++ * bytes for the Content-Length. If we expected no body, and read
++ * no body, do not set the Content-Length.
++ */
++ if (old_cl || old_te || input_bytes) {
++ apr_table_setn(r->headers_in, "Content-Length",
++ apr_off_t_toa(p, input_bytes));
++ if (old_te) {
++ apr_table_unset(r->headers_in, "Transfer-Encoding");
++ }
++ }
++ }
++ else if (old_cl && r->input_filters == r->proto_input_filters) {
++ /* Streaming is possible by preserving the existing C-L */
++ }
++ else if (!apr_table_get(r->subprocess_env, "proxy-sendcl")) {
++ /* Streaming is possible using T-E: chunked */
++ }
++ else {
++ /* No streaming, C-L is the only option so spool to memory/file */
++ apr_bucket_brigade *tmp_bb;
++ apr_off_t remaining_bytes = 0;
++
++ AP_DEBUG_ASSERT(MAX_MEM_SPOOL >= input_bytes);
++ tmp_bb = apr_brigade_create(p, r->connection->bucket_alloc);
++ status = ap_proxy_spool_input(r, backend, tmp_bb, &remaining_bytes,
++ MAX_MEM_SPOOL - input_bytes);
++ if (status != OK) {
++ goto cleanup;
++ }
++
++ APR_BRIGADE_CONCAT(input_brigade, tmp_bb);
++ input_bytes += remaining_bytes;
++
++ apr_table_setn(r->headers_in, "Content-Length",
++ apr_off_t_toa(p, input_bytes));
++ if (old_te) {
++ apr_table_unset(r->headers_in, "Transfer-Encoding");
++ }
++ }
++ }
++
+ /* This scheme handler does not reuse connections by default, to
+ * avoid tying up a fastcgi that isn't expecting to work on
+ * parallel requests. But if the user went out of their way to
+@@ -1074,7 +1193,7 @@ static int proxy_fcgi_handler(request_rec *r, proxy_worker *worker,
+
+ /* Step Three: Process the Request */
+ status = fcgi_do_request(p, r, backend, origin, dconf, uri, url,
+- server_portstr);
++ server_portstr, input_brigade);
+
+ cleanup:
+ ap_proxy_release_connection(FCGI_SCHEME, backend, r->server);
+diff --git a/modules/proxy/mod_proxy_http.c b/modules/proxy/mod_proxy_http.c
+index df10997..7f67f26 100644
+--- a/modules/proxy/mod_proxy_http.c
++++ b/modules/proxy/mod_proxy_http.c
+@@ -266,50 +266,6 @@ typedef struct {
+ prefetch_nonblocking:1;
+ } proxy_http_req_t;
+
+-/* Read what's in the client pipe. If nonblocking is set and read is EAGAIN,
+- * pass a FLUSH bucket to the backend and read again in blocking mode.
+- */
+-static int stream_reqbody_read(proxy_http_req_t *req, apr_bucket_brigade *bb,
+- int nonblocking)
+-{
+- request_rec *r = req->r;
+- proxy_conn_rec *p_conn = req->backend;
+- apr_bucket_alloc_t *bucket_alloc = req->bucket_alloc;
+- apr_read_type_e block = nonblocking ? APR_NONBLOCK_READ : APR_BLOCK_READ;
+- apr_status_t status;
+- int rv;
+-
+- for (;;) {
+- status = ap_get_brigade(r->input_filters, bb, AP_MODE_READBYTES,
+- block, HUGE_STRING_LEN);
+- if (block == APR_BLOCK_READ
+- || (!APR_STATUS_IS_EAGAIN(status)
+- && (status != APR_SUCCESS || !APR_BRIGADE_EMPTY(bb)))) {
+- break;
+- }
+-
+- /* Flush and retry (blocking) */
+- apr_brigade_cleanup(bb);
+- rv = ap_proxy_pass_brigade(bucket_alloc, r, p_conn, req->origin, bb, 1);
+- if (rv != OK) {
+- return rv;
+- }
+- block = APR_BLOCK_READ;
+- }
+-
+- if (status != APR_SUCCESS) {
+- conn_rec *c = r->connection;
+- ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(02608)
+- "read request body failed to %pI (%s)"
+- " from %s (%s)", p_conn->addr,
+- p_conn->hostname ? p_conn->hostname: "",
+- c->client_ip, c->remote_host ? c->remote_host: "");
+- return ap_map_http_request_error(status, HTTP_BAD_REQUEST);
+- }
+-
+- return OK;
+-}
+-
+ static int stream_reqbody(proxy_http_req_t *req)
+ {
+ request_rec *r = req->r;
+@@ -328,7 +284,8 @@ static int stream_reqbody(proxy_http_req_t *req)
+ do {
+ if (APR_BRIGADE_EMPTY(input_brigade)
+ && APR_BRIGADE_EMPTY(header_brigade)) {
+- rv = stream_reqbody_read(req, input_brigade, 1);
++ rv = ap_proxy_read_input(r, p_conn, input_brigade,
++ HUGE_STRING_LEN);
+ if (rv != OK) {
+ return rv;
+ }
+@@ -409,7 +366,7 @@ static int stream_reqbody(proxy_http_req_t *req)
+ */
+ APR_BRIGADE_PREPEND(input_brigade, header_brigade);
+
+- /* Flush here on EOS because we won't stream_reqbody_read() again */
++ /* Flush here on EOS because we won't ap_proxy_read_input() again. */
+ rv = ap_proxy_pass_brigade(bucket_alloc, r, p_conn, origin,
+ input_brigade, seen_eos);
+ if (rv != OK) {
+@@ -427,137 +384,6 @@ static int stream_reqbody(proxy_http_req_t *req)
+ return OK;
+ }
+
+-static int spool_reqbody_cl(proxy_http_req_t *req, apr_off_t *bytes_spooled)
+-{
+- apr_pool_t *p = req->p;
+- request_rec *r = req->r;
+- int seen_eos = 0, rv = OK;
+- apr_status_t status = APR_SUCCESS;
+- apr_bucket_alloc_t *bucket_alloc = req->bucket_alloc;
+- apr_bucket_brigade *input_brigade = req->input_brigade;
+- apr_bucket_brigade *body_brigade;
+- apr_bucket *e;
+- apr_off_t bytes, fsize = 0;
+- apr_file_t *tmpfile = NULL;
+- apr_off_t limit;
+-
+- body_brigade = apr_brigade_create(p, bucket_alloc);
+- *bytes_spooled = 0;
+-
+- limit = ap_get_limit_req_body(r);
+-
+- do {
+- if (APR_BRIGADE_EMPTY(input_brigade)) {
+- rv = stream_reqbody_read(req, input_brigade, 0);
+- if (rv != OK) {
+- return rv;
+- }
+- }
+-
+- /* If this brigade contains EOS, either stop or remove it. */
+- if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) {
+- seen_eos = 1;
+- }
+-
+- apr_brigade_length(input_brigade, 1, &bytes);
+-
+- if (*bytes_spooled + bytes > MAX_MEM_SPOOL) {
+- /*
+- * LimitRequestBody does not affect Proxy requests (Should it?).
+- * Let it take effect if we decide to store the body in a
+- * temporary file on disk.
+- */
+- if (limit && (*bytes_spooled + bytes > limit)) {
+- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01088)
+- "Request body is larger than the configured "
+- "limit of %" APR_OFF_T_FMT, limit);
+- return HTTP_REQUEST_ENTITY_TOO_LARGE;
+- }
+- /* can't spool any more in memory; write latest brigade to disk */
+- if (tmpfile == NULL) {
+- const char *temp_dir;
+- char *template;
+-
+- status = apr_temp_dir_get(&temp_dir, p);
+- if (status != APR_SUCCESS) {
+- ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01089)
+- "search for temporary directory failed");
+- return HTTP_INTERNAL_SERVER_ERROR;
+- }
+- apr_filepath_merge(&template, temp_dir,
+- "modproxy.tmp.XXXXXX",
+- APR_FILEPATH_NATIVE, p);
+- status = apr_file_mktemp(&tmpfile, template, 0, p);
+- if (status != APR_SUCCESS) {
+- ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01090)
+- "creation of temporary file in directory "
+- "%s failed", temp_dir);
+- return HTTP_INTERNAL_SERVER_ERROR;
+- }
+- }
+- for (e = APR_BRIGADE_FIRST(input_brigade);
+- e != APR_BRIGADE_SENTINEL(input_brigade);
+- e = APR_BUCKET_NEXT(e)) {
+- const char *data;
+- apr_size_t bytes_read, bytes_written;
+-
+- apr_bucket_read(e, &data, &bytes_read, APR_BLOCK_READ);
+- status = apr_file_write_full(tmpfile, data, bytes_read, &bytes_written);
+- if (status != APR_SUCCESS) {
+- const char *tmpfile_name;
+-
+- if (apr_file_name_get(&tmpfile_name, tmpfile) != APR_SUCCESS) {
+- tmpfile_name = "(unknown)";
+- }
+- ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01091)
+- "write to temporary file %s failed",
+- tmpfile_name);
+- return HTTP_INTERNAL_SERVER_ERROR;
+- }
+- AP_DEBUG_ASSERT(bytes_read == bytes_written);
+- fsize += bytes_written;
+- }
+- apr_brigade_cleanup(input_brigade);
+- }
+- else {
+-
+- /*
+- * Save input_brigade in body_brigade. (At least) in the SSL case
+- * input_brigade contains transient buckets whose data would get
+- * overwritten during the next call of ap_get_brigade in the loop.
+- * ap_save_brigade ensures these buckets to be set aside.
+- * Calling ap_save_brigade with NULL as filter is OK, because
+- * body_brigade already has been created and does not need to get
+- * created by ap_save_brigade.
+- */
+- status = ap_save_brigade(NULL, &body_brigade, &input_brigade, p);
+- if (status != APR_SUCCESS) {
+- return HTTP_INTERNAL_SERVER_ERROR;
+- }
+-
+- }
+-
+- *bytes_spooled += bytes;
+- } while (!seen_eos);
+-
+- APR_BRIGADE_CONCAT(input_brigade, body_brigade);
+- if (tmpfile) {
+- apr_brigade_insert_file(input_brigade, tmpfile, 0, fsize, p);
+- }
+- if (apr_table_get(r->subprocess_env, "proxy-sendextracrlf")) {
+- e = apr_bucket_immortal_create(CRLF_ASCII, 2, bucket_alloc);
+- APR_BRIGADE_INSERT_TAIL(input_brigade, e);
+- }
+- if (tmpfile) {
+- /* We dropped metadata buckets when spooling to tmpfile,
+- * terminate with EOS for stream_reqbody() to flush the
+- * whole in one go.
+- */
+- e = apr_bucket_eos_create(bucket_alloc);
+- APR_BRIGADE_INSERT_TAIL(input_brigade, e);
+- }
+- return OK;
+-}
+
+ static int ap_proxy_http_prefetch(proxy_http_req_t *req,
+ apr_uri_t *uri, char *url)
+@@ -569,14 +395,12 @@ static int ap_proxy_http_prefetch(proxy_http_req_t *req,
+ apr_bucket_alloc_t *bucket_alloc = req->bucket_alloc;
+ apr_bucket_brigade *header_brigade = req->header_brigade;
+ apr_bucket_brigade *input_brigade = req->input_brigade;
+- apr_bucket_brigade *temp_brigade;
+ apr_bucket *e;
+- char *buf;
+ apr_status_t status;
++ char *buf;
+ apr_off_t bytes_read = 0;
+ apr_off_t bytes;
+ int force10, rv;
+- apr_read_type_e block;
+ conn_rec *origin = p_conn->connection;
+
+ if (apr_table_get(r->subprocess_env, "force-proxy-request-1.0")) {
+@@ -641,69 +465,12 @@ static int ap_proxy_http_prefetch(proxy_http_req_t *req,
+ p_conn->close = 1;
+ }
+
+- /* Prefetch MAX_MEM_SPOOL bytes
+- *
+- * This helps us avoid any election of C-L v.s. T-E
+- * request bodies, since we are willing to keep in
+- * memory this much data, in any case. This gives
+- * us an instant C-L election if the body is of some
+- * reasonable size.
+- */
+- temp_brigade = apr_brigade_create(p, bucket_alloc);
+- block = req->prefetch_nonblocking ? APR_NONBLOCK_READ : APR_BLOCK_READ;
+-
+- /* Account for saved input, if any. */
+- apr_brigade_length(input_brigade, 0, &bytes_read);
+-
+- /* Ensure we don't hit a wall where we have a buffer too small
+- * for ap_get_brigade's filters to fetch us another bucket,
+- * surrender once we hit 80 bytes less than MAX_MEM_SPOOL
+- * (an arbitrary value).
+- */
+- while (bytes_read < MAX_MEM_SPOOL - 80
+- && (APR_BRIGADE_EMPTY(input_brigade)
+- || !APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade)))) {
+- status = ap_get_brigade(r->input_filters, temp_brigade,
+- AP_MODE_READBYTES, block,
+- MAX_MEM_SPOOL - bytes_read);
+- /* ap_get_brigade may return success with an empty brigade
+- * for a non-blocking read which would block
+- */
+- if (block == APR_NONBLOCK_READ
+- && ((status == APR_SUCCESS && APR_BRIGADE_EMPTY(temp_brigade))
+- || APR_STATUS_IS_EAGAIN(status))) {
+- break;
+- }
+- if (status != APR_SUCCESS) {
+- ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01095)
+- "prefetch request body failed to %pI (%s)"
+- " from %s (%s)",
+- p_conn->addr, p_conn->hostname ? p_conn->hostname: "",
+- c->client_ip, c->remote_host ? c->remote_host: "");
+- return ap_map_http_request_error(status, HTTP_BAD_REQUEST);
+- }
+-
+- apr_brigade_length(temp_brigade, 1, &bytes);
+- bytes_read += bytes;
+-
+- /*
+- * Save temp_brigade in input_brigade. (At least) in the SSL case
+- * temp_brigade contains transient buckets whose data would get
+- * overwritten during the next call of ap_get_brigade in the loop.
+- * ap_save_brigade ensures these buckets to be set aside.
+- * Calling ap_save_brigade with NULL as filter is OK, because
+- * input_brigade already has been created and does not need to get
+- * created by ap_save_brigade.
+- */
+- status = ap_save_brigade(NULL, &input_brigade, &temp_brigade, p);
+- if (status != APR_SUCCESS) {
+- ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01096)
+- "processing prefetched request body failed"
+- " to %pI (%s) from %s (%s)",
+- p_conn->addr, p_conn->hostname ? p_conn->hostname: "",
+- c->client_ip, c->remote_host ? c->remote_host: "");
+- return HTTP_INTERNAL_SERVER_ERROR;
+- }
++ rv = ap_proxy_prefetch_input(r, req->backend, input_brigade,
++ req->prefetch_nonblocking ? APR_NONBLOCK_READ
++ : APR_BLOCK_READ,
++ &bytes_read, MAX_MEM_SPOOL);
++ if (rv != OK) {
++ return rv;
+ }
+
+ /* Use chunked request body encoding or send a content-length body?
+@@ -772,7 +539,7 @@ static int ap_proxy_http_prefetch(proxy_http_req_t *req,
+ char *endstr;
+ status = apr_strtoff(&req->cl_val, req->old_cl_val, &endstr, 10);
+ if (status != APR_SUCCESS || *endstr || req->cl_val < 0) {
+- ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01085)
++ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01085)
+ "could not parse request Content-Length (%s)",
+ req->old_cl_val);
+ return HTTP_BAD_REQUEST;
+@@ -812,7 +579,8 @@ static int ap_proxy_http_prefetch(proxy_http_req_t *req,
+ /* If we have to spool the body, do it now, before connecting or
+ * reusing the backend connection.
+ */
+- rv = spool_reqbody_cl(req, &bytes);
++ rv = ap_proxy_spool_input(r, p_conn, input_brigade,
++ &bytes, MAX_MEM_SPOOL);
+ if (rv != OK) {
+ return rv;
+ }
+diff --git a/modules/proxy/proxy_util.c b/modules/proxy/proxy_util.c
+index ab88d8f..973aa83 100644
+--- a/modules/proxy/proxy_util.c
++++ b/modules/proxy/proxy_util.c
+@@ -3866,6 +3866,268 @@ PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p,
+ return OK;
+ }
+
++PROXY_DECLARE(int) ap_proxy_prefetch_input(request_rec *r,
++ proxy_conn_rec *backend,
++ apr_bucket_brigade *input_brigade,
++ apr_read_type_e block,
++ apr_off_t *bytes_read,
++ apr_off_t max_read)
++{
++ apr_pool_t *p = r->pool;
++ conn_rec *c = r->connection;
++ apr_bucket_brigade *temp_brigade;
++ apr_status_t status;
++ apr_off_t bytes;
++
++ *bytes_read = 0;
++ if (max_read < APR_BUCKET_BUFF_SIZE) {
++ max_read = APR_BUCKET_BUFF_SIZE;
++ }
++
++ /* Prefetch max_read bytes
++ *
++ * This helps us avoid any election of C-L v.s. T-E
++ * request bodies, since we are willing to keep in
++ * memory this much data, in any case. This gives
++ * us an instant C-L election if the body is of some
++ * reasonable size.
++ */
++ temp_brigade = apr_brigade_create(p, input_brigade->bucket_alloc);
++
++ /* Account for saved input, if any. */
++ apr_brigade_length(input_brigade, 0, bytes_read);
++
++ /* Ensure we don't hit a wall where we have a buffer too small for
++ * ap_get_brigade's filters to fetch us another bucket, surrender
++ * once we hit 80 bytes (an arbitrary value) less than max_read.
++ */
++ while (*bytes_read < max_read - 80
++ && (APR_BRIGADE_EMPTY(input_brigade)
++ || !APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade)))) {
++ status = ap_get_brigade(r->input_filters, temp_brigade,
++ AP_MODE_READBYTES, block,
++ max_read - *bytes_read);
++ /* ap_get_brigade may return success with an empty brigade
++ * for a non-blocking read which would block
++ */
++ if (block == APR_NONBLOCK_READ
++ && ((status == APR_SUCCESS && APR_BRIGADE_EMPTY(temp_brigade))
++ || APR_STATUS_IS_EAGAIN(status))) {
++ break;
++ }
++ if (status != APR_SUCCESS) {
++ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01095)
++ "prefetch request body failed to %pI (%s)"
++ " from %s (%s)", backend->addr,
++ backend->hostname ? backend->hostname : "",
++ c->client_ip, c->remote_host ? c->remote_host : "");
++ return ap_map_http_request_error(status, HTTP_BAD_REQUEST);
++ }
++
++ apr_brigade_length(temp_brigade, 1, &bytes);
++ *bytes_read += bytes;
++
++ /*
++ * Save temp_brigade in input_brigade. (At least) in the SSL case
++ * temp_brigade contains transient buckets whose data would get
++ * overwritten during the next call of ap_get_brigade in the loop.
++ * ap_save_brigade ensures these buckets to be set aside.
++ * Calling ap_save_brigade with NULL as filter is OK, because
++ * input_brigade already has been created and does not need to get
++ * created by ap_save_brigade.
++ */
++ status = ap_save_brigade(NULL, &input_brigade, &temp_brigade, p);
++ if (status != APR_SUCCESS) {
++ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01096)
++ "processing prefetched request body failed"
++ " to %pI (%s) from %s (%s)", backend->addr,
++ backend->hostname ? backend->hostname : "",
++ c->client_ip, c->remote_host ? c->remote_host : "");
++ return HTTP_INTERNAL_SERVER_ERROR;
++ }
++ }
++
++ return OK;
++}
++
++PROXY_DECLARE(int) ap_proxy_read_input(request_rec *r,
++ proxy_conn_rec *backend,
++ apr_bucket_brigade *bb,
++ apr_off_t max_read)
++{
++ apr_bucket_alloc_t *bucket_alloc = bb->bucket_alloc;
++ apr_read_type_e block = (backend->connection) ? APR_NONBLOCK_READ
++ : APR_BLOCK_READ;
++ apr_status_t status;
++ int rv;
++
++ for (;;) {
++ apr_brigade_cleanup(bb);
++ status = ap_get_brigade(r->input_filters, bb, AP_MODE_READBYTES,
++ block, max_read);
++ if (block == APR_BLOCK_READ
++ || (!(status == APR_SUCCESS && APR_BRIGADE_EMPTY(bb))
++ && !APR_STATUS_IS_EAGAIN(status))) {
++ break;
++ }
++
++ /* Flush and retry (blocking) */
++ apr_brigade_cleanup(bb);
++ rv = ap_proxy_pass_brigade(bucket_alloc, r, backend,
++ backend->connection, bb, 1);
++ if (rv != OK) {
++ return rv;
++ }
++ block = APR_BLOCK_READ;
++ }
++
++ if (status != APR_SUCCESS) {
++ conn_rec *c = r->connection;
++ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(02608)
++ "read request body failed to %pI (%s)"
++ " from %s (%s)", backend->addr,
++ backend->hostname ? backend->hostname : "",
++ c->client_ip, c->remote_host ? c->remote_host : "");
++ return ap_map_http_request_error(status, HTTP_BAD_REQUEST);
++ }
++
++ return OK;
++}
++
++PROXY_DECLARE(int) ap_proxy_spool_input(request_rec *r,
++ proxy_conn_rec *backend,
++ apr_bucket_brigade *input_brigade,
++ apr_off_t *bytes_spooled,
++ apr_off_t max_mem_spool)
++{
++ apr_pool_t *p = r->pool;
++ int seen_eos = 0, rv = OK;
++ apr_status_t status = APR_SUCCESS;
++ apr_bucket_alloc_t *bucket_alloc = input_brigade->bucket_alloc;
++ apr_bucket_brigade *body_brigade;
++ apr_bucket *e;
++ apr_off_t bytes, fsize = 0;
++ apr_file_t *tmpfile = NULL;
++ apr_off_t limit;
++
++ *bytes_spooled = 0;
++ body_brigade = apr_brigade_create(p, bucket_alloc);
++
++ limit = ap_get_limit_req_body(r);
++
++ do {
++ if (APR_BRIGADE_EMPTY(input_brigade)) {
++ rv = ap_proxy_read_input(r, backend, input_brigade,
++ HUGE_STRING_LEN);
++ if (rv != OK) {
++ return rv;
++ }
++ }
++
++ /* If this brigade contains EOS, either stop or remove it. */
++ if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) {
++ seen_eos = 1;
++ }
++
++ apr_brigade_length(input_brigade, 1, &bytes);
++
++ if (*bytes_spooled + bytes > max_mem_spool) {
++ /*
++ * LimitRequestBody does not affect Proxy requests (Should it?).
++ * Let it take effect if we decide to store the body in a
++ * temporary file on disk.
++ */
++ if (limit && (*bytes_spooled + bytes > limit)) {
++ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01088)
++ "Request body is larger than the configured "
++ "limit of %" APR_OFF_T_FMT, limit);
++ return HTTP_REQUEST_ENTITY_TOO_LARGE;
++ }
++ /* can't spool any more in memory; write latest brigade to disk */
++ if (tmpfile == NULL) {
++ const char *temp_dir;
++ char *template;
++
++ status = apr_temp_dir_get(&temp_dir, p);
++ if (status != APR_SUCCESS) {
++ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01089)
++ "search for temporary directory failed");
++ return HTTP_INTERNAL_SERVER_ERROR;
++ }
++ apr_filepath_merge(&template, temp_dir,
++ "modproxy.tmp.XXXXXX",
++ APR_FILEPATH_NATIVE, p);
++ status = apr_file_mktemp(&tmpfile, template, 0, p);
++ if (status != APR_SUCCESS) {
++ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01090)
++ "creation of temporary file in directory "
++ "%s failed", temp_dir);
++ return HTTP_INTERNAL_SERVER_ERROR;
++ }
++ }
++ for (e = APR_BRIGADE_FIRST(input_brigade);
++ e != APR_BRIGADE_SENTINEL(input_brigade);
++ e = APR_BUCKET_NEXT(e)) {
++ const char *data;
++ apr_size_t bytes_read, bytes_written;
++
++ apr_bucket_read(e, &data, &bytes_read, APR_BLOCK_READ);
++ status = apr_file_write_full(tmpfile, data, bytes_read, &bytes_written);
++ if (status != APR_SUCCESS) {
++ const char *tmpfile_name;
++
++ if (apr_file_name_get(&tmpfile_name, tmpfile) != APR_SUCCESS) {
++ tmpfile_name = "(unknown)";
++ }
++ ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01091)
++ "write to temporary file %s failed",
++ tmpfile_name);
++ return HTTP_INTERNAL_SERVER_ERROR;
++ }
++ AP_DEBUG_ASSERT(bytes_read == bytes_written);
++ fsize += bytes_written;
++ }
++ apr_brigade_cleanup(input_brigade);
++ }
++ else {
++
++ /*
++ * Save input_brigade in body_brigade. (At least) in the SSL case
++ * input_brigade contains transient buckets whose data would get
++ * overwritten during the next call of ap_get_brigade in the loop.
++ * ap_save_brigade ensures these buckets to be set aside.
++ * Calling ap_save_brigade with NULL as filter is OK, because
++ * body_brigade already has been created and does not need to get
++ * created by ap_save_brigade.
++ */
++ status = ap_save_brigade(NULL, &body_brigade, &input_brigade, p);
++ if (status != APR_SUCCESS) {
++ return HTTP_INTERNAL_SERVER_ERROR;
++ }
++
++ }
++
++ *bytes_spooled += bytes;
++ } while (!seen_eos);
++
++ APR_BRIGADE_CONCAT(input_brigade, body_brigade);
++ if (tmpfile) {
++ apr_brigade_insert_file(input_brigade, tmpfile, 0, fsize, p);
++ }
++ if (apr_table_get(r->subprocess_env, "proxy-sendextracrlf")) {
++ e = apr_bucket_immortal_create(CRLF_ASCII, 2, bucket_alloc);
++ APR_BRIGADE_INSERT_TAIL(input_brigade, e);
++ }
++ if (tmpfile) {
++ /* We dropped metadata buckets when spooling to tmpfile,
++ * terminate with EOS to allow for flushing in a one go.
++ */
++ e = apr_bucket_eos_create(bucket_alloc);
++ APR_BRIGADE_INSERT_TAIL(input_brigade, e);
++ }
++ return OK;
++}
++
+ PROXY_DECLARE(int) ap_proxy_pass_brigade(apr_bucket_alloc_t *bucket_alloc,
+ request_rec *r, proxy_conn_rec *p_conn,
+ conn_rec *origin, apr_bucket_brigade *bb,
diff --git a/httpd.spec b/httpd.spec
index 43bf3807d794bcc89a4b5f4f7d703e2f9f9067a5..bc4d94e813369e6ad456cb4a2042c8a82dff9686 100644
--- a/httpd.spec
+++ b/httpd.spec
@@ -11,283 +11,288 @@
%global mpm prefork
%endif
-Summary: Apache HTTP Server
-Name: httpd
-Version: 2.4.37
-Release: 56%{anolis_release}%{?dist}.6
-URL: https://httpd.apache.org/
-Source0: https://www.apache.org/dist/httpd/httpd-%{version}.tar.bz2
-Source2: httpd.logrotate
-Source3: instance.conf
-Source4: httpd-ssl-pass-dialog
-Source5: httpd.tmpfiles
-Source6: httpd.service
-Source7: action-graceful.sh
-Source8: action-configtest.sh
-Source10: httpd.conf
-Source11: 00-base.conf
-Source12: 00-mpm.conf
-Source13: 00-lua.conf
-Source14: 01-cgi.conf
-Source15: 00-dav.conf
-Source16: 00-proxy.conf
-Source17: 00-ssl.conf
-Source18: 01-ldap.conf
-Source19: 00-proxyhtml.conf
-Source20: userdir.conf
-Source21: ssl.conf
-Source22: welcome.conf
-Source23: manual.conf
-Source24: 00-systemd.conf
-Source25: 01-session.conf
-Source26: 10-listen443.conf
-Source27: httpd.socket
-Source28: 00-optional.conf
+Summary: Apache HTTP Server
+Name: httpd
+Version: 2.4.37
+Release: 62%{anolis_release}%{?dist}
+URL: https://httpd.apache.org/
+Source0: https://www.apache.org/dist/httpd/httpd-%{version}.tar.bz2
+Source2: httpd.logrotate
+Source3: instance.conf
+Source4: httpd-ssl-pass-dialog
+Source5: httpd.tmpfiles
+Source6: httpd.service
+Source7: action-graceful.sh
+Source8: action-configtest.sh
+Source10: httpd.conf
+Source11: 00-base.conf
+Source12: 00-mpm.conf
+Source13: 00-lua.conf
+Source14: 01-cgi.conf
+Source15: 00-dav.conf
+Source16: 00-proxy.conf
+Source17: 00-ssl.conf
+Source18: 01-ldap.conf
+Source19: 00-proxyhtml.conf
+Source20: userdir.conf
+Source21: ssl.conf
+Source22: welcome.conf
+Source23: manual.conf
+Source24: 00-systemd.conf
+Source25: 01-session.conf
+Source26: 10-listen443.conf
+Source27: httpd.socket
+Source28: 00-optional.conf
# Documentation
-Source30: README.confd
-Source31: README.confmod
-Source32: httpd.service.xml
-Source33: htcacheclean.service.xml
-Source34: httpd.conf.xml
-Source40: htcacheclean.service
-Source41: htcacheclean.sysconf
-Source42: httpd-init.service
-Source43: httpd-ssl-gencerts
-Source44: httpd@.service
-Source45: config.layout
-Source46: apache-poweredby.png
+Source30: README.confd
+Source31: README.confmod
+Source32: httpd.service.xml
+Source33: htcacheclean.service.xml
+Source34: httpd.conf.xml
+Source40: htcacheclean.service
+Source41: htcacheclean.sysconf
+Source42: httpd-init.service
+Source43: httpd-ssl-gencerts
+Source44: httpd@.service
+Source45: config.layout
+Source46: apache-poweredby.png
# build/scripts patches
# http://bugzilla.redhat.com/show_bug.cgi?id=1231924
# http://bugzilla.redhat.com/show_bug.cgi?id=842736
# http://bugzilla.redhat.com/show_bug.cgi?id=1214401
-Patch1: httpd-2.4.35-apachectl.patch
-Patch2: httpd-2.4.28-apxs.patch
-Patch3: httpd-2.4.35-deplibs.patch
+Patch1: httpd-2.4.35-apachectl.patch
+Patch2: httpd-2.4.28-apxs.patch
+Patch3: httpd-2.4.35-deplibs.patch
# Needed for socket activation and mod_systemd patch
-Patch19: httpd-2.4.35-detect-systemd.patch
+Patch19: httpd-2.4.35-detect-systemd.patch
# Features/functional changes
-Patch20: httpd-2.4.32-export.patch
-Patch21: httpd-2.4.35-corelimit.patch
-Patch22: httpd-2.4.35-selinux.patch
+Patch20: httpd-2.4.32-export.patch
+Patch21: httpd-2.4.35-corelimit.patch
+Patch22: httpd-2.4.35-selinux.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1170215
-Patch23: httpd-2.4.28-icons.patch
-Patch24: httpd-2.4.35-systemd.patch
-Patch25: httpd-2.4.35-cachehardmax.patch
-Patch26: httpd-2.4.28-socket-activation.patch
+Patch23: httpd-2.4.28-icons.patch
+Patch24: httpd-2.4.35-systemd.patch
+Patch25: httpd-2.4.35-cachehardmax.patch
+Patch26: httpd-2.4.28-socket-activation.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1109119
-Patch27: httpd-2.4.35-sslciphdefault.patch
+Patch27: httpd-2.4.35-sslciphdefault.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1332242
-Patch28: httpd-2.4.28-statements-comment.patch
+Patch28: httpd-2.4.28-statements-comment.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=811714
-Patch29: httpd-2.4.35-full-release.patch
-Patch30: httpd-2.4.35-freebind.patch
-Patch31: httpd-2.4.35-r1830819+.patch
+Patch29: httpd-2.4.35-full-release.patch
+Patch30: httpd-2.4.35-freebind.patch
+Patch31: httpd-2.4.35-r1830819+.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1638738
-Patch32: httpd-2.4.37-sslprotdefault.patch
+Patch32: httpd-2.4.37-sslprotdefault.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1747898
-Patch33: httpd-2.4.37-mod-md-mod-ssl-hooks.patch
+Patch33: httpd-2.4.37-mod-md-mod-ssl-hooks.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1725031
-Patch34: httpd-2.4.37-r1861793+.patch
+Patch34: httpd-2.4.37-r1861793+.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1704317
-Patch35: httpd-2.4.37-sslkeylogfile-support.patch
+Patch35: httpd-2.4.37-sslkeylogfile-support.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1794728
-Patch36: httpd-2.4.37-session-expiry-updt-int.patch
+Patch36: httpd-2.4.37-session-expiry-updt-int.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1209162
-Patch37: httpd-2.4.37-logjournal.patch
+Patch37: httpd-2.4.37-logjournal.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1869576
-Patch38: httpd-2.4.37-pr37355.patch
+Patch38: httpd-2.4.37-pr37355.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1896176
-Patch39: httpd-2.4.37-proxy-ws-idle-timeout.patch
+Patch39: httpd-2.4.37-proxy-ws-idle-timeout.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1883648
-Patch40: httpd-2.4.37-ssl-proxy-chains.patch
+Patch40: httpd-2.4.37-ssl-proxy-chains.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1935742
-Patch41: httpd-2.4.37-usertrack-samesite.patch
+Patch41: httpd-2.4.37-usertrack-samesite.patch
# Bug fixes
# https://bugzilla.redhat.com/show_bug.cgi?id=1397243
-Patch61: httpd-2.4.35-r1738878.patch
+Patch61: httpd-2.4.35-r1738878.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1170206
-Patch62: httpd-2.4.35-r1633085.patch
+Patch62: httpd-2.4.35-r1633085.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1448892
-Patch63: httpd-2.4.28-r1811831.patch
+Patch63: httpd-2.4.28-r1811831.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1602548
-Patch65: httpd-2.4.35-r1842888.patch
+Patch65: httpd-2.4.35-r1842888.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1653009
# https://bugzilla.redhat.com/show_bug.cgi?id=1672977
# https://bugzilla.redhat.com/show_bug.cgi?id=1673022
-Patch66: httpd-2.4.37-r1842929+.patch
+Patch66: httpd-2.4.37-r1842929+.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1630432
-Patch67: httpd-2.4.35-r1825120.patch
+Patch67: httpd-2.4.35-r1825120.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1670716
-Patch68: httpd-2.4.37-fips-segfault.patch
+Patch68: httpd-2.4.37-fips-segfault.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1669221
-Patch70: httpd-2.4.37-r1840554.patch
+Patch70: httpd-2.4.37-r1840554.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1673022
-Patch71: httpd-2.4.37-mod-md-perms.patch
+Patch71: httpd-2.4.37-mod-md-perms.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1724549
-Patch72: httpd-2.4.37-mod-mime-magic-strdup.patch
+Patch72: httpd-2.4.37-mod-mime-magic-strdup.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1724034
-Patch73: httpd-2.4.35-ocsp-wrong-ctx.patch
+Patch73: httpd-2.4.35-ocsp-wrong-ctx.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1633224
-Patch74: httpd-2.4.37-r1828172+.patch
+Patch74: httpd-2.4.37-r1828172+.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1775158
-Patch75: httpd-2.4.37-r1870095+.patch
+Patch75: httpd-2.4.37-r1870095+.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1771847
-Patch76: httpd-2.4.37-proxy-continue.patch
-Patch77: httpd-2.4.37-balancer-failover.patch
+Patch76: httpd-2.4.37-proxy-continue.patch
+Patch77: httpd-2.4.37-balancer-failover.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1875844
-Patch78: httpd-2.4.37-r1881459.patch
+Patch78: httpd-2.4.37-r1881459.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1891829
-Patch79: httpd-2.4.37-r1864000.patch
+Patch79: httpd-2.4.37-r1864000.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1868608
-Patch80: httpd-2.4.37-r1872790.patch
+Patch80: httpd-2.4.37-r1872790.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1861380
-Patch81: httpd-2.4.37-r1879224.patch
+Patch81: httpd-2.4.37-r1879224.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1680118
-Patch82: httpd-2.4.37-r1877397.patch
+Patch82: httpd-2.4.37-r1877397.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1847585
-Patch83: httpd-2.4.37-r1878890.patch
+Patch83: httpd-2.4.37-r1878890.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1918741
-Patch84: httpd-2.4.37-r1878280.patch
+Patch84: httpd-2.4.37-r1878280.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1891594
-Patch85: httpd-2.4.37-htcacheclean-dont-break.patch
+Patch85: httpd-2.4.37-htcacheclean-dont-break.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1937334
-Patch86: httpd-2.4.37-r1873907.patch
+Patch86: httpd-2.4.37-r1873907.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1680111
-Patch87: httpd-2.4.37-reply-two-tls-rec.patch
+Patch87: httpd-2.4.37-reply-two-tls-rec.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1905613
-Patch88: httpd-2.4.37-r1845768+.patch
+Patch88: httpd-2.4.37-r1845768+.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=2001046
-Patch89: httpd-2.4.37-r1862410.patch
+Patch89: httpd-2.4.37-r1862410.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1984828
-Patch90: httpd-2.4.37-hcheck-mem-issues.patch
+Patch90: httpd-2.4.37-hcheck-mem-issues.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=2017543
-Patch91: httpd-2.4.37-add-SNI-support.patch
+Patch91: httpd-2.4.37-add-SNI-support.patch
+# https://bugzilla.redhat.com/show_bug.cgi?id=2159603
+Patch92: httpd-2.4.37-mod_status-duplicate-key.patch
+# https://bugzilla.redhat.com/show_bug.cgi?id=2221083
+Patch93: httpd-2.4.37-r1885607.patch
# Security fixes
-Patch200: httpd-2.4.37-r1851471.patch
+Patch200: httpd-2.4.37-r1851471.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1694980
-Patch201: httpd-2.4.37-CVE-2019-0211.patch
+Patch201: httpd-2.4.37-CVE-2019-0211.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1695025
-Patch202: httpd-2.4.37-CVE-2019-0215.patch
+Patch202: httpd-2.4.37-CVE-2019-0215.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1696141
-Patch203: httpd-2.4.37-CVE-2019-0217.patch
+Patch203: httpd-2.4.37-CVE-2019-0217.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1696097
-Patch204: httpd-2.4.37-CVE-2019-0220.patch
+Patch204: httpd-2.4.37-CVE-2019-0220.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1741860
# https://bugzilla.redhat.com/show_bug.cgi?id=1741864
# https://bugzilla.redhat.com/show_bug.cgi?id=1741868
-Patch205: httpd-2.4.34-CVE-2019-9511-and-9516-and-9517.patch
+Patch205: httpd-2.4.34-CVE-2019-9511-and-9516-and-9517.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1823259
# https://bugzilla.redhat.com/show_bug.cgi?id=1747284
# fixes both CVE-2020-1927 and CVE-2019-10098
-Patch206: httpd-2.4.37-CVE-2019-10098.patch
+Patch206: httpd-2.4.37-CVE-2019-10098.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1747281
-Patch207: httpd-2.4.37-CVE-2019-10092.patch
+Patch207: httpd-2.4.37-CVE-2019-10092.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1747291
-Patch208: httpd-2.4.37-CVE-2019-10097.patch
+Patch208: httpd-2.4.37-CVE-2019-10097.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1820772
-Patch209: httpd-2.4.37-CVE-2020-1934.patch
+Patch209: httpd-2.4.37-CVE-2020-1934.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1668493
-Patch210: httpd-2.4.37-CVE-2018-17199.patch
+Patch210: httpd-2.4.37-CVE-2018-17199.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1866563
-Patch211: httpd-2.4.37-CVE-2020-11984.patch
+Patch211: httpd-2.4.37-CVE-2020-11984.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1972500
-Patch212: httpd-2.4.37-CVE-2021-30641.patch
+Patch212: httpd-2.4.37-CVE-2021-30641.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1968307
-Patch213: httpd-2.4.37-CVE-2021-26690.patch
+Patch213: httpd-2.4.37-CVE-2021-26690.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=2005117
-Patch214: httpd-2.4.37-CVE-2021-40438.patch
+Patch214: httpd-2.4.37-CVE-2021-40438.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1966732
-Patch215: httpd-2.4.37-CVE-2021-26691.patch
+Patch215: httpd-2.4.37-CVE-2021-26691.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1968278
-Patch216: httpd-2.4.37-CVE-2020-35452.patch
+Patch216: httpd-2.4.37-CVE-2020-35452.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=2005128
-Patch217: httpd-2.4.37-CVE-2021-34798.patch
+Patch217: httpd-2.4.37-CVE-2021-34798.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=2005119
-Patch218: httpd-2.4.37-CVE-2021-39275.patch
+Patch218: httpd-2.4.37-CVE-2021-39275.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=2005124
-Patch219: httpd-2.4.37-CVE-2021-36160.patch
+Patch219: httpd-2.4.37-CVE-2021-36160.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1966728
-Patch220: httpd-2.4.37-CVE-2021-33193.patch
+Patch220: httpd-2.4.37-CVE-2021-33193.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=2034674
-Patch221: httpd-2.4.37-CVE-2021-44790.patch
+Patch221: httpd-2.4.37-CVE-2021-44790.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=2034672
-Patch222: httpd-2.4.37-CVE-2021-44224.patch
+Patch222: httpd-2.4.37-CVE-2021-44224.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=2064321
-Patch223: httpd-2.4.37-CVE-2022-22720.patch
+Patch223: httpd-2.4.37-CVE-2022-22720.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=1966738
-Patch224: httpd-2.4.37-CVE-2020-13950.patch
+Patch224: httpd-2.4.37-CVE-2020-13950.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=2064322
-Patch225: httpd-2.4.37-CVE-2022-22719.patch
+Patch225: httpd-2.4.37-CVE-2022-22719.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=2064320
-Patch226: httpd-2.4.37-CVE-2022-22721.patch
+Patch226: httpd-2.4.37-CVE-2022-22721.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=2065324
-Patch227: httpd-2.4.37-CVE-2022-23943.patch
+Patch227: httpd-2.4.37-CVE-2022-23943.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=2095002
-Patch228: httpd-2.4.37-CVE-2022-28614.patch
+Patch228: httpd-2.4.37-CVE-2022-28614.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=2095006
-Patch229: httpd-2.4.37-CVE-2022-28615.patch
+Patch229: httpd-2.4.37-CVE-2022-28615.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=2095015
-Patch230: httpd-2.4.37-CVE-2022-30522.patch
+Patch230: httpd-2.4.37-CVE-2022-30522.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=2095018
-Patch231: httpd-2.4.37-CVE-2022-30556.patch
+Patch231: httpd-2.4.37-CVE-2022-30556.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=2095020
-Patch232: httpd-2.4.37-CVE-2022-31813.patch
+Patch232: httpd-2.4.37-CVE-2022-31813.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=2095012
-Patch233: httpd-2.4.37-CVE-2022-29404.patch
+Patch233: httpd-2.4.37-CVE-2022-29404.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=2094997
-Patch234: httpd-2.4.37-CVE-2022-26377.patch
+Patch234: httpd-2.4.37-CVE-2022-26377.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=2161773
-Patch235: httpd-2.4.37-CVE-2022-37436.patch
+Patch235: httpd-2.4.37-CVE-2022-37436.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=2161774
-Patch236: httpd-2.4.37-CVE-2006-20001.patch
+Patch236: httpd-2.4.37-CVE-2006-20001.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=2161777
-Patch237: httpd-2.4.37-CVE-2022-36760.patch
+Patch237: httpd-2.4.37-CVE-2022-36760.patch
# https://bugzilla.redhat.com/show_bug.cgi?id=2176209
-Patch238: httpd-2.4.37-CVE-2023-25690.patch
-
-Patch239:httpd-2.4.37-CVE-2023-27522.patch
+Patch238: httpd-2.4.37-CVE-2023-25690.patch
+# https://bugzilla.redhat.com/show_bug.cgi?id=2176211
+Patch239: httpd-2.4.37-CVE-2023-27522.patch
# Add by Anolis
Patch1000: 1000-httpd-anolis-rebrand.patch
Patch1001: 1001-httpd-anolis-support-loongarch64.patch
# End
-License: ASL 2.0
-Group: System Environment/Daemons
-BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root
-BuildRequires: autoconf, perl-interpreter, perl-generators, pkgconfig, findutils, xmlto
-BuildRequires: zlib-devel, libselinux-devel, lua-devel, brotli-devel
-BuildRequires: apr-devel >= 1.5.0, apr-util-devel >= 1.5.0, pcre-devel >= 5.0
-BuildRequires: systemd-devel
+License: ASL 2.0
+Group: System Environment/Daemons
+BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root
+BuildRequires: autoconf, perl-interpreter, perl-generators, pkgconfig, findutils, xmlto
+BuildRequires: zlib-devel, libselinux-devel, lua-devel, brotli-devel
+BuildRequires: apr-devel >= 1.5.0, apr-util-devel >= 1.5.0, pcre-devel >= 5.0
+BuildRequires: systemd-devel
# web server testpage added to redhat-logos in 82.0 (rhbz1896319)
-Requires: /etc/mime.types, system-logos-httpd
-Obsoletes: httpd-suexec
-Provides: webserver
-Provides: mod_dav = %{version}-%{release}, httpd-suexec = %{version}-%{release}
-Provides: httpd-mmn = %{mmn}, httpd-mmn = %{mmnisa}
-Requires: httpd-tools = %{version}-%{release}
-Requires: httpd-filesystem = %{version}-%{release}
-Requires: mod_http2 >= 1.15.7-5
+# new logo requires new footer copyright which was added in rhbz1934800
+Requires: /etc/mime.types, system-logos(httpd-logo-ng)
+Obsoletes: httpd-suexec
+Provides: webserver
+Provides: mod_dav = %{version}-%{release}, httpd-suexec = %{version}-%{release}
+Provides: httpd-mmn = %{mmn}, httpd-mmn = %{mmnisa}
+Requires: httpd-tools = %{version}-%{release}
+Requires: httpd-filesystem = %{version}-%{release}
+Requires: mod_http2 >= 1.15.7-5
Requires(pre): httpd-filesystem
Requires(preun): systemd-units
Requires(postun): systemd-units
Requires(post): systemd-units
-Conflicts: apr < 1.5.0-1
+Conflicts: apr < 1.5.0-1
%description
The Apache HTTP Server is a powerful, efficient, and extensible
web server.
%package devel
-Group: Development/Libraries
-Summary: Development interfaces for the Apache HTTP server
-Requires: apr-devel, apr-util-devel, pkgconfig
-Requires: httpd = %{version}-%{release}
+Group: Development/Libraries
+Summary: Development interfaces for the Apache HTTP server
+Requires: apr-devel, apr-util-devel, pkgconfig
+Requires: httpd = %{version}-%{release}
%description devel
The httpd-devel package contains the APXS binary and other files
@@ -299,11 +304,11 @@ able to compile or develop additional modules for Apache, you need
to install this package.
%package manual
-Group: Documentation
-Summary: Documentation for the Apache HTTP server
-Requires: httpd = %{version}-%{release}
-Obsoletes: secureweb-manual, apache-manual
-BuildArch: noarch
+Group: Documentation
+Summary: Documentation for the Apache HTTP server
+Requires: httpd = %{version}-%{release}
+Obsoletes: secureweb-manual, apache-manual
+BuildArch: noarch
%description manual
The httpd-manual package contains the complete manual and
@@ -311,9 +316,9 @@ reference guide for the Apache HTTP server. The information can
also be found at http://httpd.apache.org/docs/2.2/.
%package filesystem
-Group: System Environment/Daemons
-Summary: The basic directory layout for the Apache HTTP server
-BuildArch: noarch
+Group: System Environment/Daemons
+Summary: The basic directory layout for the Apache HTTP server
+BuildArch: noarch
Requires(pre): /usr/sbin/useradd
%description filesystem
@@ -322,24 +327,24 @@ for the Apache HTTP server including the correct permissions
for the directories.
%package tools
-Group: System Environment/Daemons
-Summary: Tools for use with the Apache HTTP Server
+Group: System Environment/Daemons
+Summary: Tools for use with the Apache HTTP Server
%description tools
The httpd-tools package contains tools which can be used with
the Apache HTTP Server.
%package -n mod_ssl
-Group: System Environment/Daemons
-Summary: SSL/TLS module for the Apache HTTP Server
-Epoch: 1
-BuildRequires: openssl-devel
+Group: System Environment/Daemons
+Summary: SSL/TLS module for the Apache HTTP Server
+Epoch: 1
+BuildRequires: openssl-devel
Requires(pre): httpd-filesystem
-Requires: httpd = 0:%{version}-%{release}, httpd-mmn = %{mmnisa}
-Requires: sscg >= 3.0.0-7, /usr/bin/hostname
-Obsoletes: stronghold-mod_ssl
+Requires: httpd = 0:%{version}-%{release}, httpd-mmn = %{mmnisa}
+Requires: sscg >= 3.0.0-7, /usr/bin/hostname
+Obsoletes: stronghold-mod_ssl
# Require an OpenSSL which supports PROFILE=SYSTEM
-Conflicts: openssl-libs < 1:1.0.1h-4
+Conflicts: openssl-libs < 1:1.0.1h-4
%description -n mod_ssl
The mod_ssl module provides strong cryptography for the Apache Web
@@ -347,31 +352,31 @@ server via the Secure Sockets Layer (SSL) and Transport Layer
Security (TLS) protocols.
%package -n mod_proxy_html
-Group: System Environment/Daemons
-Summary: HTML and XML content filters for the Apache HTTP Server
-Requires: httpd = 0:%{version}-%{release}, httpd-mmn = %{mmnisa}
-BuildRequires: libxml2-devel
-Epoch: 1
-Obsoletes: mod_proxy_html < 1:2.4.1-2
+Group: System Environment/Daemons
+Summary: HTML and XML content filters for the Apache HTTP Server
+Requires: httpd = 0:%{version}-%{release}, httpd-mmn = %{mmnisa}
+BuildRequires: libxml2-devel
+Epoch: 1
+Obsoletes: mod_proxy_html < 1:2.4.1-2
%description -n mod_proxy_html
The mod_proxy_html and mod_xml2enc modules provide filters which can
transform and modify HTML and XML content.
%package -n mod_ldap
-Group: System Environment/Daemons
-Summary: LDAP authentication modules for the Apache HTTP Server
-Requires: httpd = 0:%{version}-%{release}, httpd-mmn = %{mmnisa}
-Requires: apr-util-ldap
+Group: System Environment/Daemons
+Summary: LDAP authentication modules for the Apache HTTP Server
+Requires: httpd = 0:%{version}-%{release}, httpd-mmn = %{mmnisa}
+Requires: apr-util-ldap
%description -n mod_ldap
The mod_ldap and mod_authnz_ldap modules add support for LDAP
authentication to the Apache HTTP Server.
%package -n mod_session
-Group: System Environment/Daemons
-Summary: Session interface for the Apache HTTP Server
-Requires: httpd = 0:%{version}-%{release}, httpd-mmn = %{mmnisa}
+Group: System Environment/Daemons
+Summary: Session interface for the Apache HTTP Server
+Requires: httpd = 0:%{version}-%{release}, httpd-mmn = %{mmnisa}
%description -n mod_session
The mod_session module and associated backends provide an abstract
@@ -436,6 +441,8 @@ interface for storing and accessing per-user session data.
%patch89 -p1 -b .r1862410
%patch90 -p1 -b .hcheck-mem-issues
%patch91 -p1 -b .SNI
+%patch92 -p1 -b .mod_status-dupl
+%patch93 -p1 -b .r1885607
%patch200 -p1 -b .r1851471
%patch201 -p1 -b .CVE-2019-0211
@@ -987,13 +994,36 @@ rm -rf $RPM_BUILD_ROOT
%{_rpmconfigdir}/macros.d/macros.httpd
%changelog
-* Fri Jun 16 2023 zhangbinchen - 2.4.37-56.0.1.6
+* Thu Dec 14 2023 zhangbinchen - 2.4.37-62.0.1
- Rebrand for Anolis OS(Binchen Zhang)
- Requires system-logos-httpd(Binchen Zhang)
- Support loongarch64 platform(Liwei Ge)
- add patch for CVE-2023-27522(guo.chuang@zte.com.cn)
-* Thu Apr 27 2023 Luboš Uhliarik - 2.4.37-56.6
+* Thu Jul 27 2023 Luboš Uhliarik - 2.4.37-62
+- Resolves: #2221083 - Apache Bug 57087: mod_proxy_fcgi doesn't send cgi
+ CONTENT_LENGTH variable when the client request used Transfer-Encoding:chunked
+
+* Thu Jul 20 2023 Tomas Korbar - 2.4.37-61
+- Fix issue found by covscan
+- Related: #2159603
+
+* Mon Jul 17 2023 Tomas Korbar - 2.4.37-60
+- Another rebuild because of mistake in workflow
+- Related: #2159603
+
+* Mon Jul 17 2023 Tomas Korbar - 2.4.37-59
+- Rebuild because of mistake in workflow
+- Related: #2159603
+
+* Mon Jul 17 2023 Tomas Korbar - 2.4.37-58
+- Resolves: #2159603 - mod_status lists BusyWorkers IdleWorkers keys twice
+
+* Thu May 25 2023 Luboš Uhliarik - 2.4.37-57
+- Resolves: #2176723 - CVE-2023-27522 httpd:2.4/httpd: mod_proxy_uwsgi HTTP
+ response splitting
+
+* Thu Apr 27 2023 Luboš Uhliarik - 2.4.37-56.5
- Resolves: #2190133 - mod_rewrite regression with CVE-2023-25690
* Sat Mar 18 2023 Luboš Uhliarik - 2.4.37-56.4