From ba2571c17fdce02b2319665cfc00bf90f407a64c Mon Sep 17 00:00:00 2001 From: CentOS Sources Date: Jul 14 2020 01:45:59 +0000 Subject: import pcp-5.1.1-3.el8 --- diff --git a/SOURCES/redhat-bugzilla-1541406.patch b/SOURCES/redhat-bugzilla-1541406.patch new file mode 100644 index 0000000..9841486 --- /dev/null +++ b/SOURCES/redhat-bugzilla-1541406.patch @@ -0,0 +1,83 @@ +91dd4ae6b logutil: use $PCP_TMPFILE_DIR for intermediate pmlogger_merge archives +b0c90d858 packaging: activate pmlogger_rewrite on upgrades +b5e602187 packaging: revert pcp_archive_dir subsitution in build/rpm/GNUmakefile + +--- a/src/pmlogger/pmlogger_merge.sh 2018-06-18 16:24:25.000000000 +1000 ++++ b/src/pmlogger/pmlogger_merge.sh 2020-06-11 13:10:57.401576513 +1000 +@@ -26,8 +26,9 @@ + + prog=`basename $0` + tmp=`mktemp -d /tmp/pcp.XXXXXXXXX` || exit 1 ++tmpmerge=`mktemp -d $PCP_TMPFILE_DIR/pcp.XXXXXXXXX` || exit 1 + status=0 +-trap "rm -rf $tmp; exit \$status" 0 1 2 3 15 ++trap "rm -rf $tmp $tmpmerge; exit \$status" 0 1 2 3 15 + + force=false + VERBOSE=false +@@ -229,8 +230,8 @@ + # output = 108 file descriptors which should be well below any + # shell-imposed or system-imposed limits + # +- $VERBOSE && echo " -> partial merge to $tmp/$part" +- cmd="pmlogextract $list $tmp/$part" ++ $VERBOSE && echo " -> partial merge to $tmpmerge/$part" ++ cmd="pmlogextract $list $tmpmerge/$part" + if $SHOWME + then + echo "+ $cmd" +@@ -239,13 +240,13 @@ + then + : + else +- $VERBOSE || echo " -> partial merge to $tmp/$part" ++ $VERBOSE || echo " -> partial merge to $tmpmerge/$part" + echo "$prog: Directory: `pwd`" +- echo "$prog: Failed: pmlogextract $list $tmp/$part" ++ echo "$prog: Failed: pmlogextract $list $tmpmerge/$part" + _warning + fi + fi +- list=$tmp/$part ++ list=$tmpmerge/$part + part=`expr $part + 1` + i=0 + fi +--- a/build/rpm/fedora.spec 2020-05-29 09:15:44.000000000 +1000 ++++ b/build/rpm/fedora.spec 2020-06-11 13:10:57.402576528 +1000 +@@ -2814,6 +2814,7 @@ + chown -R pcp:pcp %{_logsdir}/pmie 2>/dev/null + chown -R pcp:pcp %{_logsdir}/pmproxy 2>/dev/null + %{install_file "$PCP_PMNS_DIR" .NeedRebuild} ++%{install_file "$PCP_LOG_DIR/pmlogger" .NeedRewrite} + %if !%{disable_systemd} + %systemd_postun_with_restart pmcd.service + %systemd_post pmcd.service +--- a/build/rpm/pcp.spec.in 2020-05-29 09:16:19.000000000 +1000 ++++ b/build/rpm/pcp.spec.in 2020-06-11 13:10:57.402576528 +1000 +@@ -3149,6 +3149,7 @@ + chown -R pcp:pcp "$PCP_LOG_DIR/pmie" 2>/dev/null + chown -R pcp:pcp "$PCP_LOG_DIR/pmproxy" 2>/dev/null + %{install_file "$PCP_PMNS_DIR" .NeedRebuild} ++%{install_file "$PCP_LOG_DIR/pmlogger" .NeedRewrite} + %if "@enable_systemd@" == "true" + %systemd_postun_with_restart pmcd.service + %systemd_post pmcd.service +--- a/debian/pcp.postinst.tail 2019-06-13 09:59:16.000000000 +1000 ++++ b/debian/pcp.postinst.tail 2020-06-11 13:10:57.402576528 +1000 +@@ -6,6 +6,8 @@ + + touch /var/lib/pcp/pmns/.NeedRebuild + chmod 644 /var/lib/pcp/pmns/.NeedRebuild ++touch /var/log/pcp/pmlogger/.NeedRewrite ++chmod 644 /var/log/pcp/pmlogger/.NeedRewrite + + getent group pcp >/dev/null || groupadd -r pcp + getent passwd pcp >/dev/null || \ +--- a/debian/pcp.prerm 2017-08-17 10:54:50.000000000 +1000 ++++ b/debian/pcp.prerm 2020-06-11 13:10:57.402576528 +1000 +@@ -24,3 +24,4 @@ + fi + fi + rm -f /var/lib/pcp/pmns/.NeedRebuild ++rm -f /var/log/pcp/pmlogger/.NeedRewrite diff --git a/SOURCES/redhat-bugzilla-1790433.patch b/SOURCES/redhat-bugzilla-1790433.patch new file mode 100644 index 0000000..31643bd --- /dev/null +++ 
b/SOURCES/redhat-bugzilla-1790433.patch @@ -0,0 +1,21 @@ +BZ 1790433 - Missing dependency of pcp-pmda-snmp on net-snmp-perl +bc4abb291 pmdasnmp: add Install checks for the required Net::SNMP module + +diff --git a/src/pmdas/snmp/Install b/src/pmdas/snmp/Install +index 7fe4193e4..21a76ab56 100755 +--- a/src/pmdas/snmp/Install ++++ b/src/pmdas/snmp/Install +@@ -22,6 +22,13 @@ iam=snmp + perl_opt=true + daemon_opt=false + ++perl -e "use Net::SNMP" 2>/dev/null ++if test $? -ne 0; then ++ echo "Net::SNMP (Simple Network Management Protocol) perl module is not installed" ++ status=1 ++ exit ++fi ++ + pmdaSetup + pmdaInstall + exit diff --git a/SOURCES/redhat-bugzilla-1790452.patch b/SOURCES/redhat-bugzilla-1790452.patch new file mode 100644 index 0000000..ef28a8f --- /dev/null +++ b/SOURCES/redhat-bugzilla-1790452.patch @@ -0,0 +1,45 @@ +BZ 1790452 - Installation of pcp-pmda-samba causes SELinux issues +73772a60f selinux: fix pmdasamba(1) operating with selinux enforcing + +--- a/qa/917.out.in 2020-05-19 20:34:46.000000000 +1000 ++++ pcp-5.1.1/qa/917.out.in 2020-06-22 17:29:14.346713826 +1000 +@@ -34,6 +34,8 @@ + ! allow [pcp_pmcd_t] [unreserved_port_t] : [tcp_socket] { name_bind name_connect }; + ! allow [pcp_pmcd_t] [unreserved_port_t] : [udp_socket] { name_bind }; + ! allow [pcp_pmlogger_t] [unreserved_port_t] : [tcp_socket] { name_bind }; ++ allow [pcp_pmcd_t] [samba_var_t] : [dir] { add_name write }; ++ allow [pcp_pmcd_t] [samba_var_t] : [file] { create }; + allow [pcp_pmcd_t] [websm_port_t] : [tcp_socket] { name_connect }; + ! allow [pcp_pmcd_t] [pcp_tmp_t] : [file] { execute execute_no_trans map }; + allow [pcp_pmcd_t] [hostname_exec_t] : [file] { execute execute_no_trans getattr open read }; +--- a/src/pmdas/samba/pmdasamba.pl 2020-02-04 14:51:57.000000000 +1100 ++++ pcp-5.1.1/src/pmdas/samba/pmdasamba.pl 2020-06-22 17:29:14.346713826 +1000 +@@ -41,6 +41,7 @@ + $pmda->err("pmdasamba failed to open $smbstats pipe: $!"); + + while () { ++ $_ =~ s/"//g; + if (m/^\*\*\*\*\s+(\w+[^*]*)\**$/) { + my $heading = $1; + $heading =~ s/ +$//g; +--- a/src/selinux/pcpupstream.te.in 2020-05-19 20:34:32.000000000 +1000 ++++ pcp-5.1.1/src/selinux/pcpupstream.te.in 2020-06-22 17:29:14.347713837 +1000 +@@ -22,6 +22,7 @@ + type pcp_pmie_exec_t; # pmda.summary + type ping_exec_t; # pmda.netcheck + type openvswitch_exec_t; # pmda.openvswitch ++ type samba_var_t; # pmda.samba + type websm_port_t; # pmda.openmetrics + type system_cronjob_t; + type user_home_t; +@@ -151,6 +152,10 @@ + #type=AVC msg=audit(YYY.94): avc: denied { name_bind } for pid=9365 comm=pmlogger src=4332 scontext=system_u:system_r:pcp_pmlogger_t:s0 tcontext=system_u:object_r:unreserved_port_t:s0 tclass=tcp_socket permissive=0 + @PCP_UNRESERVED_PORT_RULE_PMLOGGER@ + ++#type=AVC msg=audit(YYY.97): avc: denied { write } for pid=3507787 comm="smbstatus" name="msg.lock" dev="dm-0" ino=283321 scontext=system_u:system_r:pcp_pmcd_t:s0 tcontext=system_u:object_r:samba_var_t:s0 tclass=dir permissive=0 ++allow pcp_pmcd_t samba_var_t:dir { add_name write }; # pmda.samba ++allow pcp_pmcd_t samba_var_t:file { create }; # pmda.samba ++ + #type=AVC msg=audit(YYY.15): avc: denied { name_connect } for pid=13816 comm="python3" dest=9090 scontext=system_u:system_r:pcp_pmcd_t:s0 tcontext=system_u:object_r:websm_port_t:s0 tclass=tcp_socket permissive=0 + allow pcp_pmcd_t websm_port_t:tcp_socket name_connect; # pmda.openmetrics + diff --git a/SOURCES/redhat-bugzilla-1792971.patch b/SOURCES/redhat-bugzilla-1792971.patch new file mode 100644 index 0000000..37e6e69 
--- /dev/null +++ b/SOURCES/redhat-bugzilla-1792971.patch @@ -0,0 +1,1351 @@ +5af58c8af pmdastatsd: fix minor sizeof issues found by Coverity scan +b3f78dc82 pmlogconf: fix resource leak found by coverity scan +8a3ed1b26 pmdastatsd: initialize stack variable to keep Coverity happy +6902959e5 pmdastatsd: fix Coverity LOCK issues on error paths +548cad8c5 libpcp_web: ensure context is freed only after timer is fully closed +01e8bb436 services: pmlogger and pmie services want pmcd on boot +20959e794 Fix of 1845241 - Intermittent pmlogconf core dumps +32d6febf4 pcp-atop: resolve other paths of potential null task pointer dereference +cda567efe pmproxy: improve diagnostics, particularly relating to http requests +e0bb9e66c pmproxy: cleanup, remove unused flags and dead code in http encoding +9da331eb8 pmproxy: support the OPTIONS protocol in HTTP 1.1 +1d84081af libpcp_web: add resilience to descriptor lookup paths + +--- a/src/pmdas/statsd/src/aggregator-metric-duration-exact.c 2019-08-21 11:33:26.000000000 +1000 ++++ b/src/pmdas/statsd/src/aggregator-metric-duration-exact.c 2020-06-11 13:10:57.393576397 +1000 +@@ -45,7 +45,7 @@ + double** new_values = realloc(collection->values, sizeof(double*) * new_length); + ALLOC_CHECK("Unable to allocate memory for collection value."); + collection->values = new_values; +- collection->values[collection->length] = (double*) malloc(sizeof(double*)); ++ collection->values[collection->length] = (double*) malloc(sizeof(double)); + ALLOC_CHECK("Unable to allocate memory for duration collection value."); + *(collection->values[collection->length]) = value; + collection->length = new_length; +--- a/src/pmdas/statsd/src/aggregator-metric-labels.c 2020-02-18 16:32:40.000000000 +1100 ++++ b/src/pmdas/statsd/src/aggregator-metric-labels.c 2020-06-11 13:10:57.393576397 +1000 +@@ -140,7 +140,7 @@ + + static char* + create_instance_label_segment_str(char* tags) { +- char buffer[JSON_BUFFER_SIZE]; ++ char buffer[JSON_BUFFER_SIZE] = {'\0'}; + size_t tags_length = strlen(tags) + 1; + if (tags_length > JSON_BUFFER_SIZE) { + return NULL; +@@ -197,7 +197,7 @@ + ALLOC_CHECK("Unable to allocate memory for labels string in metric label record."); + memcpy((*out)->labels, datagram->tags, labels_length); + struct metric_label_metadata* meta = +- (struct metric_label_metadata*) malloc(sizeof(struct metric_label_metadata*)); ++ (struct metric_label_metadata*) malloc(sizeof(struct metric_label_metadata)); + ALLOC_CHECK("Unable to allocate memory for metric label metadata."); + (*out)->meta = meta; + (*out)->type = METRIC_TYPE_NONE; +--- a/src/pmdas/statsd/src/network-listener.c 2019-08-27 11:09:16.000000000 +1000 ++++ b/src/pmdas/statsd/src/network-listener.c 2020-06-11 13:10:57.393576397 +1000 +@@ -68,7 +68,7 @@ + struct timeval tv; + freeaddrinfo(res); + int max_udp_packet_size = config->max_udp_packet_size; +- char *buffer = (char *) malloc(max_udp_packet_size * sizeof(char*)); ++ char *buffer = (char *) malloc(max_udp_packet_size * sizeof(char)); + struct sockaddr_storage src_addr; + socklen_t src_addr_len = sizeof(src_addr); + int rv; +--- a/src/pmlogconf/pmlogconf.c 2020-05-23 13:33:27.000000000 +1000 ++++ b/src/pmlogconf/pmlogconf.c 2020-06-11 13:10:57.394576411 +1000 +@@ -735,7 +735,7 @@ + static int + evaluate_number_values(group_t *group, int type, numeric_cmp_t compare) + { +- unsigned int i, found; ++ int i, found; + pmValueSet *vsp; + pmValue *vp; + pmAtomValue atom; +@@ -769,7 +769,7 @@ + static int + evaluate_string_values(group_t *group, string_cmp_t compare) + { +- 
unsigned int i, found; ++ int i, found; + pmValueSet *vsp; + pmValue *vp; + pmAtomValue atom; +@@ -828,7 +828,7 @@ + static int + evaluate_string_regexp(group_t *group, regex_cmp_t compare) + { +- unsigned int i, found; ++ int i, found; + pmValueSet *vsp; + pmValue *vp; + pmAtomValue atom; +@@ -1478,6 +1478,10 @@ + } else if (strncmp("#+ groupdir ", bytes, 12) == 0) { + group_dircheck(bytes + 12); + } else if (strncmp("#+ ", bytes, 3) == 0) { ++ if (group) { ++ /* reported by COVERITY RESOURCE LEAK */ ++ group_free(group); ++ } + group = group_create(bytes + 3, line); + head = 0; + } else if (group) { +--- a/src/pmdas/statsd/src/aggregator-metrics.c 2020-02-18 16:32:40.000000000 +1100 ++++ b/src/pmdas/statsd/src/aggregator-metrics.c 2020-06-11 13:10:57.394576411 +1000 +@@ -212,7 +212,10 @@ + VERBOSE_LOG(0, "Writing metrics to file..."); + pthread_mutex_lock(&container->mutex); + metrics* m = container->metrics; +- if (strlen(config->debug_output_filename) == 0) return; ++ if (strlen(config->debug_output_filename) == 0) { ++ pthread_mutex_unlock(&container->mutex); ++ return; ++ } + int sep = pmPathSeparator(); + char debug_output[MAXPATHLEN]; + pmsprintf( +--- a/src/pmdas/statsd/src/aggregator-stats.c 2020-02-18 16:32:40.000000000 +1100 ++++ b/src/pmdas/statsd/src/aggregator-stats.c 2020-06-11 13:10:57.394576411 +1000 +@@ -141,7 +141,10 @@ + write_stats_to_file(struct agent_config* config, struct pmda_stats_container* stats) { + VERBOSE_LOG(0, "Writing stats to file..."); + pthread_mutex_lock(&stats->mutex); +- if (strlen(config->debug_output_filename) == 0) return; ++ if (strlen(config->debug_output_filename) == 0) { ++ pthread_mutex_unlock(&stats->mutex); ++ return; ++ } + int sep = pmPathSeparator(); + char debug_output[MAXPATHLEN]; + pmsprintf( +--- a/src/libpcp_web/src/webgroup.c 2020-05-22 11:29:27.000000000 +1000 ++++ b/src/libpcp_web/src/webgroup.c 2020-06-11 13:10:57.394576411 +1000 +@@ -56,17 +56,28 @@ + } + + static void ++webgroup_release_context(uv_handle_t *handle) ++{ ++ struct context *context = (struct context *)handle->data; ++ ++ if (pmDebugOptions.http) ++ fprintf(stderr, "releasing context %p\n", context); ++ ++ pmwebapi_free_context(context); ++} ++ ++static void + webgroup_destroy_context(struct context *context, struct webgroups *groups) + { + context->garbage = 1; + + if (pmDebugOptions.http) +- fprintf(stderr, "freeing context %p\n", context); ++ fprintf(stderr, "destroying context %p\n", context); + + uv_timer_stop(&context->timer); + if (groups) + dictUnlink(groups->contexts, &context->randomid); +- pmwebapi_free_context(context); ++ uv_close((uv_handle_t *)&context->timer, webgroup_release_context); + } + + static void +--- a/src/pmie/pmie.service.in 2020-05-27 13:36:47.000000000 +1000 ++++ b/src/pmie/pmie.service.in 2020-06-11 13:10:57.394576411 +1000 +@@ -4,6 +4,7 @@ + After=network-online.target pmcd.service + After=pmie_check.timer pmie_check.path pmie_daily.timer + BindsTo=pmie_check.timer pmie_check.path pmie_daily.timer ++Wants=pmcd.service + + [Service] + Type=notify +--- a/src/pmlogger/pmlogger.service.in 2020-05-22 16:48:32.000000000 +1000 ++++ b/src/pmlogger/pmlogger.service.in 2020-06-11 13:10:57.394576411 +1000 +@@ -4,6 +4,7 @@ + After=network-online.target pmcd.service + After=pmlogger_check.timer pmlogger_check.path pmlogger_daily.timer pmlogger_daily-poll.timer + BindsTo=pmlogger_check.timer pmlogger_check.path pmlogger_daily.timer pmlogger_daily-poll.timer ++Wants=pmcd.service + + [Service] + Type=notify +--- a/src/pcp/atop/showgeneric.c 
2020-03-30 12:13:55.000000000 +1100 ++++ b/src/pcp/atop/showgeneric.c 2020-06-11 13:10:57.395576426 +1000 +@@ -2024,6 +2024,9 @@ + */ + for (numusers=i=0; i < numprocs; i++, curprocs++) + { ++ if (*curprocs == NULL) ++ continue; ++ + if (procsuppress(*curprocs, &procsel)) + continue; + +@@ -2069,6 +2072,9 @@ + */ + for (numprogs=i=0; i < numprocs; i++, curprocs++) + { ++ if (*curprocs == NULL) ++ continue; ++ + if (procsuppress(*curprocs, &procsel)) + continue; + +@@ -2112,6 +2118,9 @@ + */ + for (numconts=i=0; i < numprocs; i++, curprocs++) + { ++ if (*curprocs == NULL) ++ continue; ++ + if (procsuppress(*curprocs, &procsel)) + continue; + +--- a/src/libpcp_web/src/exports 2020-05-22 15:38:47.000000000 +1000 ++++ b/src/libpcp_web/src/exports 2020-06-11 13:10:57.397576455 +1000 +@@ -189,3 +189,14 @@ + pmWebGroupDestroy; + sdsKeyDictCallBacks; + } PCP_WEB_1.12; ++ ++PCP_WEB_1.14 { ++ global: ++ dictFetchValue; ++ http_method_str; ++ http_body_is_final; ++ http_parser_version; ++ http_parser_url_init; ++ http_parser_parse_url; ++ http_parser_settings_init; ++} PCP_WEB_1.13; +--- a/src/pmproxy/src/http.c 2020-03-23 09:47:47.000000000 +1100 ++++ b/src/pmproxy/src/http.c 2020-06-11 13:10:57.398576470 +1000 +@@ -21,6 +21,18 @@ + static int chunked_transfer_size; /* pmproxy.chunksize, pagesize by default */ + static int smallest_buffer_size = 128; + ++#define MAX_PARAMS_SIZE 4096 ++#define MAX_HEADERS_SIZE 128 ++ ++static sds HEADER_ACCESS_CONTROL_REQUEST_HEADERS, ++ HEADER_ACCESS_CONTROL_REQUEST_METHOD, ++ HEADER_ACCESS_CONTROL_ALLOW_METHODS, ++ HEADER_ACCESS_CONTROL_ALLOW_HEADERS, ++ HEADER_ACCESS_CONTROL_ALLOW_ORIGIN, ++ HEADER_ACCESS_CONTROL_ALLOWED_HEADERS, ++ HEADER_CONNECTION, HEADER_CONTENT_LENGTH, ++ HEADER_ORIGIN, HEADER_WWW_AUTHENTICATE; ++ + /* + * Simple helpers to manage the cumulative addition of JSON + * (arrays and/or objects) to a buffer. 
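
Annotation (not part of the patch): the webgroup.c hunk above ("libpcp_web: ensure context is freed only after timer is fully closed") moves the free of a context into a uv_close() callback instead of freeing immediately after uv_timer_stop(). Below is a minimal, hedged sketch of that libuv pattern; "struct context" here is a stand-in, not the real libpcp_web structure.

```c
/* Sketch: never free a structure embedding a libuv handle until the
 * handle's close callback has run; assumes libuv 1.x. */
#include <stdio.h>
#include <stdlib.h>
#include <uv.h>

struct context {
    uv_timer_t timer;           /* handle embedded in the structure */
};

static void
context_release(uv_handle_t *handle)
{
    /* runs only once libuv has fully closed the timer handle */
    struct context *cp = (struct context *)handle->data;
    fprintf(stderr, "releasing context %p\n", (void *)cp);
    free(cp);
}

static void
context_destroy(struct context *cp)
{
    uv_timer_stop(&cp->timer);
    /* defer the free(3) to the close callback - freeing here would
     * race with the event loop, which may still touch the handle */
    uv_close((uv_handle_t *)&cp->timer, context_release);
}

int
main(void)
{
    uv_loop_t *loop = uv_default_loop();
    struct context *cp = calloc(1, sizeof(*cp));

    if (cp == NULL)
        return 1;
    uv_timer_init(loop, &cp->timer);
    cp->timer.data = cp;

    context_destroy(cp);
    uv_run(loop, UV_RUN_DEFAULT);   /* drives the close callback */
    return 0;
}
```
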
+@@ -121,45 +133,9 @@ + return "text/html"; + if (flags & HTTP_FLAG_TEXT) + return "text/plain"; +- if (flags & HTTP_FLAG_JS) +- return "text/javascript"; +- if (flags & HTTP_FLAG_CSS) +- return "text/css"; +- if (flags & HTTP_FLAG_ICO) +- return "image/x-icon"; +- if (flags & HTTP_FLAG_JPG) +- return "image/jpeg"; +- if (flags & HTTP_FLAG_PNG) +- return "image/png"; +- if (flags & HTTP_FLAG_GIF) +- return "image/gif"; + return "application/octet-stream"; + } + +-http_flags +-http_suffix_type(const char *suffix) +-{ +- if (strcmp(suffix, "js") == 0) +- return HTTP_FLAG_JS; +- if (strcmp(suffix, "ico") == 0) +- return HTTP_FLAG_ICO; +- if (strcmp(suffix, "css") == 0) +- return HTTP_FLAG_CSS; +- if (strcmp(suffix, "png") == 0) +- return HTTP_FLAG_PNG; +- if (strcmp(suffix, "gif") == 0) +- return HTTP_FLAG_GIF; +- if (strcmp(suffix, "jpg") == 0) +- return HTTP_FLAG_JPG; +- if (strcmp(suffix, "jpeg") == 0) +- return HTTP_FLAG_JPG; +- if (strcmp(suffix, "html") == 0) +- return HTTP_FLAG_HTML; +- if (strcmp(suffix, "txt") == 0) +- return HTTP_FLAG_TEXT; +- return 0; +-} +- + static const char * const + http_content_encoding(http_flags flags) + { +@@ -259,26 +235,28 @@ + + header = sdscatfmt(sdsempty(), + "HTTP/%u.%u %u %s\r\n" +- "Connection: Keep-Alive\r\n" +- "Access-Control-Allow-Origin: *\r\n" +- "Access-Control-Allow-Headers: Accept, Accept-Language, Content-Language, Content-Type\r\n", ++ "%S: Keep-Alive\r\n", + parser->http_major, parser->http_minor, +- sts, http_status_mapping(sts)); ++ sts, http_status_mapping(sts), HEADER_CONNECTION); ++ header = sdscatfmt(header, ++ "%S: *\r\n" ++ "%S: %S\r\n", ++ HEADER_ACCESS_CONTROL_ALLOW_ORIGIN, ++ HEADER_ACCESS_CONTROL_ALLOW_HEADERS, ++ HEADER_ACCESS_CONTROL_ALLOWED_HEADERS); + + if (sts == HTTP_STATUS_UNAUTHORIZED && client->u.http.realm) +- header = sdscatfmt(header, "WWW-Authenticate: Basic realm=\"%S\"\r\n", +- client->u.http.realm); ++ header = sdscatfmt(header, "%S: Basic realm=\"%S\"\r\n", ++ HEADER_WWW_AUTHENTICATE, client->u.http.realm); + +- if ((flags & HTTP_FLAG_STREAMING)) +- header = sdscatfmt(header, "Transfer-encoding: %s\r\n", "chunked"); +- +- if (!(flags & HTTP_FLAG_STREAMING)) +- header = sdscatfmt(header, "Content-Length: %u\r\n", length); ++ if ((flags & (HTTP_FLAG_STREAMING | HTTP_FLAG_NO_BODY))) ++ header = sdscatfmt(header, "Transfer-encoding: chunked\r\n"); ++ else ++ header = sdscatfmt(header, "%S: %u\r\n", HEADER_CONTENT_LENGTH, length); + +- header = sdscatfmt(header, +- "Content-Type: %s%s\r\n" +- "Date: %s\r\n\r\n", +- http_content_type(flags), http_content_encoding(flags), ++ header = sdscatfmt(header, "Content-Type: %s%s\r\n", ++ http_content_type(flags), http_content_encoding(flags)); ++ header = sdscatfmt(header, "Date: %s\r\n\r\n", + http_date_string(time(NULL), date, sizeof(date))); + + if (pmDebugOptions.http && pmDebugOptions.desperate) { +@@ -288,8 +266,130 @@ + return header; + } + ++static sds ++http_header_value(struct client *client, sds header) ++{ ++ if (client->u.http.headers == NULL) ++ return NULL; ++ return (sds)dictFetchValue(client->u.http.headers, header); ++} ++ ++static sds ++http_headers_allowed(sds headers) ++{ ++ (void)headers; ++ return sdsdup(HEADER_ACCESS_CONTROL_ALLOWED_HEADERS); ++} ++ ++/* check whether the (preflight) method being proposed is acceptable */ ++static int ++http_method_allowed(sds value, http_options options) ++{ ++ if (strcmp(value, "GET") == 0 && (options & HTTP_OPT_GET)) ++ return 1; ++ if (strcmp(value, "PUT") == 0 && (options & HTTP_OPT_PUT)) ++ return 1; ++ if 
(strcmp(value, "POST") == 0 && (options & HTTP_OPT_POST)) ++ return 1; ++ if (strcmp(value, "HEAD") == 0 && (options & HTTP_OPT_HEAD)) ++ return 1; ++ if (strcmp(value, "TRACE") == 0 && (options & HTTP_OPT_TRACE)) ++ return 1; ++ return 0; ++} ++ ++static char * ++http_methods_string(char *buffer, size_t length, http_options options) ++{ ++ char *p = buffer; ++ ++ /* ensure room for all options, spaces and comma separation */ ++ if (!options || length < 48) ++ return NULL; ++ ++ memset(buffer, 0, length); ++ if (options & HTTP_OPT_GET) ++ strcat(p, ", GET"); ++ if (options & HTTP_OPT_PUT) ++ strcat(p, ", PUT"); ++ if (options & HTTP_OPT_HEAD) ++ strcat(p, ", HEAD"); ++ if (options & HTTP_OPT_POST) ++ strcat(p, ", POST"); ++ if (options & HTTP_OPT_TRACE) ++ strcat(p, ", TRACE"); ++ if (options & HTTP_OPT_OPTIONS) ++ strcat(p, ", OPTIONS"); ++ return p + 2; /* skip leading comma+space */ ++} ++ ++static sds ++http_response_trace(struct client *client) ++{ ++ dictIterator *iterator; ++ dictEntry *entry; ++ sds result = sdsempty(); ++ ++ iterator = dictGetSafeIterator(client->u.http.headers); ++ while ((entry = dictNext(iterator)) != NULL) ++ result = sdscatfmt("%S: %S\r\n", dictGetKey(entry), dictGetVal(entry)); ++ dictReleaseIterator(iterator); ++ return result; ++} ++ ++static sds ++http_response_access(struct client *client, http_code sts, http_options options) ++{ ++ struct http_parser *parser = &client->u.http.parser; ++ char buffer[64]; ++ sds header, value, result; ++ ++ value = http_header_value(client, HEADER_ACCESS_CONTROL_REQUEST_METHOD); ++ if (value && http_method_allowed(value, options) == 0) ++ sts = HTTP_STATUS_METHOD_NOT_ALLOWED; ++ ++ parser->http_major = parser->http_minor = 1; ++ ++ header = sdscatfmt(sdsempty(), ++ "HTTP/%u.%u %u %s\r\n" ++ "%S: Keep-Alive\r\n", ++ parser->http_major, parser->http_minor, ++ sts, http_status_mapping(sts), HEADER_CONNECTION); ++ header = sdscatfmt(header, "%S: %u\r\n", HEADER_CONTENT_LENGTH, 0); ++ ++ if (sts >= HTTP_STATUS_OK && sts < HTTP_STATUS_BAD_REQUEST) { ++ if ((value = http_header_value(client, HEADER_ORIGIN))) ++ header = sdscatfmt(header, "%S: %S\r\n", ++ HEADER_ACCESS_CONTROL_ALLOW_ORIGIN, value); ++ ++ header = sdscatfmt(header, "%S: %s\r\n", ++ HEADER_ACCESS_CONTROL_ALLOW_METHODS, ++ http_methods_string(buffer, sizeof(buffer), options)); ++ ++ value = http_header_value(client, HEADER_ACCESS_CONTROL_REQUEST_HEADERS); ++ if (value && (result = http_headers_allowed(value)) != NULL) { ++ header = sdscatfmt(header, "%S: %S\r\n", ++ HEADER_ACCESS_CONTROL_ALLOW_HEADERS, result); ++ sdsfree(result); ++ } ++ } ++ if (sts == HTTP_STATUS_UNAUTHORIZED && client->u.http.realm) ++ header = sdscatfmt(header, "%S: Basic realm=\"%S\"\r\n", ++ HEADER_WWW_AUTHENTICATE, client->u.http.realm); ++ ++ header = sdscatfmt(header, "Date: %s\r\n\r\n", ++ http_date_string(time(NULL), buffer, sizeof(buffer))); ++ ++ if (pmDebugOptions.http && pmDebugOptions.desperate) { ++ fprintf(stderr, "access response to client %p\n", client); ++ fputs(header, stderr); ++ } ++ return header; ++} ++ + void +-http_reply(struct client *client, sds message, http_code sts, http_flags type) ++http_reply(struct client *client, sds message, ++ http_code sts, http_flags type, http_options options) + { + http_flags flags = client->u.http.flags; + char length[32]; /* hex length */ +@@ -313,6 +413,15 @@ + + suffix = sdsnewlen("0\r\n\r\n", 5); /* chunked suffix */ + client->u.http.flags &= ~HTTP_FLAG_STREAMING; /* end of stream! 
*/ ++ ++ } else if (flags & HTTP_FLAG_NO_BODY) { ++ if (client->u.http.parser.method == HTTP_OPTIONS) ++ buffer = http_response_access(client, sts, options); ++ else if (client->u.http.parser.method == HTTP_TRACE) ++ buffer = http_response_trace(client); ++ else /* HTTP_HEAD */ ++ buffer = http_response_header(client, 0, sts, type); ++ suffix = NULL; + } else { /* regular non-chunked response - headers + response body */ + if (client->buffer == NULL) { + suffix = message; +@@ -326,10 +435,11 @@ + buffer = http_response_header(client, sdslen(suffix), sts, type); + } + +- if (pmDebugOptions.http) { +- fprintf(stderr, "HTTP response (client=%p)\n%s%s", +- client, buffer, suffix); +- } ++ if (pmDebugOptions.http) ++ fprintf(stderr, "HTTP %s response (client=%p)\n%s%s", ++ http_method_str(client->u.http.parser.method), ++ client, buffer, suffix ? suffix : ""); ++ + client_write(client, buffer, suffix); + } + +@@ -363,7 +473,7 @@ + if (pmDebugOptions.desperate) + fputs(message, stderr); + } +- http_reply(client, message, status, HTTP_FLAG_HTML); ++ http_reply(client, message, status, HTTP_FLAG_HTML, 0); + } + + void +@@ -371,6 +481,7 @@ + { + struct http_parser *parser = &client->u.http.parser; + http_flags flags = client->u.http.flags; ++ const char *method; + sds buffer, suffix; + + /* If the client buffer length is now beyond a set maximum size, +@@ -390,16 +501,18 @@ + buffer = sdsempty(); + } + /* prepend a chunked transfer encoding message length (hex) */ +- buffer = sdscatprintf(buffer, "%lX\r\n", (unsigned long)sdslen(client->buffer)); ++ buffer = sdscatprintf(buffer, "%lX\r\n", ++ (unsigned long)sdslen(client->buffer)); + suffix = sdscatfmt(client->buffer, "\r\n"); + /* reset for next call - original released on I/O completion */ + client->buffer = NULL; /* safe, as now held in 'suffix' */ + + if (pmDebugOptions.http) { +- fprintf(stderr, "HTTP chunked buffer (client %p, len=%lu)\n%s" +- "HTTP chunked suffix (client %p, len=%lu)\n%s", +- client, (unsigned long)sdslen(buffer), buffer, +- client, (unsigned long)sdslen(suffix), suffix); ++ method = http_method_str(client->u.http.parser.method); ++ fprintf(stderr, "HTTP %s chunk buffer (client %p, len=%lu)\n%s" ++ "HTTP %s chunk suffix (client %p, len=%lu)\n%s", ++ method, client, (unsigned long)sdslen(buffer), buffer, ++ method, client, (unsigned long)sdslen(suffix), suffix); + } + client_write(client, buffer, suffix); + +@@ -527,6 +640,8 @@ + + if (length == 0) + return NULL; ++ if (length > MAX_PARAMS_SIZE) ++ return NULL; + for (p = url; p < end; p++) { + if (*p == '\0') + break; +@@ -558,6 +673,11 @@ + struct servlet *servlet; + sds url; + ++ if (pmDebugOptions.http || pmDebugOptions.appl0) ++ fprintf(stderr, "HTTP %s %.*s\n", ++ http_method_str(client->u.http.parser.method), ++ (int)length, offset); ++ + if (!(url = http_url_decode(offset, length, &client->u.http.parameters))) + return NULL; + for (servlet = proxy->servlets; servlet != NULL; servlet = servlet->next) { +@@ -576,13 +696,24 @@ + { + struct client *client = (struct client *)request->data; + struct servlet *servlet; ++ sds buffer; + int sts; + + http_client_release(client); /* new URL, clean slate */ +- +- if ((servlet = servlet_lookup(client, offset, length)) != NULL) { ++ /* server options - https://tools.ietf.org/html/rfc7231#section-4.3.7 */ ++ if (length == 1 && *offset == '*' && ++ client->u.http.parser.method == HTTP_OPTIONS) { ++ buffer = http_response_access(client, HTTP_STATUS_OK, HTTP_SERVER_OPTIONS); ++ client_write(client, buffer, NULL); ++ } else if ((servlet 
= servlet_lookup(client, offset, length)) != NULL) { + client->u.http.servlet = servlet; + if ((sts = client->u.http.parser.status_code) == 0) { ++ if (client->u.http.parser.method == HTTP_OPTIONS || ++ client->u.http.parser.method == HTTP_TRACE || ++ client->u.http.parser.method == HTTP_HEAD) ++ client->u.http.flags |= HTTP_FLAG_NO_BODY; ++ else ++ client->u.http.flags &= ~HTTP_FLAG_NO_BODY; + client->u.http.headers = dictCreate(&sdsOwnDictCallBacks, NULL); + return 0; + } +@@ -616,6 +747,11 @@ + + if (client->u.http.parser.status_code || !client->u.http.headers) + return 0; /* already in process of failing connection */ ++ if (dictSize(client->u.http.headers) >= MAX_HEADERS_SIZE) { ++ client->u.http.parser.status_code = ++ HTTP_STATUS_REQUEST_HEADER_FIELDS_TOO_LARGE; ++ return 0; ++ } + + field = sdsnewlen(offset, length); + if (pmDebugOptions.http) +@@ -826,6 +962,17 @@ + if (chunked_transfer_size < smallest_buffer_size) + chunked_transfer_size = smallest_buffer_size; + ++ HEADER_ACCESS_CONTROL_REQUEST_HEADERS = sdsnew("Access-Control-Request-Headers"); ++ HEADER_ACCESS_CONTROL_REQUEST_METHOD = sdsnew("Access-Control-Request-Method"); ++ HEADER_ACCESS_CONTROL_ALLOW_METHODS = sdsnew("Access-Control-Allow-Methods"); ++ HEADER_ACCESS_CONTROL_ALLOW_HEADERS = sdsnew("Access-Control-Allow-Headers"); ++ HEADER_ACCESS_CONTROL_ALLOW_ORIGIN = sdsnew("Access-Control-Allow-Origin"); ++ HEADER_ACCESS_CONTROL_ALLOWED_HEADERS = sdsnew("Accept, Accept-Language, Content-Language, Content-Type"); ++ HEADER_CONNECTION = sdsnew("Connection"); ++ HEADER_CONTENT_LENGTH = sdsnew("Content-Length"); ++ HEADER_ORIGIN = sdsnew("Origin"); ++ HEADER_WWW_AUTHENTICATE = sdsnew("WWW-Authenticate"); ++ + register_servlet(proxy, &pmseries_servlet); + register_servlet(proxy, &pmwebapi_servlet); + } +@@ -839,4 +986,15 @@ + servlet->close(proxy); + + proxymetrics_close(proxy, METRICS_HTTP); ++ ++ sdsfree(HEADER_ACCESS_CONTROL_REQUEST_HEADERS); ++ sdsfree(HEADER_ACCESS_CONTROL_REQUEST_METHOD); ++ sdsfree(HEADER_ACCESS_CONTROL_ALLOW_METHODS); ++ sdsfree(HEADER_ACCESS_CONTROL_ALLOW_HEADERS); ++ sdsfree(HEADER_ACCESS_CONTROL_ALLOW_ORIGIN); ++ sdsfree(HEADER_ACCESS_CONTROL_ALLOWED_HEADERS); ++ sdsfree(HEADER_CONNECTION); ++ sdsfree(HEADER_CONTENT_LENGTH); ++ sdsfree(HEADER_ORIGIN); ++ sdsfree(HEADER_WWW_AUTHENTICATE); + } +--- a/src/pmproxy/src/series.c 2020-02-25 17:47:56.000000000 +1100 ++++ b/src/pmproxy/src/series.c 2020-06-11 13:10:57.398576470 +1000 +@@ -1,5 +1,5 @@ + /* +- * Copyright (c) 2019 Red Hat. ++ * Copyright (c) 2019-2020 Red Hat. 
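
Annotation (not part of the patch): the http.c hunks above add HTTP OPTIONS preflight handling by checking the Access-Control-Request-Method header against a per-endpoint bitmask and emitting a matching Access-Control-Allow-Methods list. A small illustrative sketch of that logic follows; the OPT_* values are made up here, not the real http_options enum.

```c
/* Sketch: preflight method check against an allowed-methods bitmask. */
#include <stdio.h>
#include <string.h>

enum { OPT_GET = 1, OPT_PUT = 2, OPT_HEAD = 4, OPT_POST = 8, OPT_OPTIONS = 16 };

static int
method_allowed(const char *method, unsigned int options)
{
    if (strcmp(method, "GET") == 0)  return (options & OPT_GET) != 0;
    if (strcmp(method, "PUT") == 0)  return (options & OPT_PUT) != 0;
    if (strcmp(method, "HEAD") == 0) return (options & OPT_HEAD) != 0;
    if (strcmp(method, "POST") == 0) return (options & OPT_POST) != 0;
    return 0;
}

static const char *
methods_string(char *buffer, size_t length, unsigned int options)
{
    buffer[0] = '\0';
    if (options & OPT_GET)     strncat(buffer, ", GET", length - strlen(buffer) - 1);
    if (options & OPT_PUT)     strncat(buffer, ", PUT", length - strlen(buffer) - 1);
    if (options & OPT_HEAD)    strncat(buffer, ", HEAD", length - strlen(buffer) - 1);
    if (options & OPT_POST)    strncat(buffer, ", POST", length - strlen(buffer) - 1);
    if (options & OPT_OPTIONS) strncat(buffer, ", OPTIONS", length - strlen(buffer) - 1);
    return buffer[0] ? buffer + 2 : "";     /* skip leading ", " */
}

int
main(void)
{
    unsigned int endpoint = OPT_GET | OPT_HEAD | OPT_OPTIONS;
    char buf[64];

    /* 200 plus Allow-Methods for an acceptable preflight, 405 otherwise */
    printf("%d %s\n", method_allowed("GET", endpoint) ? 200 : 405,
           methods_string(buf, sizeof(buf), endpoint));
    printf("%d\n", method_allowed("POST", endpoint) ? 200 : 405);
    return 0;
}
```
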
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published +@@ -15,8 +15,7 @@ + #include + + typedef enum pmSeriesRestKey { +- RESTKEY_NONE = 0, +- RESTKEY_SOURCE, ++ RESTKEY_SOURCE = 1, + RESTKEY_DESC, + RESTKEY_INSTS, + RESTKEY_LABELS, +@@ -29,7 +28,8 @@ + + typedef struct pmSeriesRestCommand { + const char *name; +- unsigned int size; ++ unsigned int namelen : 16; ++ unsigned int options : 16; + pmSeriesRestKey key; + } pmSeriesRestCommand; + +@@ -39,7 +39,8 @@ + pmSeriesFlags flags; + pmSeriesTimeWindow window; + uv_work_t loading; +- unsigned int working; ++ unsigned int working : 1; ++ unsigned int options : 16; + int nsids; + pmSID *sids; + pmSID sid; +@@ -55,16 +56,25 @@ + } pmSeriesBaton; + + static pmSeriesRestCommand commands[] = { +- { .key = RESTKEY_QUERY, .name = "query", .size = sizeof("query")-1 }, +- { .key = RESTKEY_DESC, .name = "descs", .size = sizeof("descs")-1 }, +- { .key = RESTKEY_INSTS, .name = "instances", .size = sizeof("instances")-1 }, +- { .key = RESTKEY_LABELS, .name = "labels", .size = sizeof("labels")-1 }, +- { .key = RESTKEY_METRIC, .name = "metrics", .size = sizeof("metrics")-1 }, +- { .key = RESTKEY_SOURCE, .name = "sources", .size = sizeof("sources")-1 }, +- { .key = RESTKEY_VALUES, .name = "values", .size = sizeof("values")-1 }, +- { .key = RESTKEY_LOAD, .name = "load", .size = sizeof("load")-1 }, +- { .key = RESTKEY_PING, .name = "ping", .size = sizeof("ping")-1 }, +- { .key = RESTKEY_NONE } ++ { .key = RESTKEY_QUERY, .options = HTTP_OPTIONS_GET | HTTP_OPTIONS_POST, ++ .name = "query", .namelen = sizeof("query")-1 }, ++ { .key = RESTKEY_DESC, .options = HTTP_OPTIONS_GET | HTTP_OPTIONS_POST, ++ .name = "descs", .namelen = sizeof("descs")-1 }, ++ { .key = RESTKEY_INSTS, .options = HTTP_OPTIONS_GET | HTTP_OPTIONS_POST, ++ .name = "instances", .namelen = sizeof("instances")-1 }, ++ { .key = RESTKEY_LABELS, .options = HTTP_OPTIONS_GET | HTTP_OPTIONS_POST, ++ .name = "labels", .namelen = sizeof("labels")-1 }, ++ { .key = RESTKEY_METRIC, .options = HTTP_OPTIONS_GET | HTTP_OPTIONS_POST, ++ .name = "metrics", .namelen = sizeof("metrics")-1 }, ++ { .key = RESTKEY_SOURCE, .options = HTTP_OPTIONS_GET | HTTP_OPTIONS_POST, ++ .name = "sources", .namelen = sizeof("sources")-1 }, ++ { .key = RESTKEY_VALUES, .options = HTTP_OPTIONS_GET | HTTP_OPTIONS_POST, ++ .name = "values", .namelen = sizeof("values")-1 }, ++ { .key = RESTKEY_LOAD, .options = HTTP_OPTIONS_GET | HTTP_OPTIONS_POST, ++ .name = "load", .namelen = sizeof("load")-1 }, ++ { .key = RESTKEY_PING, .options = HTTP_OPTIONS_GET, ++ .name = "ping", .namelen = sizeof("ping")-1 }, ++ { .name = NULL } /* sentinel */ + }; + + /* constant string keys (initialized during servlet setup) */ +@@ -78,8 +88,8 @@ + static const char pmseries_success[] = "{\"success\":true}\r\n"; + static const char pmseries_failure[] = "{\"success\":false}\r\n"; + +-static pmSeriesRestKey +-pmseries_lookup_restkey(sds url) ++static pmSeriesRestCommand * ++pmseries_lookup_rest_command(sds url) + { + pmSeriesRestCommand *cp; + const char *name; +@@ -88,11 +98,11 @@ + strncmp(url, "/series/", sizeof("/series/") - 1) == 0) { + name = (const char *)url + sizeof("/series/") - 1; + for (cp = &commands[0]; cp->name; cp++) { +- if (strncmp(cp->name, name, cp->size) == 0) +- return cp->key; ++ if (strncmp(cp->name, name, cp->namelen) == 0) ++ return cp; + } + } +- return RESTKEY_NONE; ++ return NULL; + } + + static void +@@ -518,6 +528,7 @@ + { + 
pmSeriesBaton *baton = (pmSeriesBaton *)arg; + struct client *client = baton->client; ++ http_options options = baton->options; + http_flags flags = client->u.http.flags; + http_code code; + sds msg; +@@ -545,7 +556,7 @@ + msg = sdsnewlen(pmseries_failure, sizeof(pmseries_failure) - 1); + flags |= HTTP_FLAG_JSON; + } +- http_reply(client, msg, code, flags); ++ http_reply(client, msg, code, flags, options); + } + + static void +@@ -555,6 +566,14 @@ + fprintf(stderr, "series module setup (arg=%p)\n", arg); + } + ++static void ++pmseries_log(pmLogLevel level, sds message, void *arg) ++{ ++ pmSeriesBaton *baton = (pmSeriesBaton *)arg; ++ ++ proxylog(level, message, baton->client->proxy); ++} ++ + static pmSeriesSettings pmseries_settings = { + .callbacks.on_match = on_pmseries_match, + .callbacks.on_desc = on_pmseries_desc, +@@ -567,7 +586,7 @@ + .callbacks.on_label = on_pmseries_label, + .callbacks.on_done = on_pmseries_done, + .module.on_setup = pmseries_setup, +- .module.on_info = proxylog, ++ .module.on_info = pmseries_log, + }; + + static void +@@ -686,7 +705,6 @@ + case RESTKEY_PING: + break; + +- case RESTKEY_NONE: + default: + client->u.http.parser.status_code = HTTP_STATUS_BAD_REQUEST; + break; +@@ -702,15 +720,16 @@ + pmseries_request_url(struct client *client, sds url, dict *parameters) + { + pmSeriesBaton *baton; +- pmSeriesRestKey key; ++ pmSeriesRestCommand *command; + +- if ((key = pmseries_lookup_restkey(url)) == RESTKEY_NONE) ++ if ((command = pmseries_lookup_rest_command(url)) == NULL) + return 0; + + if ((baton = calloc(1, sizeof(*baton))) != NULL) { + client->u.http.data = baton; + baton->client = client; +- baton->restkey = key; ++ baton->restkey = command->key; ++ baton->options = command->options; + pmseries_setup_request_parameters(client, baton, parameters); + } else { + client->u.http.parser.status_code = HTTP_STATUS_INTERNAL_SERVER_ERROR; +@@ -794,10 +813,12 @@ + + if (baton->query == NULL) { + message = sdsnewlen(failed, sizeof(failed) - 1); +- http_reply(client, message, HTTP_STATUS_BAD_REQUEST, HTTP_FLAG_JSON); ++ http_reply(client, message, HTTP_STATUS_BAD_REQUEST, ++ HTTP_FLAG_JSON, baton->options); + } else if (baton->working) { + message = sdsnewlen(loading, sizeof(loading) - 1); +- http_reply(client, message, HTTP_STATUS_CONFLICT, HTTP_FLAG_JSON); ++ http_reply(client, message, HTTP_STATUS_CONFLICT, ++ HTTP_FLAG_JSON, baton->options); + } else { + uv_queue_work(client->proxy->events, &baton->loading, + pmseries_load_work, pmseries_load_done); +@@ -810,8 +831,17 @@ + pmSeriesBaton *baton = (pmSeriesBaton *)client->u.http.data; + int sts; + +- if (client->u.http.parser.status_code) ++ if (client->u.http.parser.status_code) { ++ on_pmseries_done(-EINVAL, baton); ++ return 1; ++ } ++ ++ if (client->u.http.parser.method == HTTP_OPTIONS || ++ client->u.http.parser.method == HTTP_TRACE || ++ client->u.http.parser.method == HTTP_HEAD) { ++ on_pmseries_done(0, baton); + return 0; ++ } + + switch (baton->restkey) { + case RESTKEY_QUERY: +--- a/src/pmproxy/src/webapi.c 2020-04-17 15:39:17.000000000 +1000 ++++ b/src/pmproxy/src/webapi.c 2020-06-11 13:10:57.399576484 +1000 +@@ -1,5 +1,5 @@ + /* +- * Copyright (c) 2019 Red Hat. ++ * Copyright (c) 2019-2020 Red Hat. 
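
Annotation (not part of the patch): the series.c hunks above replace a bare key lookup with a command table whose entries carry a precomputed name length and an allowed-methods mask in 16-bit bitfields, terminated by a NULL-name sentinel. The sketch below shows that table-plus-prefix-lookup shape in isolation; the keys, options and URL prefix are illustrative only.

```c
/* Sketch: REST command table with sentinel entry and prefix lookup. */
#include <stdio.h>
#include <string.h>

enum { KEY_QUERY = 1, KEY_PING };
enum { OPT_GET = 1, OPT_POST = 2 };

typedef struct rest_command {
    const char   *name;
    unsigned int  namelen : 16;   /* strlen(name), precomputed */
    unsigned int  options : 16;   /* allowed-method bitmask */
    int           key;
} rest_command;

static rest_command commands[] = {
    { .key = KEY_QUERY, .options = OPT_GET | OPT_POST,
      .name = "query", .namelen = sizeof("query") - 1 },
    { .key = KEY_PING,  .options = OPT_GET,
      .name = "ping",  .namelen = sizeof("ping") - 1 },
    { .name = NULL }              /* sentinel */
};

static const rest_command *
lookup(const char *url)
{
    static const char prefix[] = "/series/";
    const rest_command *cp;

    if (strncmp(url, prefix, sizeof(prefix) - 1) != 0)
        return NULL;
    url += sizeof(prefix) - 1;
    for (cp = commands; cp->name != NULL; cp++)
        if (strncmp(cp->name, url, cp->namelen) == 0)
            return cp;            /* caller reads both key and options */
    return NULL;
}

int
main(void)
{
    const rest_command *cp = lookup("/series/query?expr=hinv*");
    printf("%s\n", cp ? cp->name : "unknown");
    return 0;
}
```
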
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published +@@ -18,8 +18,7 @@ + #include "util.h" + + typedef enum pmWebRestKey { +- RESTKEY_NONE = 0, +- RESTKEY_CONTEXT, ++ RESTKEY_CONTEXT = 1, + RESTKEY_METRIC, + RESTKEY_FETCH, + RESTKEY_INDOM, +@@ -32,7 +31,8 @@ + + typedef struct pmWebRestCommand { + const char *name; +- unsigned int size; ++ unsigned int namelen : 16; ++ unsigned int options : 16; + pmWebRestKey key; + } pmWebRestCommand; + +@@ -47,6 +47,7 @@ + sds password; /* from basic auth header */ + unsigned int times : 1; + unsigned int compat : 1; ++ unsigned int options : 16; + unsigned int numpmids; + unsigned int numvsets; + unsigned int numinsts; +@@ -56,21 +57,31 @@ + } pmWebGroupBaton; + + static pmWebRestCommand commands[] = { +- { .key = RESTKEY_CONTEXT, .name = "context", .size = sizeof("context")-1 }, +- { .key = RESTKEY_PROFILE, .name = "profile", .size = sizeof("profile")-1 }, +- { .key = RESTKEY_SCRAPE, .name = "metrics", .size = sizeof("metrics")-1 }, +- { .key = RESTKEY_METRIC, .name = "metric", .size = sizeof("metric")-1 }, +- { .key = RESTKEY_DERIVE, .name = "derive", .size = sizeof("derive")-1 }, +- { .key = RESTKEY_FETCH, .name = "fetch", .size = sizeof("fetch")-1 }, +- { .key = RESTKEY_INDOM, .name = "indom", .size = sizeof("indom")-1 }, +- { .key = RESTKEY_STORE, .name = "store", .size = sizeof("store")-1 }, +- { .key = RESTKEY_CHILD, .name = "children", .size = sizeof("children")-1 }, +- { .key = RESTKEY_NONE } ++ { .key = RESTKEY_CONTEXT, .options = HTTP_OPTIONS_GET, ++ .name = "context", .namelen = sizeof("context")-1 }, ++ { .key = RESTKEY_PROFILE, .options = HTTP_OPTIONS_GET, ++ .name = "profile", .namelen = sizeof("profile")-1 }, ++ { .key = RESTKEY_SCRAPE, .options = HTTP_OPTIONS_GET, ++ .name = "metrics", .namelen = sizeof("metrics")-1 }, ++ { .key = RESTKEY_METRIC, .options = HTTP_OPTIONS_GET, ++ .name = "metric", .namelen = sizeof("metric")-1 }, ++ { .key = RESTKEY_DERIVE, .options = HTTP_OPTIONS_GET | HTTP_OPTIONS_POST, ++ .name = "derive", .namelen = sizeof("derive")-1 }, ++ { .key = RESTKEY_FETCH, .options = HTTP_OPTIONS_GET, ++ .name = "fetch", .namelen = sizeof("fetch")-1 }, ++ { .key = RESTKEY_INDOM, .options = HTTP_OPTIONS_GET, ++ .name = "indom", .namelen = sizeof("indom")-1 }, ++ { .key = RESTKEY_STORE, .options = HTTP_OPTIONS_GET, ++ .name = "store", .namelen = sizeof("store")-1 }, ++ { .key = RESTKEY_CHILD, .options = HTTP_OPTIONS_GET, ++ .name = "children", .namelen = sizeof("children")-1 }, ++ { .name = NULL } /* sentinel */ + }; + + static pmWebRestCommand openmetrics[] = { +- { .key = RESTKEY_SCRAPE, .name = "/metrics", .size = sizeof("/metrics")-1 }, +- { .key = RESTKEY_NONE } ++ { .key = RESTKEY_SCRAPE, .options = HTTP_OPTIONS_GET, ++ .name = "/metrics", .namelen = sizeof("/metrics")-1 }, ++ { .name = NULL } /* sentinel */ + }; + + static sds PARAM_NAMES, PARAM_NAME, PARAM_PMIDS, PARAM_PMID, +@@ -78,8 +89,8 @@ + PARAM_CONTEXT, PARAM_CLIENT; + + +-static pmWebRestKey +-pmwebapi_lookup_restkey(sds url, unsigned int *compat, sds *context) ++static pmWebRestCommand * ++pmwebapi_lookup_rest_command(sds url, unsigned int *compat, sds *context) + { + pmWebRestCommand *cp; + const char *name, *ctxid = NULL; +@@ -94,7 +105,7 @@ + name++; + } while (isdigit((int)(*name))); + if (*name++ != '/') +- return RESTKEY_NONE; ++ return NULL; + *context = sdsnewlen(ctxid, name - ctxid - 1); + } + if (*name == '_') { +@@ -102,13 +113,13 @@ + *compat = 1; 
/* backward-compatibility mode */ + } + for (cp = &commands[0]; cp->name; cp++) +- if (strncmp(cp->name, name, cp->size) == 0) +- return cp->key; ++ if (strncmp(cp->name, name, cp->namelen) == 0) ++ return cp; + } + for (cp = &openmetrics[0]; cp->name; cp++) +- if (strncmp(cp->name, url, cp->size) == 0) +- return cp->key; +- return RESTKEY_NONE; ++ if (strncmp(cp->name, url, cp->namelen) == 0) ++ return cp; ++ return NULL; + } + + static void +@@ -584,9 +595,10 @@ + { + pmWebGroupBaton *baton = (pmWebGroupBaton *)arg; + struct client *client = (struct client *)baton->client; +- sds quoted, msg; ++ http_options options = baton->options; + http_flags flags = client->u.http.flags; + http_code code; ++ sds quoted, msg; + + if (pmDebugOptions.series) + fprintf(stderr, "%s: client=%p (sts=%d,msg=%s)\n", "on_pmwebapi_done", +@@ -596,7 +608,9 @@ + code = HTTP_STATUS_OK; + /* complete current response with JSON suffix if needed */ + if ((msg = baton->suffix) == NULL) { /* empty OK response */ +- if (flags & HTTP_FLAG_JSON) { ++ if (flags & HTTP_FLAG_NO_BODY) { ++ msg = sdsempty(); ++ } else if (flags & HTTP_FLAG_JSON) { + msg = sdsnewlen("{", 1); + if (context) + msg = sdscatfmt(msg, "\"context\":%S,", context); +@@ -628,10 +642,18 @@ + sdsfree(quoted); + } + +- http_reply(client, msg, code, flags); ++ http_reply(client, msg, code, flags, options); + client_put(client); + } + ++static void ++on_pmwebapi_info(pmLogLevel level, sds message, void *arg) ++{ ++ pmWebGroupBaton *baton = (pmWebGroupBaton *)arg; ++ ++ proxylog(level, message, baton->client->proxy); ++} ++ + static pmWebGroupSettings pmwebapi_settings = { + .callbacks.on_context = on_pmwebapi_context, + .callbacks.on_metric = on_pmwebapi_metric, +@@ -645,7 +667,7 @@ + .callbacks.on_scrape_labels = on_pmwebapi_scrape_labels, + .callbacks.on_check = on_pmwebapi_check, + .callbacks.on_done = on_pmwebapi_done, +- .module.on_info = proxylog, ++ .module.on_info = on_pmwebapi_info, + }; + + /* +@@ -734,7 +756,6 @@ + client->u.http.flags |= HTTP_FLAG_JSON; + break; + +- case RESTKEY_NONE: + default: + client->u.http.parser.status_code = HTTP_STATUS_BAD_REQUEST; + break; +@@ -750,11 +771,11 @@ + pmwebapi_request_url(struct client *client, sds url, dict *parameters) + { + pmWebGroupBaton *baton; +- pmWebRestKey key; ++ pmWebRestCommand *command; + unsigned int compat = 0; + sds context = NULL; + +- if ((key = pmwebapi_lookup_restkey(url, &compat, &context)) == RESTKEY_NONE) { ++ if (!(command = pmwebapi_lookup_rest_command(url, &compat, &context))) { + sdsfree(context); + return 0; + } +@@ -762,7 +783,8 @@ + if ((baton = calloc(1, sizeof(*baton))) != NULL) { + client->u.http.data = baton; + baton->client = client; +- baton->restkey = key; ++ baton->restkey = command->key; ++ baton->options = command->options; + baton->compat = compat; + baton->context = context; + pmwebapi_setup_request_parameters(client, baton, parameters); +@@ -885,17 +907,27 @@ + uv_loop_t *loop = client->proxy->events; + uv_work_t *work; + +- /* fail early if something has already gone wrong */ +- if (client->u.http.parser.status_code != 0) ++ /* take a reference on the client to prevent freeing races on close */ ++ client_get(client); ++ ++ if (client->u.http.parser.status_code) { ++ on_pmwebapi_done(NULL, -EINVAL, NULL, baton); + return 1; ++ } ++ ++ if (client->u.http.parser.method == HTTP_OPTIONS || ++ client->u.http.parser.method == HTTP_TRACE || ++ client->u.http.parser.method == HTTP_HEAD) { ++ on_pmwebapi_done(NULL, 0, NULL, baton); ++ return 0; ++ } + +- if ((work = 
(uv_work_t *)calloc(1, sizeof(uv_work_t))) == NULL) ++ if ((work = (uv_work_t *)calloc(1, sizeof(uv_work_t))) == NULL) { ++ client_put(client); + return 1; ++ } + work->data = baton; + +- /* take a reference on the client to prevent freeing races on close */ +- client_get(client); +- + /* submit command request to worker thread */ + switch (baton->restkey) { + case RESTKEY_CONTEXT: +@@ -925,11 +957,10 @@ + case RESTKEY_SCRAPE: + uv_queue_work(loop, work, pmwebapi_scrape, pmwebapi_work_done); + break; +- case RESTKEY_NONE: + default: ++ pmwebapi_work_done(work, -EINVAL); + client->u.http.parser.status_code = HTTP_STATUS_BAD_REQUEST; +- client_put(client); +- free(work); ++ on_pmwebapi_done(NULL, -EINVAL, NULL, baton); + return 1; + } + return 0; +--- a/src/pmproxy/src/http.h 2019-12-02 16:43:20.000000000 +1100 ++++ b/src/pmproxy/src/http.h 2020-06-11 13:10:57.398576470 +1000 +@@ -1,5 +1,5 @@ + /* +- * Copyright (c) 2019 Red Hat. ++ * Copyright (c) 2019-2020 Red Hat. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published +@@ -34,29 +34,39 @@ + HTTP_FLAG_JSON = (1<<0), + HTTP_FLAG_TEXT = (1<<1), + HTTP_FLAG_HTML = (1<<2), +- HTTP_FLAG_JS = (1<<3), +- HTTP_FLAG_CSS = (1<<4), +- HTTP_FLAG_ICO = (1<<5), +- HTTP_FLAG_JPG = (1<<6), +- HTTP_FLAG_PNG = (1<<7), +- HTTP_FLAG_GIF = (1<<8), + HTTP_FLAG_UTF8 = (1<<10), + HTTP_FLAG_UTF16 = (1<<11), ++ HTTP_FLAG_NO_BODY = (1<<13), + HTTP_FLAG_COMPRESS = (1<<14), + HTTP_FLAG_STREAMING = (1<<15), + /* maximum 16 for server.h */ + } http_flags; + ++typedef enum http_options { ++ HTTP_OPT_GET = (1 << HTTP_GET), ++ HTTP_OPT_PUT = (1 << HTTP_PUT), ++ HTTP_OPT_HEAD = (1 << HTTP_HEAD), ++ HTTP_OPT_POST = (1 << HTTP_POST), ++ HTTP_OPT_TRACE = (1 << HTTP_TRACE), ++ HTTP_OPT_OPTIONS = (1 << HTTP_OPTIONS), ++ /* maximum 16 in command opts fields */ ++} http_options; ++ ++#define HTTP_COMMON_OPTIONS (HTTP_OPT_HEAD | HTTP_OPT_TRACE | HTTP_OPT_OPTIONS) ++#define HTTP_OPTIONS_GET (HTTP_COMMON_OPTIONS | HTTP_OPT_GET) ++#define HTTP_OPTIONS_PUT (HTTP_COMMON_OPTIONS | HTTP_OPT_PUT) ++#define HTTP_OPTIONS_POST (HTTP_COMMON_OPTIONS | HTTP_OPT_POST) ++#define HTTP_SERVER_OPTIONS (HTTP_OPTIONS_GET | HTTP_OPT_PUT | HTTP_OPT_POST) ++ + typedef unsigned int http_code; + + extern void http_transfer(struct client *); +-extern void http_reply(struct client *, sds, http_code, http_flags); ++extern void http_reply(struct client *, sds, http_code, http_flags, http_options); + extern void http_error(struct client *, http_code, const char *); + + extern int http_decode(const char *, size_t, sds); + extern const char *http_status_mapping(http_code); + extern const char *http_content_type(http_flags); +-extern http_flags http_suffix_type(const char *); + + extern sds http_get_buffer(struct client *); + extern void http_set_buffer(struct client *, sds, http_flags); +--- a/qa/1837 1970-01-01 10:00:00.000000000 +1000 ++++ b/qa/1837 2020-06-11 13:10:57.396576440 +1000 +@@ -0,0 +1,55 @@ ++#!/bin/sh ++# PCP QA Test No. 1837 ++# Exercise PMWEBAPI handling server OPTIONS. ++# ++# Copyright (c) 2020 Red Hat. All Rights Reserved. ++# ++ ++seq=`basename $0` ++echo "QA output created by $seq" ++ ++# get standard environment, filters and checks ++. ./common.product ++. ./common.filter ++. 
./common.check ++ ++_check_series ++which curl >/dev/null 2>&1 || _notrun "No curl binary installed" ++curl --request-targets 2>&1 | grep -q 'requires parameter' && \ ++ _notrun "Test requires curl --request-targets option" ++ ++status=1 # failure is the default! ++$sudo rm -rf $tmp.* $seq.full ++trap "cd $here; _cleanup; exit \$status" 0 1 2 3 15 ++ ++pmproxy_was_running=false ++[ -f $PCP_RUN_DIR/pmproxy.pid ] && pmproxy_was_running=true ++echo "pmproxy_was_running=$pmproxy_was_running" >>$here/$seq.full ++ ++_cleanup() ++{ ++ if $pmproxy_was_running ++ then ++ echo "Restart pmproxy ..." >>$here/$seq.full ++ _service pmproxy restart >>$here/$seq.full 2>&1 ++ _wait_for_pmproxy ++ else ++ echo "Stopping pmproxy ..." >>$here/$seq.full ++ _service pmproxy stop >>$here/$seq.full 2>&1 ++ fi ++ $sudo rm -f $tmp.* ++} ++ ++# real QA test starts here ++_service pmproxy restart >/dev/null 2>&1 ++ ++curl -isS --request-target "*" -X OPTIONS http://localhost:44322 \ ++ 2>&1 | tee -a $here/$seq.full | _webapi_header_filter ++ ++echo >>$here/$seq.full ++echo "=== pmproxy log ===" >>$here/$seq.full ++cat $PCP_LOG_DIR/pmproxy/pmproxy.log >>$here/$seq.full ++ ++# success, all done ++status=0 ++exit +--- a/qa/1837.out 1970-01-01 10:00:00.000000000 +1000 ++++ b/qa/1837.out 2020-06-11 13:10:57.397576455 +1000 +@@ -0,0 +1,6 @@ ++QA output created by 1837 ++ ++Access-Control-Allow-Methods: GET, PUT, HEAD, POST, TRACE, OPTIONS ++Content-Length: 0 ++Date: DATE ++HTTP/1.1 200 OK +--- a/qa/780 2020-04-14 14:41:41.000000000 +1000 ++++ b/qa/780 2020-06-11 13:10:57.397576455 +1000 +@@ -1,8 +1,8 @@ + #!/bin/sh + # PCP QA Test No. 780 +-# Exercise PMWEBAPI Access-Control-Allow-Origin HTTP header. ++# Exercise PMWEBAPI CORS headers. + # +-# Copyright (c) 2014,2019 Red Hat. ++# Copyright (c) 2014,2019-2020 Red Hat. + # + + seq=`basename $0` +@@ -16,7 +16,6 @@ + _check_series + which curl >/dev/null 2>&1 || _notrun "No curl binary installed" + +-signal=$PCP_BINADM_DIR/pmsignal + status=1 # failure is the default! 
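
Annotation (not part of the patch): the webapi.c hunk further above moves client_get() before any early-return path and adds client_put() on each failure path, so the client cannot be freed while an asynchronous request is still in flight. A hedged, self-contained sketch of that get/put ordering around uv_queue_work() follows; "struct client" and the refcount here are stand-ins, not pmproxy's own types.

```c
/* Sketch: pin an object with a reference before queueing async work,
 * drop the reference on every failure path and in the completion callback. */
#include <stdio.h>
#include <stdlib.h>
#include <uv.h>

struct client {
    int refcount;
};

static void client_get(struct client *cp) { cp->refcount++; }
static void client_put(struct client *cp)
{
    if (--cp->refcount == 0) {
        fprintf(stderr, "freeing client %p\n", (void *)cp);
        free(cp);
    }
}

static void do_work(uv_work_t *work) { (void)work; /* worker thread */ }

static void work_done(uv_work_t *work, int status)
{
    (void)status;
    client_put((struct client *)work->data);   /* release request reference */
    free(work);
}

static int
submit_request(uv_loop_t *loop, struct client *cp)
{
    uv_work_t *work;

    client_get(cp);                            /* pin across the async call */
    if ((work = calloc(1, sizeof(*work))) == NULL) {
        client_put(cp);                        /* failure path drops it too */
        return -1;
    }
    work->data = cp;
    return uv_queue_work(loop, work, do_work, work_done);
}

int
main(void)
{
    uv_loop_t *loop = uv_default_loop();
    struct client *cp = calloc(1, sizeof(*cp));

    if (cp == NULL)
        return 1;
    cp->refcount = 1;                          /* caller's own reference */
    submit_request(loop, cp);
    uv_run(loop, UV_RUN_DEFAULT);
    client_put(cp);                            /* drop caller's reference */
    return 0;
}
```
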
+ $sudo rm -rf $tmp.* $seq.full + trap "cd $here; _cleanup; exit \$status" 0 1 2 3 15 +@@ -39,13 +38,21 @@ + $sudo rm -f $tmp.* + } + +-unset http_proxy +-unset HTTP_PROXY +- + # real QA test starts here + _service pmproxy restart >/dev/null 2>&1 + +-curl -s -S "http://localhost:44323/pmapi/context" -I | _webapi_header_filter ++echo "=== Basic" | tee -a $here/$seq.full ++curl -IsS "http://localhost:44323/pmapi/context" | _webapi_header_filter ++ ++echo "=== Preflight" | tee -a $here/$seq.full ++curl -isS -X OPTIONS "http://localhost:44323/series/query?expr=hinv*" | _webapi_header_filter ++ ++echo "=== OK Request Method" | tee -a $here/$seq.full ++curl -isS -X OPTIONS -H "Origin: http://example.com" -H "Access-Control-Request-Method: GET" "http://localhost:44323/pmapi/context" | _webapi_header_filter ++ ++echo "=== Bad Request Method" | tee -a $here/$seq.full ++curl -isS -X OPTIONS -H "Origin: http://example.com" -H "Access-Control-Request-Method: BAD" "http://localhost:44323/pmapi/context" | _webapi_header_filter ++ + echo >>$here/$seq.full + echo "=== pmproxy log ===" >>$here/$seq.full + cat $PCP_LOG_DIR/pmproxy/pmproxy.log >>$here/$seq.full +--- a/qa/780.out 2020-03-23 09:47:47.000000000 +1100 ++++ b/qa/780.out 2020-06-11 13:10:57.397576455 +1000 +@@ -1,8 +1,27 @@ + QA output created by 780 ++=== Basic + + Access-Control-Allow-Headers: Accept, Accept-Language, Content-Language, Content-Type + Access-Control-Allow-Origin: * +-Content-Length: SIZE + Content-Type: application/json + Date: DATE + HTTP/1.1 200 OK ++Transfer-encoding: chunked ++=== Preflight ++ ++Access-Control-Allow-Methods: GET, HEAD, POST, TRACE, OPTIONS ++Content-Length: 0 ++Date: DATE ++HTTP/1.1 200 OK ++=== OK Request Method ++ ++Access-Control-Allow-Methods: GET, HEAD, TRACE, OPTIONS ++Access-Control-Allow-Origin: http://example.com ++Content-Length: 0 ++Date: DATE ++HTTP/1.1 200 OK ++=== Bad Request Method ++ ++Content-Length: 0 ++Date: DATE ++HTTP/1.1 405 Method Not Allowed +--- a/qa/common.check 2020-05-20 10:51:37.000000000 +1000 ++++ b/qa/common.check 2020-06-11 13:10:57.397576455 +1000 +@@ -2696,7 +2696,7 @@ + tee -a $here/$seq.full \ + | col -b \ + | sed \ +- -e 's/^\(Content-Length:\) [0-9][0-9]*/\1 SIZE/g' \ ++ -e 's/^\(Content-Length:\) [1-9][0-9]*/\1 SIZE/g' \ + -e 's/^\(Date:\).*/\1 DATE/g' \ + -e 's/\(\"context\":\) [0-9][0-9]*/\1 CTXID/g' \ + -e '/^Connection: Keep-Alive/d' \ +--- a/qa/group 2020-05-28 09:15:22.000000000 +1000 ++++ b/qa/group 2020-06-11 13:10:57.397576455 +1000 +@@ -1757,6 +1757,7 @@ + 1724 pmda.bpftrace local python + 1768 pmfind local + 1793 pmrep pcp2xxx python local ++1837 pmproxy local + 1855 pmda.rabbitmq local + 1896 pmlogger logutil pmlc local + 4751 libpcp threads valgrind local pcp +--- a/qa/1211.out 2020-01-20 16:53:42.000000000 +1100 ++++ b/qa/1211.out 2020-06-11 13:10:57.399576484 +1000 +@@ -507,9 +507,11 @@ + Perform simple source-based query ... + + Error handling - descriptor for bad series identifier +-pmseries: [Error] no descriptor for series identifier no.such.identifier + + no.such.identifier ++ PMID: PM_ID_NULL ++ Data Type: ??? InDom: unknown 0xffffffff ++ Semantics: unknown Units: unknown + + Error handling - metric name for bad series identifier + +--- a/src/libpcp_web/src/query.c 2020-01-20 15:43:31.000000000 +1100 ++++ b/src/libpcp_web/src/query.c 2020-06-11 13:10:57.399576484 +1000 +@@ -1938,11 +1938,15 @@ + return -EPROTO; + } + +- /* sanity check - were we given an invalid series identifier? */ ++ /* were we given a non-metric series identifier? (e.g. 
an instance) */ + if (elements[0]->type == REDIS_REPLY_NIL) { +- infofmt(msg, "no descriptor for series identifier %s", series); +- batoninfo(baton, PMLOG_ERROR, msg); +- return -EINVAL; ++ desc->indom = sdscpylen(desc->indom, "unknown", 7); ++ desc->pmid = sdscpylen(desc->pmid, "PM_ID_NULL", 10); ++ desc->semantics = sdscpylen(desc->semantics, "unknown", 7); ++ desc->source = sdscpylen(desc->source, "unknown", 7); ++ desc->type = sdscpylen(desc->type, "unknown", 7); ++ desc->units = sdscpylen(desc->units, "unknown", 7); ++ return 0; + } + + if (extract_string(baton, series, elements[0], &desc->indom, "indom") < 0) diff --git a/SOURCES/redhat-bugzilla-1846705.patch b/SOURCES/redhat-bugzilla-1846705.patch new file mode 100644 index 0000000..6504334 --- /dev/null +++ b/SOURCES/redhat-bugzilla-1846705.patch @@ -0,0 +1,36 @@ +BZ 1846705 - Possible memory leak detected in pcp-atop +f30aff90b qa: add valgrind suppressions needed for valgrind 3.16 + +diff --git a/qa/valgrind-suppress-3.16.0 b/qa/valgrind-suppress-3.16.0 +new file mode 100644 +index 000000000..515591747 +--- /dev/null ++++ b/qa/valgrind-suppress-3.16.0 +@@ -0,0 +1,27 @@ ++# qa/1080 and qa/490 and qa/386 and qa/459 on Fedora 32 ++# at 0x483880B: malloc (vg_replace_malloc.c:299) ++# by 0x4A0D490: tsearch (in /usr/lib64/libc-2.28.so) ++# by 0x4871EA6: __pmFindPDUBuf (pdubuf.c:126) ++# ... ++{ ++ tsearch ++ Memcheck:Leak ++ match-leak-kinds: possible ++ fun:malloc ++ fun:tsearch ++ fun:__pmFindPDUBuf ++ ... ++} ++ ++# qa/1080 and qa/490 and qa/386 and qa/459 on Fedora 32 ++# at 0x483880B: malloc (vg_replace_malloc.c:299) ++# by 0x4871E5F: __pmFindPDUBuf (pdubuf.c:115) ++# ... ++{ ++ findpdubuf ++ Memcheck:Leak ++ match-leak-kinds: possible ++ fun:malloc ++ fun:__pmFindPDUBuf ++ ... ++} diff --git a/SOURCES/redhat-bugzilla-1846711.patch b/SOURCES/redhat-bugzilla-1846711.patch new file mode 100644 index 0000000..1c57ff2 --- /dev/null +++ b/SOURCES/redhat-bugzilla-1846711.patch @@ -0,0 +1,268 @@ +BZ 1846711 - pcp-pmda-openmetrics produces warnings querying grafana in its default configuration +0b2ef2d79 pmdaopenmetrics: add control.status metrics, de-verbosify the log, QA updates +63605e3db qa/1102: tweak openmetrics QA to be more deterministic +649a0c3a2 qa: improve _filter_pmda_remove() in common.filter + +commit 0b2ef2d79686d1e44901263093edeb9e1b9b5f77 +Author: Mark Goodwin +Date: Fri Jun 19 12:18:47 2020 +1000 + + pmdaopenmetrics: add control.status metrics, de-verbosify the log, QA updates + + Resolves: RHBZ#1846711 + + Add openmetrics.control.status (string status per configured URL + of the last fetch) and openmetrics.control.status_code, which + is the integer response code (e.g. 200 is success) with discrete + semantics. + + In addition, we now only spam the PMDA log and systemd journal + when a URL fetch fails if openmetrics.control.debug is non-zero. + Users can instead rely on the new status metrics, which can also + be used for service availability monitoring. These metrics + complement the openmetrics.control.parse_time, fetch_time and + calls counters. + + Includes QA updates and pmdaopenmetrics(1) doc updates. 
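
Annotation (not part of the patch): the commit message above describes keeping a per-source status string and the last HTTP response code, and logging fetch failures only when the debug flag is set. The PMDA itself is Python; the sketch below is a language-neutral illustration in C of that record-then-report pattern, with all names invented for the example.

```c
/* Sketch: remember per-source fetch status/code, gate warnings on debug. */
#include <stdio.h>
#include <string.h>

#define MAX_SOURCES 16

struct source_status {
    char status[128];     /* "success" or failure detail */
    int  status_code;     /* e.g. 200, or 0 if no response was received */
};

static struct source_status sources[MAX_SOURCES];
static int debug;         /* analogous to openmetrics.control.debug */

static void
record_fetch(int cluster, int code, const char *detail)
{
    struct source_status *sp = &sources[cluster];

    sp->status_code = code;
    if (code == 200) {
        snprintf(sp->status, sizeof(sp->status), "success");
    } else {
        snprintf(sp->status, sizeof(sp->status), "failed: %s", detail);
        if (debug)        /* avoid spamming the log by default */
            fprintf(stderr, "Warning: cannot fetch source %d: %s\n",
                    cluster, detail);
    }
}

int
main(void)
{
    record_fetch(1, 200, "");
    record_fetch(2, 0, "connection refused");
    /* a later metric fetch reports these values instead of re-logging them */
    printf("source1: %d %s\n", sources[1].status_code, sources[1].status);
    printf("source2: %d %s\n", sources[2].status_code, sources[2].status);
    return 0;
}
```
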
+ +diff --git a/qa/1321.out b/qa/1321.out +index cee072cd2..4533bccd8 100644 +--- a/qa/1321.out ++++ b/qa/1321.out +@@ -13,6 +13,8 @@ openmetrics.control.calls + openmetrics.control.debug + openmetrics.control.fetch_time + openmetrics.control.parse_time ++openmetrics.control.status ++openmetrics.control.status_code + openmetrics.source1.metric1 + + == Created URL file /var/lib/pcp/pmdas/openmetrics/config.d/source2.url +@@ -22,6 +24,8 @@ openmetrics.control.calls + openmetrics.control.debug + openmetrics.control.fetch_time + openmetrics.control.parse_time ++openmetrics.control.status ++openmetrics.control.status_code + openmetrics.source1.metric1 + openmetrics.source2.metric1 + openmetrics.source2.metric2 +@@ -33,6 +37,8 @@ openmetrics.control.calls + openmetrics.control.debug + openmetrics.control.fetch_time + openmetrics.control.parse_time ++openmetrics.control.status ++openmetrics.control.status_code + openmetrics.source1.metric1 + openmetrics.source2.metric1 + openmetrics.source2.metric2 +@@ -47,6 +53,8 @@ openmetrics.control.calls + openmetrics.control.debug + openmetrics.control.fetch_time + openmetrics.control.parse_time ++openmetrics.control.status ++openmetrics.control.status_code + openmetrics.source1.metric1 + openmetrics.source2.metric1 + openmetrics.source2.metric2 +@@ -63,6 +71,8 @@ openmetrics.control.calls + openmetrics.control.debug + openmetrics.control.fetch_time + openmetrics.control.parse_time ++openmetrics.control.status ++openmetrics.control.status_code + openmetrics.source1.metric1 + openmetrics.source2.metric1 + openmetrics.source2.metric2 +diff --git a/src/pmdas/openmetrics/pmdaopenmetrics.1 b/src/pmdas/openmetrics/pmdaopenmetrics.1 +index d3c7aa85f..0c92e2a11 100644 +--- a/src/pmdas/openmetrics/pmdaopenmetrics.1 ++++ b/src/pmdas/openmetrics/pmdaopenmetrics.1 +@@ -413,10 +413,37 @@ log mandatory on 2 second { + The PMDA maintains special control metrics, as described below. + Apart from + .BR openmetrics.control.debug , +-each of these metrics is a counter and has one instance for each configured metric source. +-The instance domain is adjusted dynamically as new sources are discovered. ++each of these metrics has one instance for each configured metric source. ++All of these metrics have integer values with counter semantics, except ++.BR openmetrics.control.status , ++which has a string value. ++It is important to note that fetching any of the ++.B openmetrics.control ++metrics will only update the counters and status values if the corresponding URL is actually fetched. ++If the source URL is not fetched, the control metric values do not trigger a refresh and the control ++values reported represent the most recent fetch of each corresponding source. ++.PP ++The instance domain for the ++.B openmetrics.control ++metrics is adjusted dynamically as new sources are discovered. + If there are no sources configured, the metric names are still defined + but the instance domain will be empty and a fetch will return no values. ++.IP \fBopenmetrics.control.status\fP ++A string representing the status of the last fetch of the corresponding source. ++This will generally be ++.B success ++for an http response code of 200. ++This metric can be used for service availability monitoring - provided, as stated above, ++the corresponding source URL is fetched too. ++.IP \fBopenmetrics.control.status_code\fP ++This metric is similar to ++.B openmetrics.control.status ++except that it is the integer response code of the last fetch. 
++A value of ++.B 200 ++usually signifies success and any other value failure. ++This metric can also be used for service availability monitoring, with the same caveats as ++.BR openmetrics.control.status . + .IP \fBopenmetrics.control.calls\fP + total number of times each configured metric source has been fetched (if it's a URL) + or executed (if it's a script), since the PMDA started. +diff --git a/src/pmdas/openmetrics/pmdaopenmetrics.python b/src/pmdas/openmetrics/pmdaopenmetrics.python +index a5ed22f13..1486ed676 100755 +--- a/src/pmdas/openmetrics/pmdaopenmetrics.python ++++ b/src/pmdas/openmetrics/pmdaopenmetrics.python +@@ -1,6 +1,6 @@ + #!/usr/bin/env pmpython + # +-# Copyright (c) 2017-2019 Red Hat. ++# Copyright (c) 2017-2020 Red Hat. + # Copyright (c) 2017 Ronak Jain. + # + # This program is free software; you can redistribute it and/or modify it +@@ -704,6 +704,7 @@ class Source(object): + return + + # fetch the document ++ status_code = 0 + try: + if self.is_scripted: + # Execute file, expecting openmetrics metric data on stdout. +@@ -715,6 +716,7 @@ class Source(object): + self.document = open(self.url[7:], 'r').read() + else: + r = self.requests.get(self.url, headers=self.headers, timeout=timeout) ++ status_code = r.status_code + r.raise_for_status() # non-200? ERROR + # NB: the requests package automatically enables http keep-alive and compression + self.document = r.text +@@ -723,9 +725,13 @@ class Source(object): + incr = int(1000 * (time.time() - fetch_time)) + self.pmda.stats_fetch_time[self.cluster] += incr + self.pmda.stats_fetch_time[0] += incr # total for all sources ++ self.pmda.stats_status[self.cluster] = "success" ++ self.pmda.stats_status_code[self.cluster] = status_code + + except Exception as e: +- self.pmda.err('Warning: cannot fetch URL or execute script %s: %s' % (self.path, e)) ++ self.pmda.stats_status[self.cluster] = 'failed to fetch URL or execute script %s: %s' % (self.path, e) ++ self.pmda.stats_status_code[self.cluster] = status_code ++ self.pmda.debug('Warning: cannot fetch URL or execute script %s: %s' % (self.path, e)) if self.pmda.dbg else None + return + + def refresh2(self, timeout): +@@ -844,6 +850,20 @@ class OpenMetricsPMDA(PMDA): + pmUnits(0, 0, 0, 0, 0, 0)), + 'debug flag to enable verbose log messages, to enable: pmstore %s.control.debug 1' % self.pmda_name) + ++ # response status string, per-source end-point ++ self.stats_status = {0:"none"} # status string, keyed by cluster number ++ self.add_metric('%s.control.status' % self.pmda_name, pmdaMetric(self.pmid(0, 5), ++ c_api.PM_TYPE_STRING, self.sources_indom, c_api.PM_SEM_INSTANT, ++ pmUnits(0, 0, 0, 0, 0, 0)), # no units ++ 'per-end-point source URL response status after the most recent fetch') ++ ++ # response status code, per-source end-point ++ self.stats_status_code = {0:0} # status code, keyed by cluster number ++ self.add_metric('%s.control.status_code' % self.pmda_name, pmdaMetric(self.pmid(0, 6), ++ c_api.PM_TYPE_32, self.sources_indom, c_api.PM_SEM_DISCRETE, ++ pmUnits(0, 0, 0, 0, 0, 0)), # no units ++ 'per-end-point source URL response status code after the most recent fetch') ++ + # schedule a refresh + self.set_need_refresh() + +@@ -961,6 +981,8 @@ class OpenMetricsPMDA(PMDA): + self.stats_fetch_calls[cluster] = 0 + self.stats_fetch_time[cluster] = 0 + self.stats_parse_time[cluster] = 0 ++ self.stats_status[cluster] = "unknown" ++ self.stats_status_code[cluster] = 0 + + save_cluster_table = True + self.log("Found source %s cluster %d" % (name, cluster)) +@@ -996,6 +1018,10 
@@ class OpenMetricsPMDA(PMDA): + return [self.stats_parse_time[inst], 1] if inst in self.stats_parse_time else [c_api.PM_ERR_INST, 0] + elif item == 4: # $(pmda_name).control.debug + return [self.dbg, 1] ++ elif item == 5: # per-source status string ++ return [self.stats_status[inst], 1] if inst in self.stats_status else [c_api.PM_ERR_INST, 0] ++ elif item == 6: # per-source status code ++ return [self.stats_status_code[inst], 1] if inst in self.stats_status_code else [c_api.PM_ERR_INST, 0] + return [c_api.PM_ERR_PMID, 0] + + self.assert_source_invariants(cluster=cluster) + +commit 63605e3db4b2821df2a6ffb21507af91d97f3a8b +Author: Mark Goodwin +Date: Fri Jun 19 10:02:04 2020 +1000 + + qa/1102: tweak openmetrics QA to be more deterministic + + Now that pmdaopenmetrics is Installed by default with the localhost + grafana metrics URL configured, after _pmdaopenmetrics_save_config + we need to _pmdaopenmetrics_remove before _pmdaopenmetrics_install + to make qa/1102 deterministic. + +diff --git a/qa/1102 b/qa/1102 +index f573d14f4..98ff61f5e 100755 +--- a/qa/1102 ++++ b/qa/1102 +@@ -46,6 +46,7 @@ trap "_cleanup; exit \$status" 0 1 2 3 15 + _stop_auto_restart pmcd + + _pmdaopenmetrics_save_config ++_pmdaopenmetrics_remove + _pmdaopenmetrics_install + + port=`_find_free_port 10000` +diff --git a/qa/1102.out b/qa/1102.out +index 5094e4a82..aa74abe44 100644 +--- a/qa/1102.out ++++ b/qa/1102.out +@@ -1,5 +1,12 @@ + QA output created by 1102 + ++=== remove openmetrics agent === ++Culling the Performance Metrics Name Space ... ++openmetrics ... done ++Updating the PMCD control file, and notifying PMCD ... ++[...removing files...] ++Check openmetrics metrics have gone away ... OK ++ + === openmetrics agent installation === + Fetch and desc openmetrics metrics: success + + +commit 649a0c3a2745f549b139ce1250e38a1e90308426 +Author: Mark Goodwin +Date: Fri Jun 19 09:55:58 2020 +1000 + + qa: improve _filter_pmda_remove() in common.filter + + Filter "Job for pmcd.service canceled" in _filter_pmda_remove. + Systemd sometimes (uncommonly) prints this if a PMDA is still + starting when a QA test ./Removes it. 
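
A minimal sketch of the filtering idiom this commit extends (illustrative only; the real helper is _filter_pmda_remove in qa/common.filter, whose change is shown in the hunk below, and it applies further filtering beyond these two expressions): the PMDA removal output is piped through sed so lines that appear only sometimes, such as the transient systemd message, never reach the expected-output comparison.

    # illustrative sketch, not the actual helper: drop nondeterministic lines
    # from the PMDA removal output so the QA .out comparison stays stable
    ./Remove 2>&1 | sed \
        -e '/Removing files/d' \
        -e '/Job for pmcd.service canceled/d'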
+ +diff --git a/qa/common.filter b/qa/common.filter +index a53d4a49d..b327abedc 100644 +--- a/qa/common.filter ++++ b/qa/common.filter +@@ -760,6 +760,7 @@ _filter_pmda_remove() + _filter_pmda_install | + sed \ + -e '/Removing files/d' \ ++ -e '/Job for pmcd.service canceled/d' \ + -e '/Updating the PMCD control file/c\ + Updating the PMCD control file, and notifying PMCD ...\ + [...removing files...]' diff --git a/SOURCES/redhat-bugzilla-1848995.patch b/SOURCES/redhat-bugzilla-1848995.patch new file mode 100644 index 0000000..edabc1a --- /dev/null +++ b/SOURCES/redhat-bugzilla-1848995.patch @@ -0,0 +1,17 @@ +BZ 1848995 - Intermittent pminfo crashes (core dumps) +51bb36a84 libpcp: minor clarification to previous fix to use __pmHashInit +d0df00ad1 derive_parser.y.in: fix of incomplete initialization of __pmHashCtl struct for F_REGEX node + +diff --git a/src/libpcp/src/derive_parser.y.in b/src/libpcp/src/derive_parser.y.in +index 9ed375e01..6756daa77 100644 +--- a/src/libpcp/src/derive_parser.y.in ++++ b/src/libpcp/src/derive_parser.y.in +@@ -2598,6 +2598,8 @@ regexpr : opt_bang L_PATTERN + return -1; + } + np->data.pattern->invert = $1; ++ np->data.pattern->used = 0; ++ __pmHashInit(&np->data.pattern->hash); + $$ = np; + } + diff --git a/SOURCES/redhat-bugzilla-1849511.patch b/SOURCES/redhat-bugzilla-1849511.patch new file mode 100644 index 0000000..864a91d --- /dev/null +++ b/SOURCES/redhat-bugzilla-1849511.patch @@ -0,0 +1,996 @@ +BZ 1849511 - resolve covscan and other issues from upstream QA  +f7f1dd546 pmproxy: complete handling of HTTP/1.1 TRACE requests +cc662872b qa: add pcp-free-tera archive to pcp-testsuite package +80639d05b pmlogger_check.sh: major overhaul (diags and systemd fixups) +460b7ac2a src/pmlogger/rc_pmlogger: use --quick to pmlogger_check +0b3b4d4ee src/pmlogger/pmlogger_check.service.in: add --skip-primary arg to pmlogger_check +3a68366a8 src/pmlogger/pmlogger.service.in: change ancillary services from After to Before +5d65a6035 src/pmlogger/pmlogger_daily.sh: similar changes to pmlogger_check.sh +ace576907 src/pmlogger/pmlogger_check.sh: fix locking snarfoo +2b2c3db11 src/pmlogger/pmlogger_daily.sh: fix diagnostic spaghetti +4cc54287f pmproxy: allow URLs up to 8k in length + +diff -auNr pcp-5.1.1-004/man/man1/pmlogger_check.1 pcp-5.1.1-005/man/man1/pmlogger_check.1 +--- pcp-5.1.1-004/man/man1/pmlogger_check.1 2020-04-07 13:31:03.000000000 +1000 ++++ pcp-5.1.1-005/man/man1/pmlogger_check.1 2020-06-22 20:08:18.454403788 +1000 +@@ -19,7 +19,7 @@ + \f3pmlogger_daily\f1 \- administration of Performance Co-Pilot archive log files + .SH SYNOPSIS + .B $PCP_BINADM_DIR/pmlogger_check +-[\f3\-CNsTV?\f1] ++[\f3\-CNpqsTV?\f1] + [\f3\-c\f1 \f2control\f1] + [\f3\-l\f1 \f2logfile\f1] + .br +@@ -269,6 +269,20 @@ + .TP + \fB\-p\fR + If this option is specified for ++.B pmlogger_check ++then any line from the control files for the ++.I primary ++.B pmlogger ++will be ignored. ++This option is intended for environments where some system daemon, ++like ++.BR systemd (1), ++is responsible for controlling (starting, stopping, restarting, etc.) the ++.I primary ++.BR pmlogger . ++.TP ++\fB\-p\fR ++If this option is specified for + .B pmlogger_daily + then the status of the daily processing is polled and if the daily + .BR pmlogger (1) +@@ -296,6 +310,12 @@ + .B pmlogger_daily + are mutually exclusive. + .TP ++\fB\-q\fR ++If this option is specified for ++.B pmlogger_check ++then the script will ``quickstart'' avoiding any optional processing ++like file compression. 
++.TP + \fB\-r\fR, \fB\-\-norewrite\fR + This command line option acts as an override and prevents all archive + rewriting with +diff -auNr pcp-5.1.1-004/qa/1837 pcp-5.1.1-005/qa/1837 +--- pcp-5.1.1-004/qa/1837 2020-06-22 20:00:17.636331169 +1000 ++++ pcp-5.1.1-005/qa/1837 2020-06-22 20:08:18.457403819 +1000 +@@ -1,6 +1,6 @@ + #!/bin/sh + # PCP QA Test No. 1837 +-# Exercise PMWEBAPI handling server OPTIONS. ++# Exercise PMWEBAPI handling server OPTIONS and TRACE. + # + # Copyright (c) 2020 Red Hat. All Rights Reserved. + # +@@ -43,7 +43,12 @@ + # real QA test starts here + _service pmproxy restart >/dev/null 2>&1 + +-curl -isS --request-target "*" -X OPTIONS http://localhost:44322 \ ++echo; echo "=== OPTIONS" ++curl -isS -X OPTIONS --request-target "*" http://localhost:44322 \ ++ 2>&1 | tee -a $here/$seq.full | _webapi_header_filter ++ ++echo; echo "=== TRACE" ++curl -isS -X TRACE http://localhost:44322 \ + 2>&1 | tee -a $here/$seq.full | _webapi_header_filter + + echo >>$here/$seq.full +diff -auNr pcp-5.1.1-004/qa/1837.out pcp-5.1.1-005/qa/1837.out +--- pcp-5.1.1-004/qa/1837.out 2020-06-22 20:00:17.637331179 +1000 ++++ pcp-5.1.1-005/qa/1837.out 2020-06-22 20:08:18.457403819 +1000 +@@ -1,6 +1,17 @@ + QA output created by 1837 + ++=== OPTIONS ++ + Access-Control-Allow-Methods: GET, PUT, HEAD, POST, TRACE, OPTIONS + Content-Length: 0 + Date: DATE + HTTP/1.1 200 OK ++ ++=== TRACE ++ ++Accept: */* ++Content-Length: 0 ++Date: DATE ++HTTP/1.1 200 OK ++Host: localhost:44322 ++User-Agent: curl VERSION +diff -auNr pcp-5.1.1-004/qa/archives/GNUmakefile pcp-5.1.1-005/qa/archives/GNUmakefile +--- pcp-5.1.1-004/qa/archives/GNUmakefile 2020-03-19 15:15:42.000000000 +1100 ++++ pcp-5.1.1-005/qa/archives/GNUmakefile 2020-06-22 20:08:18.461403861 +1000 +@@ -35,6 +35,7 @@ + pcp-atop.0.xz pcp-atop.meta pcp-atop.index \ + pcp-atop-boot.0.xz pcp-atop-boot.meta pcp-atop-boot.index \ + pcp-dstat.0.xz pcp-dstat.meta pcp-dstat.index \ ++ pcp-free-tera.0.xz pcp-free-tera.meta.xz pcp-free-tera.index \ + pcp-hotatop.0.xz pcp-hotatop.meta pcp-hotatop.index \ + pcp-zeroconf.0.xz pcp-zeroconf.meta pcp-zeroconf.index \ + value-test.0.xz value-test.meta value-test.index \ +diff -auNr pcp-5.1.1-004/qa/common.check pcp-5.1.1-005/qa/common.check +--- pcp-5.1.1-004/qa/common.check 2020-06-22 20:00:17.637331179 +1000 ++++ pcp-5.1.1-005/qa/common.check 2020-06-22 20:08:18.459403840 +1000 +@@ -2697,6 +2697,7 @@ + | col -b \ + | sed \ + -e 's/^\(Content-Length:\) [1-9][0-9]*/\1 SIZE/g' \ ++ -e 's/^\(User-Agent: curl\).*/\1 VERSION/g' \ + -e 's/^\(Date:\).*/\1 DATE/g' \ + -e 's/\(\"context\":\) [0-9][0-9]*/\1 CTXID/g' \ + -e '/^Connection: Keep-Alive/d' \ +diff -auNr pcp-5.1.1-004/src/pmlogger/pmlogger_check.service.in pcp-5.1.1-005/src/pmlogger/pmlogger_check.service.in +--- pcp-5.1.1-004/src/pmlogger/pmlogger_check.service.in 2020-05-22 16:40:09.000000000 +1000 ++++ pcp-5.1.1-005/src/pmlogger/pmlogger_check.service.in 2020-06-22 20:08:18.452403767 +1000 +@@ -6,7 +6,7 @@ + [Service] + Type=oneshot + TimeoutStartSec=25m +-Environment="PMLOGGER_CHECK_PARAMS=-C" ++Environment="PMLOGGER_CHECK_PARAMS=-C --skip-primary" + EnvironmentFile=-@PCP_SYSCONFIG_DIR@/pmlogger_timers + ExecStart=@PCP_BINADM_DIR@/pmlogger_check $PMLOGGER_CHECK_PARAMS + WorkingDirectory=@PCP_VAR_DIR@ +diff -auNr pcp-5.1.1-004/src/pmlogger/pmlogger_check.sh pcp-5.1.1-005/src/pmlogger/pmlogger_check.sh +--- pcp-5.1.1-004/src/pmlogger/pmlogger_check.sh 2020-05-04 09:52:04.000000000 +1000 ++++ pcp-5.1.1-005/src/pmlogger/pmlogger_check.sh 2020-06-22 20:13:04.029416598 
+1000 +@@ -36,16 +36,24 @@ + echo >$tmp/lock + prog=`basename $0` + PROGLOG=$PCP_LOG_DIR/pmlogger/$prog.log ++MYPROGLOG=$PROGLOG.$$ + USE_SYSLOG=true + + _cleanup() + { ++ if [ -s "$MYPROGLOG" ] ++ then ++ rm -f "$PROGLOG" ++ mv "$MYPROGLOG" "$PROGLOG" ++ else ++ rm -f "$MYPROGLOG" ++ fi + $USE_SYSLOG && [ $status -ne 0 ] && \ + $PCP_SYSLOG_PROG -p daemon.error "$prog failed - see $PROGLOG" +- [ -s "$PROGLOG" ] || rm -f "$PROGLOG" + lockfile=`cat $tmp/lock 2>/dev/null` + rm -f "$lockfile" + rm -rf $tmp ++ $VERY_VERBOSE && echo "End: `date '+%F %T.%N'`" + } + trap "_cleanup; exit \$status" 0 1 2 3 15 + +@@ -86,6 +94,8 @@ + CHECK_RUNLEVEL=false + START_PMLOGGER=true + STOP_PMLOGGER=false ++QUICKSTART=false ++SKIP_PRIMARY=false + + echo > $tmp/usage + cat >> $tmp/usage << EOF +@@ -94,6 +104,8 @@ + -l=FILE,--logfile=FILE send important diagnostic messages to FILE + -C query system service runlevel information + -N,--showme perform a dry run, showing what would be done ++ -p,--skip-primary do not start or stop the primary pmlogger instance ++ -q,--quick quick start, no compression + -s,--stop stop pmlogger processes instead of starting them + -T,--terse produce a terser form of output + -V,--verbose increase diagnostic verbosity +@@ -117,6 +129,7 @@ + -C) CHECK_RUNLEVEL=true + ;; + -l) PROGLOG="$2" ++ MYPROGLOG="$PROGLOG".$$ + USE_SYSLOG=false + daily_args="${daily_args} -l $2.from.check" + shift +@@ -129,6 +142,10 @@ + KILL="echo + kill" + daily_args="${daily_args} -N" + ;; ++ -p) SKIP_PRIMARY=true ++ ;; ++ -q) QUICKSTART=true ++ ;; + -s) START_PMLOGGER=false + STOP_PMLOGGER=true + ;; +@@ -162,9 +179,15 @@ + + _compress_now() + { +- # If $PCP_COMPRESSAFTER=0 in the control file(s), compress archives now. +- # Invoked just before exit when this script has finished successfully. +- $PCP_BINADM_DIR/pmlogger_daily -K $daily_args ++ if $QUICKSTART ++ then ++ $VERY_VERBOSE && echo "Skip compression, -q/--quick on command line" ++ else ++ # If $PCP_COMPRESSAFTER=0 in the control file(s), compress archives now. ++ # Invoked just before exit when this script has finished successfully. ++ $VERY_VERBOSE && echo "Doing compression ..." ++ $PCP_BINADM_DIR/pmlogger_daily -K $daily_args ++ fi + } + + # after argument checking, everything must be logged to ensure no mail is +@@ -187,26 +210,37 @@ + # + # Exception ($SHOWME, above) is for -N where we want to see the output. + # +- touch "$PROGLOG" +- chown $PCP_USER:$PCP_GROUP "$PROGLOG" >/dev/null 2>&1 +- exec 1>"$PROGLOG" 2>&1 ++ touch "$MYPROGLOG" ++ chown $PCP_USER:$PCP_GROUP "$MYPROGLOG" >/dev/null 2>&1 ++ exec 1>"$MYPROGLOG" 2>&1 ++fi ++ ++if $VERY_VERBOSE ++then ++ echo "Start: `date '+%F %T.%N'`" ++ if `which pstree >/dev/null 2>&1` ++ then ++ echo "Called from:" ++ pstree -spa $$ ++ echo "--- end of pstree output ---" ++ fi + fi + + # if SaveLogs exists in the $PCP_LOG_DIR/pmlogger directory then save +-# $PROGLOG there as well with a unique name that contains the date and time ++# $MYPROGLOG there as well with a unique name that contains the date and time + # when we're run + # + if [ -d $PCP_LOG_DIR/pmlogger/SaveLogs ] + then +- now="`date '+%Y%m%d.%H.%M'`" +- link=`echo $PROGLOG | sed -e "s/$prog/SaveLogs\/$prog.$now/"` ++ now="`date '+%Y%m%d.%H.%M.%S'`" ++ link=`echo $MYPROGLOG | sed -e "s/$prog/SaveLogs\/$prog.$now/"` + if [ ! 
-f "$link" ] + then + if $SHOWME + then +- echo "+ ln $PROGLOG $link" ++ echo "+ ln $MYPROGLOG $link" + else +- ln $PROGLOG $link ++ ln $MYPROGLOG $link + fi + fi + fi +@@ -273,7 +307,7 @@ + + _unlock() + { +- rm -f lock ++ rm -f "$1/lock" + echo >$tmp/lock + } + +@@ -395,6 +429,41 @@ + echo "$pid" + } + ++# wait for the local pmcd to get going for a primary pmlogger ++# (borrowed from qa/common.check) ++# ++# wait_for_pmcd [maxdelay] ++# ++_wait_for_pmcd() ++{ ++ # 5 seconds default seems like a reasonable max time to get going ++ _can_wait=${1-5} ++ _limit=`expr $_can_wait \* 10` ++ _i=0 ++ _dead=true ++ while [ $_i -lt $_limit ] ++ do ++ _sts=`pmprobe pmcd.numclients 2>/dev/null | $PCP_AWK_PROG '{print $2}'` ++ if [ "${_sts:-0}" -gt 0 ] ++ then ++ # numval really > 0, we're done ++ # ++ _dead=false ++ break ++ fi ++ pmsleep 0.1 ++ _i=`expr $_i + 1` ++ done ++ if $_dead ++ then ++ date ++ echo "Arrgghhh ... pmcd at localhost failed to start after $_can_wait seconds" ++ echo "=== failing pmprobes ===" ++ pmprobe pmcd.numclients ++ status=1 ++ fi ++} ++ + _check_archive() + { + if [ ! -e "$logfile" ] +@@ -531,7 +600,17 @@ + cd "$here" + line=`expr $line + 1` + +- $VERY_VERBOSE && echo "[$controlfile:$line] host=\"$host\" primary=\"$primary\" socks=\"$socks\" dir=\"$dir\" args=\"$args\"" ++ ++ if $VERY_VERBOSE ++ then ++ case "$host" ++ in ++ \#*|'') # comment or empty ++ ;; ++ *) echo "[$controlfile:$line] host=\"$host\" primary=\"$primary\" socks=\"$socks\" dir=\"$dir\" args=\"$args\"" ++ ;; ++ esac ++ fi + + case "$host" + in +@@ -599,6 +678,15 @@ + continue + fi + ++ # if -s/--skip-primary on the command line, do not process ++ # a control file line for the primary pmlogger ++ # ++ if $SKIP_PRIMARY && [ $primary = y ] ++ then ++ $VERY_VERBOSE && echo "Skip, -s/--skip-primary on command line" ++ continue ++ fi ++ + # substitute LOCALHOSTNAME marker in this config line + # (differently for directory and pcp -h HOST arguments) + # +@@ -610,7 +698,7 @@ + then + pflag='' + [ $primary = y ] && pflag=' -P' +- echo "Check pmlogger$pflag -h $host ... in $dir ..." ++ echo "Checking for: pmlogger$pflag -h $host ... in $dir ..." + fi + + # check for directory duplicate entries +@@ -664,19 +752,25 @@ + delay=200 # tenths of a second + while [ $delay -gt 0 ] + do +- if pmlock -v lock >$tmp/out 2>&1 ++ if pmlock -v "$dir/lock" >$tmp/out 2>&1 + then +- echo $dir/lock >$tmp/lock ++ echo "$dir/lock" >$tmp/lock ++ if $VERY_VERBOSE ++ then ++ echo "Acquired lock:" ++ ls -l $dir/lock ++ fi + break + else + [ -f $tmp/stamp ] || touch -t `pmdate -30M %Y%m%d%H%M` $tmp/stamp +- if [ -z "`find lock -newer $tmp/stamp -print 2>/dev/null`" ] ++ find $tmp/stamp -newer "$dir/lock" -print 2>/dev/null >$tmp/tmp ++ if [ -s $tmp/tmp ] + then +- if [ -f lock ] ++ if [ -f "$dir/lock" ] + then + echo "$prog: Warning: removing lock file older than 30 minutes" + LC_TIME=POSIX ls -l $dir/lock +- rm -f lock ++ rm -f "$dir/lock" + else + # there is a small timing window here where pmlock + # might fail, but the lock file has been removed by +@@ -714,7 +808,7 @@ + continue + fi + fi +- if [ -f lock ] ++ if [ -f "$dir/lock" ] + then + echo "$prog: Warning: is another PCP cron job running concurrently?" + LC_TIME=POSIX ls -l $dir/lock +@@ -753,6 +847,14 @@ + $VERY_VERBOSE && echo "primary pmlogger process $pid not running" + pid='' + fi ++ else ++ if $VERY_VERBOSE ++ then ++ echo "$PCP_TMP_DIR/pmlogger/primary: missing?" 
++ echo "Contents of $PCP_TMP_DIR/pmlogger" ++ ls -l $PCP_TMP_DIR/pmlogger ++ echo "--- end of ls output ---" ++ fi + fi + else + for log in $PCP_TMP_DIR/pmlogger/[0-9]* +@@ -798,6 +900,17 @@ + # + PM_LOG_PORT_DIR="$PCP_TMP_DIR/pmlogger" + rm -f "$PM_LOG_PORT_DIR/primary" ++ # We really starting the primary pmlogger to work, especially ++ # in the systemd world, so make sure pmcd is ready to accept ++ # connections. ++ # ++ _wait_for_pmcd ++ if [ "$status" = 1 ] ++ then ++ $VERY_VERBOSE && echo "pmcd not running, skip primary pmlogger" ++ _unlock "$dir" ++ continue ++ fi + else + args="-h $host $args" + envs="" +@@ -870,7 +983,7 @@ + then + echo + echo "+ ${sock_me}$PMLOGGER $args $LOGNAME" +- _unlock ++ _unlock "$dir" + continue + else + $PCP_BINADM_DIR/pmpost "start pmlogger from $prog for host $host" +@@ -903,7 +1016,7 @@ + $PCP_ECHO_PROG $PCP_ECHO_N "$pid ""$PCP_ECHO_C" >> $tmp/pmloggers + fi + +- _unlock ++ _unlock "$dir" + done + } + +diff -auNr pcp-5.1.1-004/src/pmlogger/pmlogger_daily.sh pcp-5.1.1-005/src/pmlogger/pmlogger_daily.sh +--- pcp-5.1.1-004/src/pmlogger/pmlogger_daily.sh 2020-04-07 13:31:03.000000000 +1000 ++++ pcp-5.1.1-005/src/pmlogger/pmlogger_daily.sh 2020-06-22 20:08:18.451403756 +1000 +@@ -31,16 +31,24 @@ + echo >$tmp/lock + prog=`basename $0` + PROGLOG=$PCP_LOG_DIR/pmlogger/$prog.log ++MYPROGLOG=$PROGLOG.$$ + USE_SYSLOG=true + + _cleanup() + { ++ if [ -s "$MYPROGLOG" ] ++ then ++ rm -f "$PROGLOG" ++ mv "$MYPROGLOG" "$PROGLOG" ++ else ++ rm -f "$MYPROGLOG" ++ fi + $USE_SYSLOG && [ $status -ne 0 ] && \ + $PCP_SYSLOG_PROG -p daemon.error "$prog failed - see $PROGLOG" +- [ -s "$PROGLOG" ] || rm -f "$PROGLOG" + lockfile=`cat $tmp/lock 2>/dev/null` + rm -f "$lockfile" "$PCP_RUN_DIR/pmlogger_daily.pid" + rm -rf $tmp ++ $VERY_VERBOSE && echo "End: `date '+%F %T.%N'`" + } + trap "_cleanup; exit \$status" 0 1 2 3 15 + +@@ -215,8 +223,10 @@ + fi + COMPRESSONLY=true + PROGLOG=$PCP_LOG_DIR/pmlogger/$prog-K.log ++ MYPROGLOG=$PROGLOG.$$ + ;; + -l) PROGLOG="$2" ++ MYPROGLOG=$PROGLOG.$$ + USE_SYSLOG=false + shift + ;; +@@ -278,6 +288,7 @@ + # $PCP_LOG_DIR/pmlogger/daily..trace + # + PROGLOG=$PCP_LOG_DIR/pmlogger/daily.`date "+%Y%m%d.%H.%M"`.trace ++ MYPROGLOG=$PROGLOG.$$ + VERBOSE=true + VERY_VERBOSE=true + MYARGS="$MYARGS -V -V" +@@ -418,13 +429,23 @@ + # + # Exception ($SHOWME, above) is for -N where we want to see the output. + # +- touch "$PROGLOG" +- chown $PCP_USER:$PCP_GROUP "$PROGLOG" >/dev/null 2>&1 +- exec 1>"$PROGLOG" 2>&1 ++ touch "$MYPROGLOG" ++ chown $PCP_USER:$PCP_GROUP "$MYPROGLOG" >/dev/null 2>&1 ++ exec 1>"$MYPROGLOG" 2>&1 ++fi ++ ++if $VERY_VERBOSE ++then ++ echo "Start: `date '+%F %T.%N'`" ++ if `which pstree >/dev/null 2>&1` ++ then ++ echo "Called from:" ++ pstree -spa $$ ++ fi + fi + + # if SaveLogs exists in the $PCP_LOG_DIR/pmlogger directory then save +-# $PROGLOG there as well with a unique name that contains the date and time ++# $MYPROGLOG there as well with a unique name that contains the date and time + # when we're run ... skip if -N (showme) + # + if $SHOWME +@@ -433,15 +454,15 @@ + else + if [ -d $PCP_LOG_DIR/pmlogger/SaveLogs ] + then +- now="`date '+%Y%m%d.%H.%M'`" +- link=`echo $PROGLOG | sed -e "s/$prog/SaveLogs\/$prog.$now/"` ++ now="`date '+%Y%m%d.%H.%M.%S'`" ++ link=`echo $MYPROGLOG | sed -e "s/$prog/SaveLogs\/$prog.$now/"` + if [ ! 
-f "$link" ] + then + if $SHOWME + then +- echo "+ ln $PROGLOG $link" ++ echo "+ ln $MYPROGLOG $link" + else +- ln $PROGLOG $link ++ ln $MYPROGLOG $link + fi + fi + fi +@@ -487,19 +508,20 @@ + delay=200 # tenths of a second + while [ $delay -gt 0 ] + do +- if pmlock -v lock >>$tmp/out 2>&1 ++ if pmlock -v "$1/lock" >>$tmp/out 2>&1 + then +- echo $1/lock >$tmp/lock ++ echo "$1/lock" >$tmp/lock + break + else + [ -f $tmp/stamp ] || touch -t `pmdate -30M %Y%m%d%H%M` $tmp/stamp +- if [ ! -z "`find lock -newer $tmp/stamp -print 2>/dev/null`" ] ++ find $tmp/stamp -newer "$1/lock" -print 2>/dev/null >$tmp/tmp ++ if [ -s $tmp/tmp ] + then +- if [ -f lock ] ++ if [ -f "$1/lock" ] + then + _warning "removing lock file older than 30 minutes" +- LC_TIME=POSIX ls -l $1/lock +- rm -f lock ++ LC_TIME=POSIX ls -l "$1/lock" ++ rm -f "$1/lock" + else + # there is a small timing window here where pmlock + # might fail, but the lock file has been removed by +@@ -517,10 +539,10 @@ + then + # failed to gain mutex lock + # +- if [ -f lock ] ++ if [ -f "$1/lock" ] + then + _warning "is another PCP cron job running concurrently?" +- LC_TIME=POSIX ls -l $1/lock ++ LC_TIME=POSIX ls -l "$1/lock" + else + echo "$prog: `cat $tmp/out`" + fi +@@ -534,7 +556,7 @@ + + _unlock() + { +- rm -f lock ++ rm -f "$1/lock" + echo >$tmp/lock + } + +@@ -703,6 +725,9 @@ + # if the directory containing the archive matches, then the name + # of the file is the pid. + # ++# The pid(s) (if any) appear on stdout, so be careful to send any ++# diagnostics to stderr. ++# + _get_non_primary_logger_pid() + { + pid='' +@@ -713,7 +738,7 @@ + then + _host=`sed -n 2p <$log` + _arch=`sed -n 3p <$log` +- $PCP_ECHO_PROG $PCP_ECHO_N "... try $log host=$_host arch=$_arch: ""$PCP_ECHO_C" ++ $PCP_ECHO_PROG >&2 $PCP_ECHO_N "... 
try $log host=$_host arch=$_arch: ""$PCP_ECHO_C" + fi + # throw away stderr in case $log has been removed by now + match=`sed -e '3s@/[^/]*$@@' $log 2>/dev/null | \ +@@ -721,19 +746,19 @@ + BEGIN { m = 0 } + NR == 3 && $0 == "'$dir'" { m = 2; next } + END { print m }'` +- $VERY_VERBOSE && $PCP_ECHO_PROG $PCP_ECHO_N "match=$match ""$PCP_ECHO_C" ++ $VERY_VERBOSE && $PCP_ECHO_PROG >&2 $PCP_ECHO_N "match=$match ""$PCP_ECHO_C" + if [ "$match" = 2 ] + then + pid=`echo $log | sed -e 's,.*/,,'` + if _get_pids_by_name pmlogger | grep "^$pid\$" >/dev/null + then +- $VERY_VERBOSE && echo "pmlogger process $pid identified, OK" ++ $VERY_VERBOSE && echo >&2 "pmlogger process $pid identified, OK" + break + fi +- $VERY_VERBOSE && echo "pmlogger process $pid not running, skip" ++ $VERY_VERBOSE && echo >&2 "pmlogger process $pid not running, skip" + pid='' + else +- $VERY_VERBOSE && echo "different directory, skip" ++ $VERY_VERBOSE && echo >&2 "different directory, skip" + fi + done + echo "$pid" +@@ -1028,6 +1053,8 @@ + pid='' + fi + else ++ # pid(s) on stdout, diagnostics on stderr ++ # + pid=`_get_non_primary_logger_pid` + if $VERY_VERBOSE + then +@@ -1458,7 +1485,7 @@ + fi + fi + +- _unlock ++ _unlock "$dir" + done + } + +diff -auNr pcp-5.1.1-004/src/pmlogger/pmlogger.service.in pcp-5.1.1-005/src/pmlogger/pmlogger.service.in +--- pcp-5.1.1-004/src/pmlogger/pmlogger.service.in 2020-06-22 20:00:17.634331148 +1000 ++++ pcp-5.1.1-005/src/pmlogger/pmlogger.service.in 2020-06-22 20:08:18.452403767 +1000 +@@ -2,7 +2,7 @@ + Description=Performance Metrics Archive Logger + Documentation=man:pmlogger(1) + After=network-online.target pmcd.service +-After=pmlogger_check.timer pmlogger_check.path pmlogger_daily.timer pmlogger_daily-poll.timer ++Before=pmlogger_check.timer pmlogger_check.path pmlogger_daily.timer pmlogger_daily-poll.timer + BindsTo=pmlogger_check.timer pmlogger_check.path pmlogger_daily.timer pmlogger_daily-poll.timer + Wants=pmcd.service + +diff -auNr pcp-5.1.1-004/src/pmlogger/rc_pmlogger pcp-5.1.1-005/src/pmlogger/rc_pmlogger +--- pcp-5.1.1-004/src/pmlogger/rc_pmlogger 2020-04-21 10:42:02.000000000 +1000 ++++ pcp-5.1.1-005/src/pmlogger/rc_pmlogger 2020-06-22 20:08:18.453403777 +1000 +@@ -96,7 +96,7 @@ + bgtmp=`mktemp -d $PCP_DIR/var/tmp/pcp.XXXXXXXXX` || exit 1 + trap "rm -rf $bgtmp; exit \$bgstatus" 0 1 2 3 15 + +- pmlogger_check $VFLAG >$bgtmp/pmcheck.out 2>$bgtmp/pmcheck ++ pmlogger_check --quick $VFLAG >$bgtmp/pmcheck.out 2>$bgtmp/pmcheck + bgstatus=$? + if [ -s $bgtmp/pmcheck ] + then +@@ -125,8 +125,6 @@ + false + else + # Really start the pmlogger instances based on the control file. +- # Done in the background to avoid delaying the init script, +- # failure notification is external (syslog, log files). + # + $ECHO $PCP_ECHO_N "Starting pmlogger ..." 
"$PCP_ECHO_C" + +@@ -234,11 +232,9 @@ + if [ $VERBOSE_CTL = on ] + then # For a verbose startup and shutdown + ECHO=$PCP_ECHO_PROG +- REBUILDOPT='' + VFLAG='-V' + else # For a quiet startup and shutdown + ECHO=: +- REBUILDOPT=-s + VFLAG= + fi + +diff -auNr pcp-5.1.1-004/src/pmproxy/src/http.c pcp-5.1.1-005/src/pmproxy/src/http.c +--- pcp-5.1.1-004/src/pmproxy/src/http.c 2020-06-22 20:00:17.635331158 +1000 ++++ pcp-5.1.1-005/src/pmproxy/src/http.c 2020-06-22 20:08:18.460403851 +1000 +@@ -324,17 +324,36 @@ + } + + static sds +-http_response_trace(struct client *client) ++http_response_trace(struct client *client, int sts) + { ++ struct http_parser *parser = &client->u.http.parser; + dictIterator *iterator; + dictEntry *entry; +- sds result = sdsempty(); ++ char buffer[64]; ++ sds header; ++ ++ parser->http_major = parser->http_minor = 1; ++ ++ header = sdscatfmt(sdsempty(), ++ "HTTP/%u.%u %u %s\r\n" ++ "%S: Keep-Alive\r\n", ++ parser->http_major, parser->http_minor, ++ sts, http_status_mapping(sts), HEADER_CONNECTION); ++ header = sdscatfmt(header, "%S: %u\r\n", HEADER_CONTENT_LENGTH, 0); + + iterator = dictGetSafeIterator(client->u.http.headers); + while ((entry = dictNext(iterator)) != NULL) +- result = sdscatfmt("%S: %S\r\n", dictGetKey(entry), dictGetVal(entry)); ++ header = sdscatfmt(header, "%S: %S\r\n", dictGetKey(entry), dictGetVal(entry)); + dictReleaseIterator(iterator); +- return result; ++ ++ header = sdscatfmt(header, "Date: %s\r\n\r\n", ++ http_date_string(time(NULL), buffer, sizeof(buffer))); ++ ++ if (pmDebugOptions.http && pmDebugOptions.desperate) { ++ fprintf(stderr, "trace response to client %p\n", client); ++ fputs(header, stderr); ++ } ++ return header; + } + + static sds +@@ -418,7 +437,7 @@ + if (client->u.http.parser.method == HTTP_OPTIONS) + buffer = http_response_access(client, sts, options); + else if (client->u.http.parser.method == HTTP_TRACE) +- buffer = http_response_trace(client); ++ buffer = http_response_trace(client, sts); + else /* HTTP_HEAD */ + buffer = http_response_header(client, 0, sts, type); + suffix = NULL; +@@ -533,6 +552,8 @@ + if (servlet && servlet->on_release) + servlet->on_release(client); + client->u.http.privdata = NULL; ++ client->u.http.servlet = NULL; ++ client->u.http.flags = 0; + + if (client->u.http.headers) { + dictRelease(client->u.http.headers); +@@ -696,29 +717,39 @@ + { + struct client *client = (struct client *)request->data; + struct servlet *servlet; +- sds buffer; + int sts; + + http_client_release(client); /* new URL, clean slate */ +- /* server options - https://tools.ietf.org/html/rfc7231#section-4.3.7 */ +- if (length == 1 && *offset == '*' && +- client->u.http.parser.method == HTTP_OPTIONS) { +- buffer = http_response_access(client, HTTP_STATUS_OK, HTTP_SERVER_OPTIONS); +- client_write(client, buffer, NULL); +- } else if ((servlet = servlet_lookup(client, offset, length)) != NULL) { ++ /* pass to servlets handling each of our internal request endpoints */ ++ if ((servlet = servlet_lookup(client, offset, length)) != NULL) { + client->u.http.servlet = servlet; +- if ((sts = client->u.http.parser.status_code) == 0) { ++ if ((sts = client->u.http.parser.status_code) != 0) ++ http_error(client, sts, "failed to process URL"); ++ else { + if (client->u.http.parser.method == HTTP_OPTIONS || + client->u.http.parser.method == HTTP_TRACE || + client->u.http.parser.method == HTTP_HEAD) + client->u.http.flags |= HTTP_FLAG_NO_BODY; +- else +- client->u.http.flags &= ~HTTP_FLAG_NO_BODY; + client->u.http.headers = 
dictCreate(&sdsOwnDictCallBacks, NULL); +- return 0; + } +- http_error(client, sts, "failed to process URL"); +- } else { ++ } ++ /* server options - https://tools.ietf.org/html/rfc7231#section-4.3.7 */ ++ else if (client->u.http.parser.method == HTTP_OPTIONS) { ++ if (length == 1 && *offset == '*') { ++ client->u.http.flags |= HTTP_FLAG_NO_BODY; ++ client->u.http.headers = dictCreate(&sdsOwnDictCallBacks, NULL); ++ } else { ++ sts = client->u.http.parser.status_code = HTTP_STATUS_BAD_REQUEST; ++ http_error(client, sts, "no handler for OPTIONS"); ++ } ++ } ++ /* server trace - https://tools.ietf.org/html/rfc7231#section-4.3.8 */ ++ else if (client->u.http.parser.method == HTTP_TRACE) { ++ client->u.http.flags |= HTTP_FLAG_NO_BODY; ++ client->u.http.headers = dictCreate(&sdsOwnDictCallBacks, NULL); ++ } ++ /* nothing available to respond to this request - inform the client */ ++ else { + sts = client->u.http.parser.status_code = HTTP_STATUS_BAD_REQUEST; + http_error(client, sts, "no handler for URL"); + } +@@ -734,7 +765,7 @@ + if (pmDebugOptions.http && pmDebugOptions.desperate) + printf("Body: %.*s\n(client=%p)\n", (int)length, offset, client); + +- if (servlet->on_body) ++ if (servlet && servlet->on_body) + return servlet->on_body(client, offset, length); + return 0; + } +@@ -828,7 +859,7 @@ + } + + client->u.http.privdata = NULL; +- if (servlet->on_headers) ++ if (servlet && servlet->on_headers) + sts = servlet->on_headers(client, client->u.http.headers); + + /* HTTP Basic Auth for all servlets */ +@@ -857,13 +888,31 @@ + { + struct client *client = (struct client *)request->data; + struct servlet *servlet = client->u.http.servlet; ++ sds buffer; ++ int sts; + + if (pmDebugOptions.http) + fprintf(stderr, "HTTP message complete (client=%p)\n", client); + +- if (servlet && servlet->on_done) +- return servlet->on_done(client); +- return 0; ++ if (servlet) { ++ if (servlet->on_done) ++ return servlet->on_done(client); ++ return 0; ++ } ++ ++ sts = HTTP_STATUS_OK; ++ if (client->u.http.parser.method == HTTP_OPTIONS) { ++ buffer = http_response_access(client, sts, HTTP_SERVER_OPTIONS); ++ client_write(client, buffer, NULL); ++ return 0; ++ } ++ if (client->u.http.parser.method == HTTP_TRACE) { ++ buffer = http_response_trace(client, sts); ++ client_write(client, buffer, NULL); ++ return 0; ++ } ++ ++ return 1; + } + + void +diff -auNr pcp-5.1.1.orig/qa/1608 pcp-5.1.1/qa/1608 +--- pcp-5.1.1.orig/qa/1608 1970-01-01 10:00:00.000000000 +1000 ++++ pcp-5.1.1/qa/1608 2020-06-23 12:16:04.005557293 +1000 +@@ -0,0 +1,58 @@ ++#!/bin/sh ++# PCP QA Test No. 1608 ++# Exercise a long URL handling in pmproxy. ++# ++# Copyright (c) 2020 Red Hat. All Rights Reserved. ++# ++ ++seq=`basename $0` ++echo "QA output created by $seq" ++ ++# get standard environment, filters and checks ++. ./common.product ++. ./common.filter ++. ./common.check ++ ++_check_series ++which curl >/dev/null 2>&1 || _notrun "No curl binary installed" ++ ++status=1 # failure is the default! ++$sudo rm -rf $tmp $tmp.* $seq.full ++trap "_cleanup; exit \$status" 0 1 2 3 15 ++ ++pmproxy_was_running=false ++[ -f $PCP_RUN_DIR/pmproxy.pid ] && pmproxy_was_running=true ++echo "pmproxy_was_running=$pmproxy_was_running" >>$here/$seq.full ++ ++_cleanup() ++{ ++ if $pmproxy_was_running ++ then ++ echo "Restart pmproxy ..." >>$here/$seq.full ++ _service pmproxy restart >>$here/$seq.full 2>&1 ++ _wait_for_pmproxy ++ else ++ echo "Stopping pmproxy ..." 
>>$here/$seq.full ++ _service pmproxy stop >>$here/$seq.full 2>&1 ++ fi ++ $sudo rm -f $tmp.* ++} ++ ++_webapi_failure_filter() ++{ ++ _webapi_header_filter | \ ++ sed \ ++ -e 's/pmproxy.[0-9][0-9]*.[0-9][0-9]*.[0-9][0-9]*/PMPROXY\/VERSION/g' \ ++ #end ++} ++ ++# real QA test starts here ++_service pmproxy restart >/dev/null 2>&1 ++ ++url="http://localhost:44322/pmapi/context" ++aaa=`head -c 10000 < /dev/zero | tr '\0' '\141'` ++curl -isS -X OPTIONS "${url}?${aaa}" | _webapi_failure_filter ++ ++# success, all done ++status=0 ++exit +diff -auNr pcp-5.1.1.orig/qa/1608.out pcp-5.1.1/qa/1608.out +--- pcp-5.1.1.orig/qa/1608.out 1970-01-01 10:00:00.000000000 +1000 ++++ pcp-5.1.1/qa/1608.out 2020-06-23 12:16:04.005557293 +1000 +@@ -0,0 +1,16 @@ ++QA output created by 1608 ++ ++ ++ ++ ++

++<html>
++<head><title>414 URI Too Long</title></head>
++<body>
++<h1>414 URI Too Long</h1>
++<p><b>unknown servlet</b>: request URL too long</p><hr>
++<p><small><i>PMPROXY/VERSION</i></small></p>
++</body>
++</html>
++Access-Control-Allow-Headers: Accept, Accept-Language, Content-Language, Content-Type ++Access-Control-Allow-Origin: * ++Content-Length: SIZE ++Content-Type: text/html ++Date: DATE ++HTTP/1.1 414 URI Too Long +diff -auNr pcp-5.1.1.orig/qa/group pcp-5.1.1/qa/group +--- pcp-5.1.1.orig/qa/group 2020-06-23 12:15:21.335094106 +1000 ++++ pcp-5.1.1/qa/group 2020-06-23 12:16:54.256102754 +1000 +@@ -1717,6 +1717,7 @@ + 1600 pmseries pmcd pmproxy pmlogger local + 1601 pmseries pmproxy local + 1602 pmproxy local ++1608 pmproxy local + 1622 selinux local + 1623 libpcp_import collectl local + 1644 pmda.perfevent local +diff -auNr pcp-5.1.1.orig/src/pmproxy/src/http.c pcp-5.1.1/src/pmproxy/src/http.c +--- pcp-5.1.1.orig/src/pmproxy/src/http.c 2020-06-23 12:15:21.364094421 +1000 ++++ pcp-5.1.1/src/pmproxy/src/http.c 2020-06-23 12:16:04.008557325 +1000 +@@ -21,7 +21,9 @@ + static int chunked_transfer_size; /* pmproxy.chunksize, pagesize by default */ + static int smallest_buffer_size = 128; + +-#define MAX_PARAMS_SIZE 4096 ++/* https://tools.ietf.org/html/rfc7230#section-3.1.1 */ ++#define MAX_URL_SIZE 8192 ++#define MAX_PARAMS_SIZE 8000 + #define MAX_HEADERS_SIZE 128 + + static sds HEADER_ACCESS_CONTROL_REQUEST_HEADERS, +@@ -720,8 +722,13 @@ + int sts; + + http_client_release(client); /* new URL, clean slate */ ++ ++ if (length >= MAX_URL_SIZE) { ++ sts = client->u.http.parser.status_code = HTTP_STATUS_URI_TOO_LONG; ++ http_error(client, sts, "request URL too long"); ++ } + /* pass to servlets handling each of our internal request endpoints */ +- if ((servlet = servlet_lookup(client, offset, length)) != NULL) { ++ else if ((servlet = servlet_lookup(client, offset, length)) != NULL) { + client->u.http.servlet = servlet; + if ((sts = client->u.http.parser.status_code) != 0) + http_error(client, sts, "failed to process URL"); diff --git a/SPECS/pcp.spec b/SPECS/pcp.spec index f4065bd..297467b 100644 --- a/SPECS/pcp.spec +++ b/SPECS/pcp.spec @@ -1,6 +1,6 @@ Name: pcp Version: 5.1.1 -Release: 1%{?dist} +Release: 3%{?dist} Summary: System-level performance monitoring and performance management License: GPLv2+ and LGPLv2+ and CC-BY URL: https://pcp.io @@ -8,6 +8,15 @@ URL: https://pcp.io %global bintray https://bintray.com/artifact/download Source0: %{bintray}/pcp/source/pcp-%{version}.src.tar.gz +Patch000: redhat-bugzilla-1792971.patch +Patch001: redhat-bugzilla-1541406.patch +Patch002: redhat-bugzilla-1846711.patch +Patch003: redhat-bugzilla-1848995.patch +Patch004: redhat-bugzilla-1790452.patch +Patch005: redhat-bugzilla-1846705.patch +Patch006: redhat-bugzilla-1849511.patch +Patch007: redhat-bugzilla-1790433.patch + %if 0%{?fedora} >= 26 || 0%{?rhel} > 7 %global __python2 python2 %else @@ -2214,8 +2223,20 @@ updated policy package. 
%prep %setup -q +%patch000 -p1 +%patch001 -p1 +%patch002 -p1 +%patch003 -p1 +%patch004 -p1 +%patch005 -p1 +%patch006 -p1 +%patch007 -p1 %build +# fix up build version +_build=`echo %{release} | sed -e 's/\..*$//'` +sed -i "/PACKAGE_BUILD/s/=[0-9]*/=$_build/" VERSION.pcp + %if !%{disable_python2} && 0%{?default_python} != 3 export PYTHON=python%{?default_python} %endif @@ -2808,12 +2829,14 @@ pmieconf -c enable dmthin %post PCP_PMNS_DIR=%{_pmnsdir} +PCP_LOG_DIR=%{_logsdir} chown -R pcp:pcp %{_logsdir}/pmcd 2>/dev/null chown -R pcp:pcp %{_logsdir}/pmlogger 2>/dev/null chown -R pcp:pcp %{_logsdir}/sa 2>/dev/null chown -R pcp:pcp %{_logsdir}/pmie 2>/dev/null chown -R pcp:pcp %{_logsdir}/pmproxy 2>/dev/null %{install_file "$PCP_PMNS_DIR" .NeedRebuild} +%{install_file "$PCP_LOG_DIR/pmlogger" .NeedRewrite} %if !%{disable_systemd} %systemd_postun_with_restart pmcd.service %systemd_post pmcd.service @@ -3385,6 +3408,27 @@ chown -R pcp:pcp %{_logsdir}/pmproxy 2>/dev/null %endif %changelog +* Tue Jun 23 2020 Mark Goodwin - 5.1.1-3 +- fix for missing runtime deps on perl Net::SNMP (BZ 1790433) +- resolve covscan and other issues from upstream QA (BZ 1849511) +- Possible memory leak detected in pcp-atop (BZ 1846705) +- Installation of pcp-pmda-samba causes SELinux issues (BZ 1790452) +- fix Intermittent pminfo crashes (BZ 1848995) +- Silence openmetrics PMDA warnings, add status metrics (BZ 1846711) +- set PACKAGE_BUILD in VERSION.pcp so pmcd.build metric is correct + +* Thu Jun 11 2020 Mark Goodwin - 5.1.1-2 +- activate pmlogger_rewrite on upgrades (BZ 1541406) +- fix Coverity issues in pmdastatsd and pmlogconf (BZ 1792971) +- libpcp_web: ensure context is freed only after timer is fully closed +- services: pmlogger and pmie services Want pmcd on boot +- fix intermittent pmlogconf core dumps (BZ 1845241) +- pcp-atop: resolve potential null task pointer dereference +- pmproxy: improve diagnostics, particularly relating to http requests +- pmproxy: cleanup, remove unused flags and dead code in http encoding +- pmproxy: support the OPTIONS protocol in HTTP 1.1 +- libpcp_web: add resilience to descriptor lookup paths (BZ 1837153) + * Fri May 29 2020 Mark Goodwin - 5.1.1-1 - Rebuild to pick up changed HdrHistogram_c version (BZ 1831502) - pmdakvm: handle kernel lockdown in integrity mode (BZ 1824297)