diff --git a/.gitignore b/.gitignore
index 02817f0..036f7d7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1 +1 @@
-SOURCES/pcp-5.0.2.src.tar.gz
+SOURCES/pcp-5.1.1.src.tar.gz
diff --git a/.pcp.metadata b/.pcp.metadata
index 017406d..d432933 100644
--- a/.pcp.metadata
+++ b/.pcp.metadata
@@ -1 +1 @@
-95c6975c1af0c910a0e26ad5677a40bd8cd52ed2 SOURCES/pcp-5.0.2.src.tar.gz
+ecc3f3a9163a2295816ca5eb8d918125670a727e SOURCES/pcp-5.1.1.src.tar.gz
diff --git a/SOURCES/activemq-modules.patch b/SOURCES/activemq-modules.patch
deleted file mode 100644
index f43068d..0000000
--- a/SOURCES/activemq-modules.patch
+++ /dev/null
@@ -1,53 +0,0 @@
-commit 4356ac909fa1cc30e5804d9ff6e4c9204977b091
-Author: Nathan Scott <nathans@redhat.com>
-Date:   Fri Jan 31 14:34:25 2020 +1100
-
-    pmdactivemq: module name and path resolution fixes for latest perl
-    
-    Picked up by test qa/760 with recent versions of perl.
-    
-    Resolves Red Hat BZ #1788881
-
-diff --git a/src/pmdas/activemq/GNUmakefile b/src/pmdas/activemq/GNUmakefile
-index 8289670ca..1ad330220 100644
---- a/src/pmdas/activemq/GNUmakefile
-+++ b/src/pmdas/activemq/GNUmakefile
-@@ -41,7 +41,9 @@ build-me: check_domain
- install install_pcp: default
- 	$(INSTALL) -m 755 -d $(PMDADIR)
- 	$(INSTALL) -m 755 Install Remove $(PMDADIR)
--	$(INSTALL) -m 644 pmda$(IAM).pl $(MODULES) $(PMDADIR)
-+	$(INSTALL) -m 644 pmda$(IAM).pl $(PMDADIR)/pmda$(IAM).pl
-+	$(INSTALL) -m 755 -d $(PMDADIR)/PCP
-+	$(INSTALL) -m 644 $(MODULES) $(PMDADIR)/PCP
- 	@$(INSTALL_MAN)
- else
- build-me:
-diff --git a/src/pmdas/activemq/pmdaactivemq.pl b/src/pmdas/activemq/pmdaactivemq.pl
-index 7f835aac5..4b5451688 100755
---- a/src/pmdas/activemq/pmdaactivemq.pl
-+++ b/src/pmdas/activemq/pmdaactivemq.pl
-@@ -49,16 +49,16 @@ my $jvm_garbage_collection_cluster = 5;
- for my $file (pmda_config('PCP_PMDAS_DIR') . '/activemq/activemq.conf', 'activemq.conf') {
-     eval `cat $file` unless ! -f $file;
- }
--my $timesource = TimeSource->new;
--my $cache = Cache->new($timesource, $cache_time);
-+my $timesource = PCP::TimeSource->new;
-+my $cache = PCP::Cache->new($timesource, $cache_time);
- my $http_client = LWP::UserAgent->new;
- $http_client->agent('pmdaactivemq');
- $http_client->timeout($rest_timeout);
--my $rest_client = RESTClient->new($http_client, $cache, $rest_hostname, $rest_port, $rest_username, $rest_password, $rest_realm);
--my $activemq = ActiveMQ->new($rest_client);
--my $jvm_memory = JVMMemory->new($rest_client);
--my $jvm_memory_pool = JVMMemoryPool->new($rest_client);
--my $jvm_garbage_collection = JVMGarbageCollection->new($rest_client);
-+my $rest_client = PCP::RESTClient->new($http_client, $cache, $rest_hostname, $rest_port, $rest_username, $rest_password, $rest_realm);
-+my $activemq = PCP::ActiveMQ->new($rest_client);
-+my $jvm_memory = PCP::JVMMemory->new($rest_client);
-+my $jvm_memory_pool = PCP::JVMMemoryPool->new($rest_client);
-+my $jvm_garbage_collection = PCP::JVMGarbageCollection->new($rest_client);
- 
- my %queue_instances;
- 
diff --git a/SOURCES/archive-discovery.patch b/SOURCES/archive-discovery.patch
deleted file mode 100644
index cf7a3d3..0000000
--- a/SOURCES/archive-discovery.patch
+++ /dev/null
@@ -1,3175 +0,0 @@
-diff -Naurp pcp-5.0.2.orig/qa/1211.out pcp-5.0.2/qa/1211.out
---- pcp-5.0.2.orig/qa/1211.out	2019-12-06 15:18:26.000000000 +1100
-+++ pcp-5.0.2/qa/1211.out	2020-02-03 13:23:15.258762963 +1100
-@@ -144,6 +144,9 @@ kernel.uname.nodename
- kernel.uname.release
- kernel.uname.sysname
- kernel.uname.version
-+pmcd.pmlogger.archive
-+pmcd.pmlogger.host
-+pmcd.pmlogger.port
- proc.fd.count
- proc.id.egid
- proc.id.egid_nm
-@@ -267,6 +270,7 @@ List all instance names ...
- 030016 pmlogger -P -c config.default 20110930.17.20
- 1 minute
- 15 minute
-+2950
- 5 minute
- cpu0
- cpu1
-@@ -398,10 +402,10 @@ fecd5a4b4c6e1273eaa001287a6dd57b7bbd19f7
- Values fetch for a single-valued query ...
- 
- d51624d12da45900bfee2fd73f1e23f3ccabb784
--    [Mon Oct  3 09:10:22.959242000 2011] 172598244
--    [Mon Oct  3 09:10:23.300460000 2011] 172598364
--    [Mon Oct  3 09:10:23.802930000 2011] 172598481
-     [Mon Oct  3 09:10:24.305845000 2011] 172598559
-+    [Mon Oct  3 09:10:23.802930000 2011] 172598481
-+    [Mon Oct  3 09:10:23.300460000 2011] 172598364
-+    [Mon Oct  3 09:10:22.959242000 2011] 172598244
- 
- Values fetch with a one-second interval ...
- 
-@@ -420,15 +424,18 @@ d51624d12da45900bfee2fd73f1e23f3ccabb784
- Values fetch for a multi-valued query ...
- 
- fecd5a4b4c6e1273eaa001287a6dd57b7bbd19f7
--    [Mon Oct  3 09:10:23.300460000 2011] 0.000000e+00 59181b1de54ff2b383cfd1cdd8636f86c880b69b
--    [Mon Oct  3 09:10:23.300460000 2011] 2.000000e-02 ab010c7d45145aa33c8f8fa681a68c9d4102ae19
--    [Mon Oct  3 09:10:23.300460000 2011] 5.000000e-02 9d418095c9f971ff4fd44d6828ead27f9d021dc3
--    [Mon Oct  3 09:10:23.802930000 2011] 0.000000e+00 59181b1de54ff2b383cfd1cdd8636f86c880b69b
--    [Mon Oct  3 09:10:23.802930000 2011] 2.000000e-02 ab010c7d45145aa33c8f8fa681a68c9d4102ae19
--    [Mon Oct  3 09:10:23.802930000 2011] 5.000000e-02 9d418095c9f971ff4fd44d6828ead27f9d021dc3
-     [Mon Oct  3 09:10:24.305845000 2011] 0.000000e+00 59181b1de54ff2b383cfd1cdd8636f86c880b69b
-     [Mon Oct  3 09:10:24.305845000 2011] 2.000000e-02 ab010c7d45145aa33c8f8fa681a68c9d4102ae19
-     [Mon Oct  3 09:10:24.305845000 2011] 5.000000e-02 9d418095c9f971ff4fd44d6828ead27f9d021dc3
-+    [Mon Oct  3 09:10:23.802930000 2011] 0.000000e+00 59181b1de54ff2b383cfd1cdd8636f86c880b69b
-+    [Mon Oct  3 09:10:23.802930000 2011] 2.000000e-02 ab010c7d45145aa33c8f8fa681a68c9d4102ae19
-+    [Mon Oct  3 09:10:23.802930000 2011] 5.000000e-02 9d418095c9f971ff4fd44d6828ead27f9d021dc3
-+    [Mon Oct  3 09:10:23.300460000 2011] 0.000000e+00 59181b1de54ff2b383cfd1cdd8636f86c880b69b
-+    [Mon Oct  3 09:10:23.300460000 2011] 2.000000e-02 ab010c7d45145aa33c8f8fa681a68c9d4102ae19
-+    [Mon Oct  3 09:10:23.300460000 2011] 5.000000e-02 9d418095c9f971ff4fd44d6828ead27f9d021dc3
-+    [Mon Oct  3 09:10:22.959242000 2011] 0.000000e+00 59181b1de54ff2b383cfd1cdd8636f86c880b69b
-+    [Mon Oct  3 09:10:22.959242000 2011] 2.000000e-02 ab010c7d45145aa33c8f8fa681a68c9d4102ae19
-+    [Mon Oct  3 09:10:22.959242000 2011] 5.000000e-02 9d418095c9f971ff4fd44d6828ead27f9d021dc3
- 
- Multi-series lookups from a multi-series query ...
- 2db1da4d276d81c42c578c2829e99188ae7cc898
-diff -Naurp pcp-5.0.2.orig/qa/1573 pcp-5.0.2/qa/1573
---- pcp-5.0.2.orig/qa/1573	1970-01-01 10:00:00.000000000 +1000
-+++ pcp-5.0.2/qa/1573	2020-02-03 13:36:17.288581801 +1100
-@@ -0,0 +1,103 @@
-+#!/bin/sh
-+# PCP QA Test No. 1573
-+# Exercise libpcp_web memory leak without a redis-server.
-+#
-+# Copyright (c) 2020 Red Hat.
-+#
-+
-+seq=`basename $0`
-+echo "QA output created by $seq"
-+
-+# get standard environment, filters and checks
-+. ./common.product
-+. ./common.filter
-+. ./common.check
-+
-+_check_series
-+
-+_cleanup()
-+{
-+    cd $here
-+    if $need_restore
-+    then
-+	need_restore=false
-+	_service pmlogger stop >/dev/null
-+	$sudo rm -rf $PCP_LOG_DIR/pmlogger
-+	$sudo mv $PCP_LOG_DIR/pmlogger.$seq $PCP_LOG_DIR/pmlogger
-+	_restore_config $PCP_ETC_DIR/pcp/pmlogger
-+	_service pcp restart 2>&1 | _filter_pcp_stop | _filter_pcp_start
-+	_wait_for_pmcd
-+	_wait_for_pmlogger
-+	echo === restarting pmproxy
-+	_restore_config $PCP_SYSCONF_DIR/pmproxy
-+	_service pmproxy restart 2>&1 | _filter_pcp_start
-+	_wait_for_pmproxy
-+    fi
-+    $sudo rm -rf $tmp $tmp.*
-+}
-+
-+status=1	# failure is the default!
-+need_restore=false
-+$sudo rm -rf $tmp $tmp.* $seq.full
-+trap "_cleanup; exit \$status" 0 1 2 3 15
-+
-+# real QA test starts here
-+_save_config $PCP_SYSCONF_DIR/pmproxy
-+need_restore=true
-+
-+# only want the primary logger running
-+_save_config $PCP_ETC_DIR/pcp/pmlogger
-+_restore_pmlogger_control
-+
-+#$sudo rm -f $PCP_SYSCONF_DIR/pmproxy/*
-+echo "[pmproxy]" > $tmp.conf
-+echo "pcp.enabled = true" >> $tmp.conf
-+echo "http.enabled = true" >> $tmp.conf
-+echo "redis.enabled = true" >> $tmp.conf
-+echo "[discover]" >> $tmp.conf
-+echo "enabled = true" >> $tmp.conf
-+echo "[pmseries]" >> $tmp.conf
-+echo "enabled = false" >> $tmp.conf
-+$sudo cp $tmp.conf $PCP_SYSCONF_DIR/pmproxy/pmproxy.conf
-+
-+_service pmlogger stop >/dev/null
-+
-+# move aside existing logs so we can measure base memory footprint
-+[ -d $PCP_LOG_DIR/pmlogger.$seq ] && $sudo mv $PCP_LOG_DIR/pmlogger.$seq $PCP_LOG_DIR/pmlogger.$seq.saved
-+$sudo mv $PCP_LOG_DIR/pmlogger $PCP_LOG_DIR/pmlogger.$seq
-+$sudo mkdir -p $PCP_LOG_DIR/pmlogger
-+$sudo chmod 775 $PCP_LOG_DIR/pmlogger
-+$sudo chown $PCP_USER:$PCP_USER $PCP_LOG_DIR/pmlogger
-+
-+_service pmproxy restart 2>&1 | _filter_pcp_stop | _filter_pcp_start
-+_wait_for_pmproxy
-+
-+pmproxy_pid=`_get_pids_by_name -a pmproxy`
-+[ -z "$pmproxy_pid" ] && echo === pmproxy not running && status=1 && exit 1
-+echo === extract initial rss
-+pmproxy_rss1=`pminfo -f proc.memory.rss |
-+	$PCP_AWK_PROG '{ if ($2 == "['$pmproxy_pid'") { print $NF} }'`
-+
-+echo === restarting pmlogger # primary only
-+_service pmlogger restart 2>&1 | _filter_pcp_start
-+_wait_for_pmlogger
-+
-+echo === wait for pmproxy to process filesystem events
-+pmsleep 4.2
-+
-+echo === extract updated rss
-+pmproxy_rss2=`pminfo -f proc.memory.rss |
-+	$PCP_AWK_PROG '{ if ($2 == "['$pmproxy_pid'") { print $NF} }'`
-+
-+echo === checking rss within tolerance
-+_within_tolerance "rss" $pmproxy_rss1 $pmproxy_rss2 10%
-+[ $pmproxy_rss2 -gt 10000 ] && echo "Unexpected pmproxy RSS: $pmproxy_rss2, was initially $pmproxy_rss1"
-+
-+echo "RSS1 for PID $pmproxy_pid is $pmproxy_rss1" >> $here/$seq.full
-+echo "RSS2 for PID $pmproxy_pid is $pmproxy_rss2" >> $here/$seq.full
-+cat $PCP_LOG_DIR/pmproxy/pmproxy.log >>$seq.full
-+echo === see $seq.full for pmproxy rss and logs
-+
-+# success, all done
-+status=0
-+exit
-diff -Naurp pcp-5.0.2.orig/qa/1573.out pcp-5.0.2/qa/1573.out
---- pcp-5.0.2.orig/qa/1573.out	1970-01-01 10:00:00.000000000 +1000
-+++ pcp-5.0.2/qa/1573.out	2020-02-03 13:23:15.259762953 +1100
-@@ -0,0 +1,8 @@
-+QA output created by 1573
-+=== extract initial rss
-+=== restarting pmlogger
-+=== wait for pmproxy to process filesystem events
-+=== extract updated rss
-+=== checking rss within tolerance
-+=== see 1573.full for pmproxy rss and logs
-+=== restarting pmproxy
-diff -Naurp pcp-5.0.2.orig/qa/1600 pcp-5.0.2/qa/1600
---- pcp-5.0.2.orig/qa/1600	2019-12-10 17:49:05.000000000 +1100
-+++ pcp-5.0.2/qa/1600	2020-02-03 13:23:15.260762942 +1100
-@@ -82,7 +82,11 @@ _filter_values()
- _filter_label_values()
- {
-     sed \
-+	-e "s/^domainname: \"${domainname}\"/domainname: \"DOMAIN\"/g" \
-+	-e "s/^machineid: \"${machineid}\"/machineid: \"MACHINE\"/g" \
- 	-e "s/^hostname: \"${hostname}\"/hostname: \"HOSTNAME\"/g" \
-+	-e "s/^groupid: $groupid/groupid: GID/g" \
-+	-e "s/^userid: $userid/userid: UID/g" \
- 	-e "s/changed: false, true/changed: false/g" \
- 	-e "/metric_label: null/d" \
-     #end
-diff -Naurp pcp-5.0.2.orig/qa/1600.out pcp-5.0.2/qa/1600.out
---- pcp-5.0.2.orig/qa/1600.out	2019-12-10 10:46:20.000000000 +1100
-+++ pcp-5.0.2/qa/1600.out	2020-02-03 13:23:15.260762942 +1100
-@@ -27,15 +27,15 @@ TIMESERIES
- == verify metric labels
- 
- TIMESERIES
--    inst [100 or "bin-100"] labels {"agent":"sample","hostname":"HOST","role":"testing"}
--    inst [200 or "bin-200"] labels {"agent":"sample","hostname":"HOST","role":"testing"}
--    inst [300 or "bin-300"] labels {"agent":"sample","hostname":"HOST","role":"testing"}
--    inst [400 or "bin-400"] labels {"agent":"sample","hostname":"HOST","role":"testing"}
--    inst [500 or "bin-500"] labels {"agent":"sample","hostname":"HOST","role":"testing"}
--    inst [600 or "bin-600"] labels {"agent":"sample","hostname":"HOST","role":"testing"}
--    inst [700 or "bin-700"] labels {"agent":"sample","hostname":"HOST","role":"testing"}
--    inst [800 or "bin-800"] labels {"agent":"sample","hostname":"HOST","role":"testing"}
--    inst [900 or "bin-900"] labels {"agent":"sample","hostname":"HOST","role":"testing"}
-+    inst [100 or "bin-100"] labels {"agent":"sample","bin":100,"domainname":"DOMAIN","groupid":GID,"hostname":"HOST","latitude":-25.28496,"longitude":152.87886,"machineid":"MACHINE","role":"testing","userid":UID}
-+    inst [200 or "bin-200"] labels {"agent":"sample","bin":200,"domainname":"DOMAIN","groupid":GID,"hostname":"HOST","latitude":-25.28496,"longitude":152.87886,"machineid":"MACHINE","role":"testing","userid":UID}
-+    inst [300 or "bin-300"] labels {"agent":"sample","bin":300,"domainname":"DOMAIN","groupid":GID,"hostname":"HOST","latitude":-25.28496,"longitude":152.87886,"machineid":"MACHINE","role":"testing","userid":UID}
-+    inst [400 or "bin-400"] labels {"agent":"sample","bin":400,"domainname":"DOMAIN","groupid":GID,"hostname":"HOST","latitude":-25.28496,"longitude":152.87886,"machineid":"MACHINE","role":"testing","userid":UID}
-+    inst [500 or "bin-500"] labels {"agent":"sample","bin":500,"domainname":"DOMAIN","groupid":GID,"hostname":"HOST","latitude":-25.28496,"longitude":152.87886,"machineid":"MACHINE","role":"testing","userid":UID}
-+    inst [600 or "bin-600"] labels {"agent":"sample","bin":600,"domainname":"DOMAIN","groupid":GID,"hostname":"HOST","latitude":-25.28496,"longitude":152.87886,"machineid":"MACHINE","role":"testing","userid":UID}
-+    inst [700 or "bin-700"] labels {"agent":"sample","bin":700,"domainname":"DOMAIN","groupid":GID,"hostname":"HOST","latitude":-25.28496,"longitude":152.87886,"machineid":"MACHINE","role":"testing","userid":UID}
-+    inst [800 or "bin-800"] labels {"agent":"sample","bin":800,"domainname":"DOMAIN","groupid":GID,"hostname":"HOST","latitude":-25.28496,"longitude":152.87886,"machineid":"MACHINE","role":"testing","userid":UID}
-+    inst [900 or "bin-900"] labels {"agent":"sample","bin":900,"domainname":"DOMAIN","groupid":GID,"hostname":"HOST","latitude":-25.28496,"longitude":152.87886,"machineid":"MACHINE","role":"testing","userid":UID}
- == verify metric values
- 
- TIMESERIES
-@@ -43,15 +43,24 @@ TIMESERIES
-     [TIMESTAMP] VALUE
- == verify label names and values
- agent: "mmv", "sample", "pmcd"
-+bin: 100, 200, 300, 400, 500, 600, 700, 800, 900
- changed: false
- clan: "mcdonell"
- cluster: "zero"
-+domainname: "DOMAIN"
-+groupid: GID
- hostname: "HOSTNAME"
- indom_label: 42.001
-+latitude: -25.28496
-+longitude: 152.87886
-+machineid: "MACHINE"
- measure: "speed"
- model: "RGB"
-+registry_label: "string"
- role: "testing"
-+transient: false, true
- units: "metres per second"
- unitsystem: "SI"
-+userid: UID
- == verify archive removal
- == all done
-diff -Naurp pcp-5.0.2.orig/qa/1601.out pcp-5.0.2/qa/1601.out
---- pcp-5.0.2.orig/qa/1601.out	2019-11-27 16:01:34.000000000 +1100
-+++ pcp-5.0.2/qa/1601.out	2020-02-03 13:23:15.261762932 +1100
-@@ -131,7 +131,7 @@ Using series 01d8bc7fa75aaff98a08aa0b1c0
-     {
-         "series": "605fc77742cd0317597291329561ac4e50c0dd12",
-         "instance": "c3795d8b757506a2901c6b08b489ba56cae7f0d4",
--        "timestamp": 1317633023300.460,
-+        "timestamp": 1317633024305.845,
-         "value": "71661"
-     },
-     {
-@@ -147,7 +147,7 @@ Using series 01d8bc7fa75aaff98a08aa0b1c0
-         {
-             "series": "605fc77742cd0317597291329561ac4e50c0dd12",
-             "instance": "c3795d8b757506a2901c6b08b489ba56cae7f0d4",
--            "timestamp": 1317633023300.460,
-+            "timestamp": 1317633024305.845,
-             "value": "71661"
-         },
-         {
-@@ -163,7 +163,7 @@ Using series 01d8bc7fa75aaff98a08aa0b1c0
-     {
-         "series": "605fc77742cd0317597291329561ac4e50c0dd12",
-         "instance": "c3795d8b757506a2901c6b08b489ba56cae7f0d4",
--        "timestamp": 1317633023300.460,
-+        "timestamp": 1317633024305.845,
-         "value": "71661"
-     },
-     {
-@@ -179,7 +179,7 @@ Using series 01d8bc7fa75aaff98a08aa0b1c0
-         {
-             "series": "605fc77742cd0317597291329561ac4e50c0dd12",
-             "instance": "c3795d8b757506a2901c6b08b489ba56cae7f0d4",
--            "timestamp": 1317633023300.460,
-+            "timestamp": 1317633024305.845,
-             "value": "71661"
-         },
-         {
-diff -Naurp pcp-5.0.2.orig/qa/1661 pcp-5.0.2/qa/1661
---- pcp-5.0.2.orig/qa/1661	2019-12-10 17:04:20.000000000 +1100
-+++ pcp-5.0.2/qa/1661	2020-02-03 13:23:15.261762932 +1100
-@@ -41,8 +41,7 @@ _restore_pmlogger_control
- echo;echo === restarting pmproxy service to ensure sane starting condition 
- _service pmlogger stop 2>&1 | _filter_pcp_stop
- _service pmproxy restart 2>&1 | _filter_pcp_stop | _filter_pcp_start
--# give pmproxy a chance to startup
--pmsleep 2; _wait_for_pmproxy
-+_wait_for_pmproxy
- 
- pmproxy_pid=`_get_pids_by_name -a pmproxy`
- [ -z "$pmproxy_pid" ] && echo === pmproxy not running && status=1 && exit 1
-diff -Naurp pcp-5.0.2.orig/qa/group pcp-5.0.2/qa/group
---- pcp-5.0.2.orig/qa/group	2019-12-11 14:06:06.000000000 +1100
-+++ pcp-5.0.2/qa/group	2020-02-03 13:23:15.261762932 +1100
-@@ -1688,6 +1688,7 @@ BAD
- 1545 pcp2xml python pcp2xxx local
- 1546 pmrep python local
- 1547 pmrep python local
-+1573 pmproxy libpcp_web pmlogger local
- 1588 python pmiostat local
- 1598 pmda.statsd local
- 1599 pmda.statsd local
-diff -Naurp pcp-5.0.2.orig/src/include/pcp/libpcp.h pcp-5.0.2/src/include/pcp/libpcp.h
---- pcp-5.0.2.orig/src/include/pcp/libpcp.h	2019-09-24 17:23:36.000000000 +1000
-+++ pcp-5.0.2/src/include/pcp/libpcp.h	2020-02-03 13:23:15.261762932 +1100
-@@ -7,7 +7,7 @@
-  *	remain fixed across releases, and they may not work, or may
-  *	provide different semantics at some point in the future.
-  *
-- * Copyright (c) 2012-2019 Red Hat.
-+ * Copyright (c) 2012-2020 Red Hat.
-  * Copyright (c) 2008-2009 Aconex.  All Rights Reserved.
-  * Copyright (c) 1995-2002 Silicon Graphics, Inc.  All Rights Reserved.
-  *
-@@ -846,6 +846,13 @@ PCP_CALL extern int __pmLogPutText(__pmA
- PCP_CALL extern int __pmLogWriteLabel(__pmFILE *, const __pmLogLabel *);
- PCP_CALL extern int __pmLogLoadLabel(__pmArchCtl *, const char *);
- PCP_CALL extern int __pmLogLoadMeta(__pmArchCtl *);
-+PCP_CALL extern int __pmLogAddDesc(__pmArchCtl *, const pmDesc *);
-+PCP_CALL extern int __pmLogAddInDom(__pmArchCtl *, const pmTimespec *, const pmInResult *, int *, int);
-+PCP_CALL extern int __pmLogAddPMNSNode(__pmArchCtl *, pmID, const char *);
-+PCP_CALL extern int __pmLogAddLabelSets(__pmArchCtl *, const pmTimespec *, unsigned int, unsigned int, int, pmLabelSet *);
-+PCP_CALL extern int __pmLogAddText(__pmArchCtl *, unsigned int, unsigned int, const char *);
-+PCP_CALL extern int __pmLogAddVolume(__pmArchCtl *, unsigned int);
-+
- #define PMLOGREAD_NEXT		0
- #define PMLOGREAD_TO_EOF	1
- PCP_CALL extern int __pmLogRead(__pmArchCtl *, int, __pmFILE *, pmResult **, int);
-@@ -862,7 +869,9 @@ PCP_CALL extern int __pmLogLookupText(__
- PCP_CALL extern int __pmLogNameInDom(__pmArchCtl *, pmInDom, pmTimeval *, int, char **);
- PCP_CALL extern const char *__pmLogLocalSocketDefault(int, char *buf, size_t bufSize);
- PCP_CALL extern const char *__pmLogLocalSocketUser(int, char *buf, size_t bufSize);
-+PCP_CALL extern int __pmLogCompressedSuffix(const char *);
- PCP_CALL extern char *__pmLogBaseName(char *);
-+PCP_CALL extern char *__pmLogBaseNameVol(char *, int *);
- PCP_DATA extern int __pmLogReads;
- 
- /* Convert opaque context handle to __pmContext pointer */
-diff -Naurp pcp-5.0.2.orig/src/libpcp/src/exports.master pcp-5.0.2/src/libpcp/src/exports.master
---- pcp-5.0.2.orig/src/libpcp/src/exports.master	2019-10-02 14:40:30.000000000 +1000
-+++ pcp-5.0.2/src/libpcp/src/exports.master	2020-02-03 13:23:15.262762921 +1100
-@@ -683,3 +683,15 @@ PCP_3.26 {
-   global:
-     __pmDupLabelSets;
- } PCP_3.25;
-+
-+PCP_3.26_1 {
-+  global:
-+    __pmLogAddDesc;
-+    __pmLogAddInDom;
-+    __pmLogAddPMNSNode;
-+    __pmLogAddLabelSets;
-+    __pmLogAddText;
-+    __pmLogAddVolume;
-+    __pmLogCompressedSuffix;
-+    __pmLogBaseNameVol;
-+} PCP_3.26;
-diff -Naurp pcp-5.0.2.orig/src/libpcp/src/io.c pcp-5.0.2/src/libpcp/src/io.c
---- pcp-5.0.2.orig/src/libpcp/src/io.c	2018-06-09 11:43:34.000000000 +1000
-+++ pcp-5.0.2/src/libpcp/src/io.c	2020-02-03 13:23:15.262762921 +1100
-@@ -1,5 +1,5 @@
- /*
-- * Copyright (c) 2017-2018 Red Hat.
-+ * Copyright (c) 2017-2018,2020 Red Hat.
-  * 
-  * This library is free software; you can redistribute it and/or modify it
-  * under the terms of the GNU Lesser General Public License as published
-@@ -47,7 +47,7 @@ extern __pm_fops __pm_xz;
- #endif
- 
- static const struct {
--    const char	*suff;
-+    const char	*suffix;
-     const int	appl;
-     __pm_fops   *handler;
- } compress_ctl[] = {
-@@ -61,40 +61,43 @@ static const struct {
- };
- static const int ncompress = sizeof(compress_ctl) / sizeof(compress_ctl[0]);
- 
-+int
-+__pmLogCompressedSuffix(const char *suffix)
-+{
-+    int		i;
-+
-+    for (i = 0; i < ncompress; i++)
-+	if (strcmp(suffix, compress_ctl[i].suffix) == 0)
-+	    return 1;
-+    return 0;
-+}
-+
- /*
-- * If name contains '.' and the suffix is "index", "meta" or a string of
-- * digits, all optionally followed by one of the compression suffixes,
-- * strip the suffix.
-- *
-- * Modifications are performed on the argument string in-place. If modifications
-- * are made, a pointer to the start of the modified string is returned.
-- * Otherwise, NULL is returned.
-+ * Variant of __pmLogBaseName() - see below that also returns log
-+ * the volume number if the file name is an archive log volume.
-+ * If the vol argument is NULL it will be ignored.
-  */
- char *
--__pmLogBaseName(char *name)
-+__pmLogBaseNameVol(char *name, int *vol)
- {
--    char *q;
--    int   strip;
--    int   i;
-+    char	*q, *q2;
-+    int		strip = 0;
- 
--    strip = 0;
-+    if (vol)
-+	*vol = -1;
-     if ((q = strrchr(name, '.')) != NULL) {
--	for (i = 0; i < ncompress; i++) {
--	    if (strcmp(q, compress_ctl[i].suff) == 0) {
--		char	*q2;
--		/*
--		 * The name ends with one of the supported compressed file
--		 * suffixes. Strip it before checking for other known suffixes.
--		 */
--		*q = '\0';
--		if ((q2 = strrchr(name, '.')) == NULL) {
--		    /* no . to the left of the suffix */
--		    *q = '.';
--		    goto done;
--		}
--		q = q2;
--		break;
-+	if (__pmLogCompressedSuffix(q)) {
-+	    /*
-+	     * The name ends with one of the supported compressed file
-+	     * suffixes. Strip it before checking for other known suffixes.
-+	     */
-+	    *q = '\0';
-+	    if ((q2 = strrchr(name, '.')) == NULL) {
-+		/* no . to the left of the suffix */
-+		*q = '.';
-+		goto done;
- 	    }
-+	    q = q2;
- 	}
- 	if (strcmp(q, ".index") == 0) {
- 	    strip = 1;
-@@ -109,16 +112,10 @@ __pmLogBaseName(char *name)
- 	 */
- 	if (q[1] != '\0') {
- 	    char	*end;
--	    /*
--	     * Below we don't care about the value from strtol(),
--	     * we're interested in updating the pointer "end".
--	     * The messiness is thanks to gcc and glibc ... strtol()
--	     * is marked __attribute__((warn_unused_result)) ...
--	     * to avoid warnings on all platforms, assign to a
--	     * dummy variable that is explicitly marked unused.
--	     */
--	    long	tmpl __attribute__((unused));
-+	    long	tmpl;
- 	    tmpl = strtol(q+1, &end, 10);
-+	    if (vol)
-+		*vol = tmpl;
- 	    if (*end == '\0') strip = 1;
- 	}
-     }
-@@ -131,6 +128,21 @@ done:
-     return NULL; /* not the name of an archive file. */
- }
- 
-+/*
-+ * If name contains '.' and the suffix is "index", "meta" or a string of
-+ * digits, all optionally followed by one of the compression suffixes,
-+ * strip the suffix.
-+ *
-+ * Modifications are performed on the argument string in-place. If modifications
-+ * are made, a pointer to the start of the modified string is returned.
-+ * Otherwise, NULL is returned.
-+ */
-+char *
-+__pmLogBaseName(char *name)
-+{
-+    return __pmLogBaseNameVol(name, NULL);
-+}
-+
- static int
- popen_uncompress(const char *cmd, const char *arg, const char *fname, int fd)
- {
-@@ -319,7 +331,7 @@ __pmCompressedFileIndex(char *fname, siz
-     char	tmpname[MAXPATHLEN];
- 
-     for (i = 0; i < ncompress; i++) {
--	suffix = compress_ctl[i].suff;
-+	suffix = compress_ctl[i].suffix;
- 	pmsprintf(tmpname, sizeof(tmpname), "%s%s", fname, suffix);
- 	sts = access(tmpname, R_OK);
- 	if (sts == 0 || (errno != ENOENT && errno != ENOTDIR)) {
-@@ -358,7 +370,7 @@ index_compress(char *fname, size_t flen)
-     suffix = strrchr(fname, '.');
-     if (suffix != NULL) {
- 	for (i = 0; i < ncompress; i++) {
--	    if (strcmp(suffix, compress_ctl[i].suff) == 0)
-+	    if (strcmp(suffix, compress_ctl[i].suffix) == 0)
- 		return i;
- 	}
-     }
-@@ -731,7 +743,7 @@ compress_suffix_list(void)
- 	const char	*q;
- 
- 	for (i = 0; i < ncompress; i++) {
--	    q = compress_ctl[i].suff;
-+	    q = compress_ctl[i].suffix;
- 	    if (i > 0)
- 		*p++ = ' ';
- 	    while (*q) {
-diff -Naurp pcp-5.0.2.orig/src/libpcp/src/logmeta.c pcp-5.0.2/src/libpcp/src/logmeta.c
---- pcp-5.0.2.orig/src/libpcp/src/logmeta.c	2018-09-14 10:22:56.000000000 +1000
-+++ pcp-5.0.2/src/libpcp/src/logmeta.c	2020-02-03 13:23:15.262762921 +1100
-@@ -1,5 +1,5 @@
- /*
-- * Copyright (c) 2013-2018 Red Hat.
-+ * Copyright (c) 2013-2018, 2020 Red Hat.
-  * Copyright (c) 1995-2002 Silicon Graphics, Inc.  All Rights Reserved.
-  * 
-  * This library is free software; you can redistribute it and/or modify it
-@@ -490,7 +490,7 @@ check_dup_labels(const __pmArchCtl *acp)
- }
- 
- static int
--addtext(__pmArchCtl *acp, unsigned int ident, unsigned int type, char *buffer)
-+addtext(__pmArchCtl *acp, unsigned int ident, unsigned int type, const char *buffer)
- {
-     __pmLogCtl		*lcp = acp->ac_log;
-     __pmHashNode	*hp;
-@@ -553,6 +553,92 @@ PM_FAULT_POINT("libpcp/" __FILE__ ":15",
-     return sts;
- }
- 
-+int
-+__pmLogAddDesc(__pmArchCtl *acp, const pmDesc *newdp)
-+{
-+    __pmHashNode	*hp;
-+    __pmLogCtl		*lcp = acp->ac_log;
-+    pmDesc		*dp, *olddp;
-+
-+    if ((hp = __pmHashSearch((int)newdp->pmid, &lcp->l_hashpmid)) != NULL) {
-+	/* PMID is already in the hash table - check for conflicts. */
-+	olddp = (pmDesc *)hp->data;
-+	if (newdp->type != olddp->type)
-+	    return PM_ERR_LOGCHANGETYPE;
-+	if (newdp->sem != olddp->sem)
-+	    return PM_ERR_LOGCHANGESEM;
-+	if (newdp->indom != olddp->indom)
-+	    return PM_ERR_LOGCHANGEINDOM;
-+	if (newdp->units.dimSpace != olddp->units.dimSpace ||
-+	    newdp->units.dimTime != olddp->units.dimTime ||
-+	    newdp->units.dimCount != olddp->units.dimCount ||
-+	    newdp->units.scaleSpace != olddp->units.scaleSpace ||
-+	    newdp->units.scaleTime != olddp->units.scaleTime ||
-+	    newdp->units.scaleCount != olddp->units.scaleCount)
-+	    return PM_ERR_LOGCHANGEUNITS;
-+
-+	/* PMID is already known and checks out - we're done here. */
-+	return 0;
-+    }
-+
-+    /* Add a copy of the descriptor into the PMID:desc hash table. */
-+PM_FAULT_POINT("libpcp/" __FILE__ ":2", PM_FAULT_ALLOC);
-+    if ((dp = (pmDesc *)malloc(sizeof(pmDesc))) == NULL)
-+	return -oserror();
-+    *dp = *newdp;
-+
-+    return __pmHashAdd((int)dp->pmid, (void *)dp, &lcp->l_hashpmid);
-+}
-+
-+int
-+__pmLogAddPMNSNode(__pmArchCtl *acp, pmID pmid, const char *name)
-+{
-+    __pmLogCtl		*lcp = acp->ac_log;
-+    int			sts;
-+
-+    /*
-+     * If we see a duplicate name with a different PMID, its a
-+     * recoverable error.
-+     * We wont be able to see all of the data in the log, but
-+     * its better to provide access to some rather than none,
-+     * esp. when only one or two metric IDs may be corrupted
-+     * in this way (which we may not be interested in anyway).
-+     */
-+    sts = __pmAddPMNSNode(lcp->l_pmns, pmid, name);
-+    if (sts == PM_ERR_PMID)
-+	sts = 0;
-+    return sts;
-+}
-+
-+int
-+__pmLogAddInDom(__pmArchCtl *acp, const pmTimespec *when, const pmInResult *in,
-+		int *tbuf, int allinbuf)
-+{
-+    pmTimeval		tv;
-+
-+    tv.tv_sec = when->tv_sec;
-+    tv.tv_usec = when->tv_nsec / 1000;
-+    return addindom(acp->ac_log, in->indom, &tv,
-+		    in->numinst, in->instlist, in->namelist, tbuf, allinbuf);
-+}
-+
-+int
-+__pmLogAddLabelSets(__pmArchCtl *acp, const pmTimespec *when, unsigned int type,
-+		unsigned int ident, int nsets, pmLabelSet *labelsets)
-+{
-+    pmTimeval		tv;
-+
-+    tv.tv_sec = when->tv_sec;
-+    tv.tv_usec = when->tv_nsec / 1000;
-+    return addlabel(acp, type, ident, nsets, labelsets, &tv);
-+}
-+
-+int
-+__pmLogAddText(__pmArchCtl *acp, unsigned int ident, unsigned int type, const char *buffer)
-+{
-+    return addtext(acp, ident, type, buffer);
-+}
-+
- /*
-  * Load _all_ of the hashed pmDesc and __pmLogInDom structures from the metadata
-  * log file -- used at the initialization (NewContext) of an archive.
-@@ -563,11 +649,8 @@ int
- __pmLogLoadMeta(__pmArchCtl *acp)
- {
-     __pmLogCtl		*lcp = acp->ac_log;
--    __pmHashNode	*hp;
-     int			rlen;
-     int			check;
--    pmDesc		*dp;
--    pmDesc		*olddp;
-     int			sts = 0;
-     __pmLogHdr		h;
-     __pmFILE		*f = lcp->l_mdfp;
-@@ -615,13 +698,10 @@ __pmLogLoadMeta(__pmArchCtl *acp)
- 	}
- 	rlen = h.len - (int)sizeof(__pmLogHdr) - (int)sizeof(int);
- 	if (h.type == TYPE_DESC) {
-+	    pmDesc		desc;
-+
- 	    numpmid++;
--PM_FAULT_POINT("libpcp/" __FILE__ ":2", PM_FAULT_ALLOC);
--	    if ((dp = (pmDesc *)malloc(sizeof(pmDesc))) == NULL) {
--		sts = -oserror();
--		goto end;
--	    }
--	    if ((n = (int)__pmFread(dp, 1, sizeof(pmDesc), f)) != sizeof(pmDesc)) {
-+	    if ((n = (int)__pmFread(&desc, 1, sizeof(pmDesc), f)) != sizeof(pmDesc)) {
- 		if (pmDebugOptions.logmeta) {
- 		    fprintf(stderr, "__pmLogLoadMeta: pmDesc read -> %d: expected: %d\n",
- 			    n, (int)sizeof(pmDesc));
-@@ -632,67 +712,25 @@ PM_FAULT_POINT("libpcp/" __FILE__ ":2",
- 		}
- 		else
- 		    sts = PM_ERR_LOGREC;
--		free(dp);
- 		goto end;
- 	    }
--	    else {
--		/* swab desc */
--		dp->type = ntohl(dp->type);
--		dp->sem = ntohl(dp->sem);
--		dp->indom = __ntohpmInDom(dp->indom);
--		dp->units = __ntohpmUnits(dp->units);
--		dp->pmid = __ntohpmID(dp->pmid);
--	    }
- 
--	    /* Add it to the hash pmid hash table. */
--	    if ((hp = __pmHashSearch((int)dp->pmid, &lcp->l_hashpmid)) != NULL) {
--		/*
--		 * This pmid is already in the hash table. Check for conflicts.
--		 */
--		olddp = (pmDesc *)hp->data;
--		if (dp->type != olddp->type) {
--		    sts = PM_ERR_LOGCHANGETYPE;
--		    free(dp);
--		    goto end;
--		}
--		if (dp->sem != olddp->sem) {
--		    sts = PM_ERR_LOGCHANGESEM;
--		    free(dp);
--		    goto end;
--		}
--		if (dp->indom != olddp->indom) {
--		    sts = PM_ERR_LOGCHANGEINDOM;
--		    free(dp);
--		    goto end;
--		}
--		if (dp->units.dimSpace != olddp->units.dimSpace ||
--		    dp->units.dimTime != olddp->units.dimTime ||
--		    dp->units.dimCount != olddp->units.dimCount ||
--		    dp->units.scaleSpace != olddp->units.scaleSpace ||
--		    dp->units.scaleTime != olddp->units.scaleTime ||
--		    dp->units.scaleCount != olddp->units.scaleCount) {
--		    sts = PM_ERR_LOGCHANGEUNITS;
--		    free(dp);
--		    goto end;
--		}
--                /*
--                 * This pmid is already known, and matches.  We can free the newly
--                 * read copy and use the one in the hash table. 
--                 */
--                free(dp);
--                dp = olddp;
--	    }
--	    else if ((sts = __pmHashAdd((int)dp->pmid, (void *)dp, &lcp->l_hashpmid)) < 0) {
--		free(dp);
-+	    /* swab desc */
-+	    desc.type = ntohl(desc.type);
-+	    desc.sem = ntohl(desc.sem);
-+	    desc.indom = __ntohpmInDom(desc.indom);
-+	    desc.units = __ntohpmUnits(desc.units);
-+	    desc.pmid = __ntohpmID(desc.pmid);
-+
-+	    if ((sts = __pmLogAddDesc(acp, &desc)) < 0)
- 		goto end;
--	    }
- 
- 	    /* read in the names & store in PMNS tree ... */
- 	    if ((n = (int)__pmFread(&numnames, 1, sizeof(numnames), f)) != 
- 		sizeof(numnames)) {
- 		if (pmDebugOptions.logmeta) {
--		    fprintf(stderr, "__pmLogLoadMeta: numnames read -> %d: expected: %d\n",
--			    n, (int)sizeof(numnames));
-+		    fprintf(stderr, "%s: numnames read -> %d: expected: %d\n",
-+			    "__pmLogLoadMeta", n, (int)sizeof(numnames));
- 		}
- 		if (__pmFerror(f)) {
- 		    __pmClearerr(f);
-@@ -711,8 +749,8 @@ PM_FAULT_POINT("libpcp/" __FILE__ ":2",
- 		if ((n = (int)__pmFread(&len, 1, sizeof(len), f)) != 
- 		    sizeof(len)) {
- 		    if (pmDebugOptions.logmeta) {
--			fprintf(stderr, "__pmLogLoadMeta: len name[%d] read -> %d: expected: %d\n",
--				i, n, (int)sizeof(len));
-+			fprintf(stderr, "%s: len name[%d] read -> %d: expected: %d\n",
-+				"__pmLogLoadMeta", i, n, (int)sizeof(len));
- 		    }
- 		    if (__pmFerror(f)) {
- 			__pmClearerr(f);
-@@ -729,8 +767,8 @@ PM_FAULT_POINT("libpcp/" __FILE__ ":2",
- 
- 		if ((n = (int)__pmFread(name, 1, len, f)) != len) {
- 		    if (pmDebugOptions.logmeta) {
--			fprintf(stderr, "__pmLogLoadMeta: name[%d] read -> %d: expected: %d\n",
--				i, n, len);
-+			fprintf(stderr, "%s: name[%d] read -> %d: expected: %d\n",
-+				"__pmLogLoadMeta", i, n, len);
- 		    }
- 		    if (__pmFerror(f)) {
- 			__pmClearerr(f);
-@@ -743,36 +781,23 @@ PM_FAULT_POINT("libpcp/" __FILE__ ":2",
- 		name[len] = '\0';
- 		if (pmDebugOptions.logmeta) {
- 		    char	strbuf[20];
--		    fprintf(stderr, "__pmLogLoadMeta: PMID: %s name: %s\n",
--			    pmIDStr_r(dp->pmid, strbuf, sizeof(strbuf)), name);
-+		    fprintf(stderr, "%s: PMID: %s name: %s\n",
-+			    "__pmLogLoadMeta",
-+			    pmIDStr_r(desc.pmid, strbuf, sizeof(strbuf)), name);
- 		}
--		/* Add the new PMNS node */
--		if ((sts = __pmAddPMNSNode(lcp->l_pmns, dp->pmid, name)) < 0) {
--		    /*
--		     * If we see a duplicate name with a different PMID, its a
--		     * recoverable error.
--		     * We wont be able to see all of the data in the log, but
--		     * its better to provide access to some rather than none,
--		     * esp. when only one or two metric IDs may be corrupted
--		     * in this way (which we may not be interested in anyway).
--		     */
--		    if (sts != PM_ERR_PMID)
--			goto end;
--		} 
-+
-+		/* Add the new PMNS node into this context */
-+		if ((sts = __pmLogAddPMNSNode(acp, desc.pmid, name)) < 0)
-+		    goto end;
- 	    }/*for*/
- 	}
- 	else if (h.type == TYPE_INDOM) {
--	    int			*tbuf;
--	    pmInDom		indom;
--	    pmTimeval		*when;
--	    int			numinst;
--	    int			*instlist;
--	    char		**namelist;
-+	    pmTimeval		*tv;
-+	    pmTimespec		when;
-+	    pmInResult		in;
- 	    char		*namebase;
--	    int			*stridx;
--	    int			i;
--	    int			k;
--	    int			allinbuf = 0;
-+	    int			*tbuf, *stridx;
-+	    int			i, k, allinbuf = 0;
- 
- PM_FAULT_POINT("libpcp/" __FILE__ ":3", PM_FAULT_ALLOC);
- 	    if ((tbuf = (int *)malloc(rlen)) == NULL) {
-@@ -781,8 +806,8 @@ PM_FAULT_POINT("libpcp/" __FILE__ ":3",
- 	    }
- 	    if ((n = (int)__pmFread(tbuf, 1, rlen, f)) != rlen) {
- 		if (pmDebugOptions.logmeta) {
--		    fprintf(stderr, "__pmLogLoadMeta: indom read -> %d: expected: %d\n",
--			    n, rlen);
-+		    fprintf(stderr, "%s: indom read -> %d: expected: %d\n",
-+			    "__pmLogLoadMeta", n, rlen);
- 		}
- 		if (__pmFerror(f)) {
- 		    __pmClearerr(f);
-@@ -795,44 +820,44 @@ PM_FAULT_POINT("libpcp/" __FILE__ ":3",
- 	    }
- 
- 	    k = 0;
--	    when = (pmTimeval *)&tbuf[k];
--	    when->tv_sec = ntohl(when->tv_sec);
--	    when->tv_usec = ntohl(when->tv_usec);
--	    k += sizeof(*when)/sizeof(int);
--	    indom = __ntohpmInDom((unsigned int)tbuf[k++]);
--	    numinst = ntohl(tbuf[k++]);
--	    if (numinst > 0) {
--		instlist = &tbuf[k];
--		k += numinst;
-+	    tv = (pmTimeval *)&tbuf[k];
-+	    when.tv_sec = ntohl(tv->tv_sec);
-+	    when.tv_nsec = ntohl(tv->tv_usec) * 1000;
-+	    k += sizeof(*tv)/sizeof(int);
-+	    in.indom = __ntohpmInDom((unsigned int)tbuf[k++]);
-+	    in.numinst = ntohl(tbuf[k++]);
-+	    if (in.numinst > 0) {
-+		in.instlist = &tbuf[k];
-+		k += in.numinst;
- 		stridx = &tbuf[k];
- #if defined(HAVE_32BIT_PTR)
--		namelist = (char **)stridx;
-+		in.namelist = (char **)stridx;
- 		allinbuf = 1; /* allocation is all in tbuf */
- #else
- 		allinbuf = 0; /* allocation for namelist + tbuf */
- 		/* need to allocate to hold the pointers */
- PM_FAULT_POINT("libpcp/" __FILE__ ":4", PM_FAULT_ALLOC);
--		namelist = (char **)malloc(numinst*sizeof(char*));
--		if (namelist == NULL) {
-+		in.namelist = (char **)malloc(in.numinst * sizeof(char*));
-+		if (in.namelist == NULL) {
- 		    sts = -oserror();
- 		    free(tbuf);
- 		    goto end;
- 		}
- #endif
--		k += numinst;
-+		k += in.numinst;
- 		namebase = (char *)&tbuf[k];
--	        for (i = 0; i < numinst; i++) {
--		    instlist[i] = ntohl(instlist[i]);
--	            namelist[i] = &namebase[ntohl(stridx[i])];
-+	        for (i = 0; i < in.numinst; i++) {
-+		    in.instlist[i] = ntohl(in.instlist[i]);
-+	            in.namelist[i] = &namebase[ntohl(stridx[i])];
- 		}
--		if ((sts = addindom(lcp, indom, when, numinst, instlist, namelist, tbuf, allinbuf)) < 0)
-+		if ((sts = __pmLogAddInDom(acp, &when, &in, tbuf, allinbuf)) < 0)
- 		    goto end;
- 		/* If this indom was a duplicate, then we need to free tbuf and
- 		   namelist, as appropriate. */
- 		if (sts == PMLOGPUTINDOM_DUP) {
- 		    free(tbuf);
--		    if (namelist != NULL && !allinbuf)
--			free(namelist);
-+		    if (in.namelist != NULL && !allinbuf)
-+			free(in.namelist);
- 		}
- 	    }
- 	    else {
-@@ -860,8 +885,8 @@ PM_FAULT_POINT("libpcp/" __FILE__ ":11",
- 	    }
- 	    if ((n = (int)__pmFread(tbuf, 1, rlen, f)) != rlen) {
- 		if (pmDebugOptions.logmeta) {
--		    fprintf(stderr, "__pmLogLoadMeta: label read -> %d: expected: %d\n",
--			    n, rlen);
-+		    fprintf(stderr, "%s: label read -> %d: expected: %d\n",
-+			    "__pmLogLoadMeta", n, rlen);
- 		}
- 		if (__pmFerror(f)) {
- 		    __pmClearerr(f);
-@@ -908,7 +933,8 @@ PM_FAULT_POINT("libpcp/" __FILE__ ":11",
- 
- 		if (jsonlen < 0 || jsonlen > PM_MAXLABELJSONLEN) {
- 		    if (pmDebugOptions.logmeta)
--			fprintf(stderr, "__pmLogLoadMeta: corrupted json in labelset. jsonlen=%d\n", jsonlen);
-+			fprintf(stderr, "%s: corrupted json in labelset. jsonlen=%d\n",
-+					"__pmLogLoadMeta", jsonlen);
- 		    sts = PM_ERR_LOGREC;
- 		    free(labelsets);
- 		    free(tbuf);
-@@ -935,7 +961,8 @@ PM_FAULT_POINT("libpcp/" __FILE__ ":11",
- 		    if (nlabels > PM_MAXLABELS || k + nlabels * sizeof(pmLabel) > rlen) {
- 			/* corrupt archive metadata detected. GH #475 */
- 			if (pmDebugOptions.logmeta)
--			    fprintf(stderr, "__pmLogLoadMeta: corrupted labelset. nlabels=%d\n", nlabels);
-+			    fprintf(stderr, "%s: corrupted labelset. nlabels=%d\n",
-+					    "__pmLogLoadMeta", nlabels);
- 			sts = PM_ERR_LOGREC;
- 			free(labelsets);
- 			free(tbuf);
-@@ -975,8 +1002,8 @@ PM_FAULT_POINT("libpcp/" __FILE__ ":16",
- 	    }
- 	    if ((n = (int)__pmFread(tbuf, 1, rlen, f)) != rlen) {
- 		if (pmDebugOptions.logmeta) {
--		    fprintf(stderr, "__pmLogLoadMeta: text read -> %d: expected: %d\n",
--			    n, rlen);
-+		    fprintf(stderr, "%s: text read -> %d: expected: %d\n",
-+				    "__pmLogLoadMeta", n, rlen);
- 		}
- 		if (__pmFerror(f)) {
- 		    __pmClearerr(f);
-@@ -1005,8 +1032,8 @@ PM_FAULT_POINT("libpcp/" __FILE__ ":16",
- 		ident = __ntohpmID(*((unsigned int *)&tbuf[k]));
- 	    else {
- 		if (pmDebugOptions.logmeta) {
--		    fprintf(stderr, "__pmLogLoadMeta: bad text ident -> %x\n",
--			    type);
-+		    fprintf(stderr, "%s: bad text ident -> %x\n",
-+				    "__pmLogLoadMeta", type);
- 		}
- 		free(tbuf);
- 		continue;
-@@ -1024,8 +1051,9 @@ PM_FAULT_POINT("libpcp/" __FILE__ ":16",
- 	check = ntohl(check);
- 	if (n != sizeof(check) || h.len != check) {
- 	    if (pmDebugOptions.logmeta) {
--		fprintf(stderr, "__pmLogLoadMeta: trailer read -> %d or len=%d: expected %d @ offset=%d\n",
--		    n, check, h.len, (int)(__pmFtell(f) - sizeof(check)));
-+		fprintf(stderr, "%s: trailer read -> %d or len=%d: "
-+				"expected %d @ offset=%d\n", "__pmLogLoadMeta",
-+			n, check, h.len, (int)(__pmFtell(f) - sizeof(check)));
- 	    }
- 	    if (__pmFerror(f)) {
- 		__pmClearerr(f);
-@@ -1046,7 +1074,7 @@ end:
-     if (sts == 0) {
- 	if (numpmid == 0) {
- 	    if (pmDebugOptions.logmeta) {
--		fprintf(stderr, "__pmLogLoadMeta: no metrics found?\n");
-+		fprintf(stderr, "%s: no metrics found?\n", "__pmLogLoadMeta");
- 	    }
- 	    sts = PM_ERR_LOGREC;
- 	}
-diff -Naurp pcp-5.0.2.orig/src/libpcp/src/logutil.c pcp-5.0.2/src/libpcp/src/logutil.c
---- pcp-5.0.2.orig/src/libpcp/src/logutil.c	2018-07-08 10:58:08.000000000 +1000
-+++ pcp-5.0.2/src/libpcp/src/logutil.c	2020-02-03 13:23:15.263762911 +1100
-@@ -1,5 +1,5 @@
- /*
-- * Copyright (c) 2012-2017 Red Hat.
-+ * Copyright (c) 2012-2017,2020 Red Hat.
-  * Copyright (c) 1995-2002,2004 Silicon Graphics, Inc.  All Rights Reserved.
-  * 
-  * This library is free software; you can redistribute it and/or modify it
-@@ -764,6 +764,22 @@ __pmLogClose(__pmArchCtl *acp)
- }
- 
- int
-+__pmLogAddVolume(__pmArchCtl *acp, unsigned int vol)
-+{
-+    __pmLogCtl	*lcp = acp->ac_log;
-+
-+    if (lcp->l_minvol == -1) {
-+	lcp->l_minvol = vol;
-+	lcp->l_maxvol = vol;
-+    } else if (vol < lcp->l_minvol) {
-+	lcp->l_minvol = vol;
-+    } else if (vol > lcp->l_maxvol) {
-+	lcp->l_maxvol = vol;
-+    }
-+    return 0;
-+}
-+
-+int
- __pmLogLoadLabel(__pmArchCtl *acp, const char *name)
- {
-     __pmLogCtl	*lcp = acp->ac_log;
-@@ -876,21 +892,14 @@ __pmLogLoadLabel(__pmArchCtl *acp, const
- 		}
- 	    }
- 	    else {
--		char	*q;
--		int	vol;
--		vol = (int)strtol(tp, &q, 10);
-+		char		*q;
-+		unsigned int	vol;
-+
-+		vol = (unsigned int)strtoul(tp, &q, 10);
- 		if (*q == '\0') {
- 		    exists = 1;
--		    if (lcp->l_minvol == -1) {
--			lcp->l_minvol = vol;
--			lcp->l_maxvol = vol;
--		    }
--		    else {
--			if (vol < lcp->l_minvol)
--			    lcp->l_minvol = vol;
--			if (vol > lcp->l_maxvol)
--			    lcp->l_maxvol = vol;
--		    }
-+		    if ((sts = __pmLogAddVolume(acp, vol)) < 0)
-+			goto cleanup;
- 		}
- 	    }
- 	}
-@@ -2282,7 +2291,7 @@ __pmLogSetTime(__pmContext *ctxp)
- 	int		match = 0;
- 	int		vol;
- 	int		numti = lcp->l_numti;
--	__pmFILE		*f;
-+	__pmFILE	*f;
- 	__pmLogTI	*tip = lcp->l_ti;
- 	double		t_lo;
- 	struct stat	sbuf;
-diff -Naurp pcp-5.0.2.orig/src/libpcp_web/src/discover.c pcp-5.0.2/src/libpcp_web/src/discover.c
---- pcp-5.0.2.orig/src/libpcp_web/src/discover.c	2019-12-10 17:04:20.000000000 +1100
-+++ pcp-5.0.2/src/libpcp_web/src/discover.c	2020-02-03 13:36:11.958637560 +1100
-@@ -1,5 +1,5 @@
- /*
-- * Copyright (c) 2018-2019 Red Hat.
-+ * Copyright (c) 2018-2020 Red Hat.
-  *
-  * This library is free software; you can redistribute it and/or modify it
-  * under the terms of the GNU Lesser General Public License as published
-@@ -14,6 +14,8 @@
- #include "discover.h"
- #include "slots.h"
- #include "util.h"
-+#include <dirent.h>
-+#include <sys/stat.h>
- 
- /* Decode various archive metafile records (desc, indom, labels, helptext) */
- static int pmDiscoverDecodeMetaDesc(uint32_t *, int, pmDesc *, int *, char ***);
-@@ -24,11 +26,15 @@ static int pmDiscoverDecodeMetaLabelSet(
- /* array of registered callbacks, see pmDiscoverSetup() */
- static int discoverCallBackTableSize;
- static pmDiscoverCallBacks **discoverCallBackTable;
-+static char *pmDiscoverFlagsStr(pmDiscover *);
- 
- /* internal hash table of discovered paths */
--#define PM_DISCOVER_HASHTAB_SIZE 64
-+#define PM_DISCOVER_HASHTAB_SIZE 16
- static pmDiscover *discover_hashtable[PM_DISCOVER_HASHTAB_SIZE];
- 
-+/* pmlogger_daily log-roll lock count */
-+static int lockcnt = 0;
-+
- /* FNV string hash algorithm. Return unsigned in range 0 .. limit-1 */
- static unsigned int
- strhash(const char *s, unsigned int limit)
-@@ -43,18 +49,38 @@ strhash(const char *s, unsigned int limi
-     return h % limit;
- }
- 
-+/* ctime string - note static buf is returned */
-+static char *
-+stamp(void)
-+{
-+    time_t now = time(NULL);
-+    char *p, *c = ctime(&now);
-+
-+    if ((p = strrchr(c, '\n')) != NULL)
-+    	*p = '\0';
-+    return c;
-+}
-+
- /*
-- * Lookup or Add a discovered file path (directory or PCP archive file)
-+ * Lookup or Add a discovered file path (directory or PCP archive file).
-+ * Note: the fullpath suffix (.meta, .[0-9]+) should already be stripped.
-  * Return path table entry (new or existing).
-  */
- static pmDiscover *
--pmDiscoverLookupAdd(const char *path, pmDiscoverModule *module, void *arg)
-+pmDiscoverLookupAdd(const char *fullpath, pmDiscoverModule *module, void *arg)
- {
-     pmDiscover		*p, *h;
--    unsigned int	k = strhash(path, PM_DISCOVER_HASHTAB_SIZE);
-+    unsigned int	k;
-+    sds			name;
-+
-+    name = sdsnew(fullpath);
-+    k = strhash(name, PM_DISCOVER_HASHTAB_SIZE);
-+
-+    if (pmDebugOptions.discovery)
-+	fprintf(stderr, "pmDiscoverLookupAdd: name=%s\n", name);
- 
-     for (p = NULL, h = discover_hashtable[k]; h != NULL; p = h, h = h->next) {
--    	if (strcmp(h->context.name, path) == 0)
-+    	if (sdscmp(h->context.name, name) == 0)
- 	    break;
-     }
- 
-@@ -65,14 +91,24 @@ pmDiscoverLookupAdd(const char *path, pm
- 	h->ctx = -1; /* no PMAPI context initially */
- 	h->flags = PM_DISCOVER_FLAGS_NEW;
- 	h->context.type = PM_CONTEXT_ARCHIVE;
--	h->context.name = sdsnew(path);
-+	h->context.name = name;
- 	h->module = module;
- 	h->data = arg;
- 	if (p == NULL)
- 	    discover_hashtable[k] = h;
- 	else
- 	    p->next = h;
-+	if (pmDebugOptions.discovery)
-+	    fprintf(stderr, "pmDiscoverLookupAdd: --> new entry %s\n", name);
-+
-+    }
-+    else {
-+	/* already in hash table, so free the buffer */
-+	if (pmDebugOptions.discovery)
-+	    fprintf(stderr, "pmDiscoverLookupAdd: --> existing entry %s\n", name);
-+    	sdsfree(name);
-     }
-+
-     return h;
- }
- 
-@@ -82,12 +118,6 @@ pmDiscoverLookup(const char *path)
-     return pmDiscoverLookupAdd(path, NULL, NULL);
- }
- 
--static pmDiscover *
--pmDiscoverAdd(const char *path, pmDiscoverModule *module, void *arg)
--{
--    return pmDiscoverLookupAdd(path, module, arg);
--}
--
- static void
- pmDiscoverFree(pmDiscover *p)
- {
-@@ -101,39 +131,42 @@ pmDiscoverFree(pmDiscover *p)
- 	sdsfree(p->context.source);
-     if (p->context.labelset)
- 	pmFreeLabelSets(p->context.labelset, 1);
-+    if (p->event_handle) {
-+	uv_fs_event_stop(p->event_handle);
-+	free(p->event_handle);
-+	p->event_handle = NULL;
-+    }
-+
-     memset(p, 0, sizeof(*p));
-     free(p);
- }
- 
- /*
-- * Delete tracking of a previously discovered path. Frees resources and
-- * destroy PCP context (if any).
-+ * Traverse and invoke callback for all paths matching any bit
-+ * in the flags bitmap. Callback can be NULL to just get a count.
-+ * Return count of matching paths, may be 0.
-  */
--static void
--pmDiscoverDelete(sds path)
-+static int
-+pmDiscoverTraverse(unsigned int flags, void (*callback)(pmDiscover *))
- {
--    pmDiscover		*p, *h;
--    unsigned int	k = strhash(path, PM_DISCOVER_HASHTAB_SIZE);
-+    int			count = 0, i;
-+    pmDiscover		*p;
- 
--    for (p = NULL, h = discover_hashtable[k]; h != NULL; p = h, h = h->next) {
--    	if (sdscmp(h->context.name, path) == 0) {
--	    if (p == NULL)
--	    	discover_hashtable[k] = NULL;
--	    else
--	    	p->next = h->next;
--	    pmDiscoverFree(h);
--	    break;
-+    for (i = 0; i < PM_DISCOVER_HASHTAB_SIZE; i++) {
-+    	for (p = discover_hashtable[i]; p; p = p->next) {
-+	    if (p->flags & flags) {
-+		if (callback)
-+		    callback(p);
-+		count++;
-+	    }
- 	}
-     }
-+    return count;
- }
- 
--/*
-- * Traverse and invoke callback for all paths matching any bit
-- * in the flags bitmap. Callback can be NULL to just get a count.
-- * Return count of matching paths, may be 0.
-- */
-+/* as above, but with an extra (void *)arg passed to the cb */
- static int
--pmDiscoverTraverse(unsigned int flags, void (*callback)(pmDiscover *))
-+pmDiscoverTraverseArg(unsigned int flags, void (*callback)(pmDiscover *, void *), void *arg)
- {
-     int			count = 0, i;
-     pmDiscover		*p;
-@@ -142,7 +175,7 @@ pmDiscoverTraverse(unsigned int flags, v
-     	for (p = discover_hashtable[i]; p; p = p->next) {
- 	    if (p->flags & flags) {
- 		if (callback)
--		    callback(p);
-+		    callback(p, arg);
- 		count++;
- 	    }
- 	}
-@@ -150,6 +183,7 @@ pmDiscoverTraverse(unsigned int flags, v
-     return count;
- }
- 
-+
- /*
-  * Traverse and purge deleted entries
-  * Return count of purged entries.
-@@ -173,6 +207,9 @@ pmDiscoverPurgeDeleted(void)
- 		    prev->next = next;
- 		else
- 		    discover_hashtable[i] = next;
-+		if (pmDebugOptions.discovery)
-+		    fprintf(stderr, "pmDiscoverPurgeDeleted: deleted %s %s\n",
-+		    	p->context.name, pmDiscoverFlagsStr(p));
- 		pmDiscoverFree(p);
- 		count++;
- 	    }
-@@ -180,14 +217,32 @@ pmDiscoverPurgeDeleted(void)
- 	}
-     }
- 
--    if (pmDebugOptions.discovery)
--	fprintf(stderr, "%s: purged %d entries\n",
--			"pmDiscoverPurgeDeleted", count);
--
-     return count;
- }
- 
- /*
-+ * if string ends with given suffix then return pointer
-+ * to start of suffix in string, else NULL
-+ */
-+static char *
-+strsuffix(char *s, const char *suffix)
-+{
-+    int slen, suflen;
-+    char *ret = NULL;
-+
-+    if (s && suffix) {
-+    	slen = strlen(s);
-+	suflen = strlen(suffix);
-+	if (slen >= suflen) {
-+	    ret = s + (slen - suflen);
-+	    if (strncmp(ret, suffix, suflen) != 0)
-+	    	ret = NULL;
-+	}
-+    }
-+    return ret;
-+}
-+
-+/*
-  * Discover dirs and archives - add new entries or refresh existing.
-  * Call this for each top-level directory. Discovered paths are not
-  * automatically monitored. After discovery, need to traverse and
-@@ -196,44 +251,88 @@ pmDiscoverPurgeDeleted(void)
- static int
- pmDiscoverArchives(const char *dir, pmDiscoverModule *module, void *arg)
- {
--    uv_fs_t		sreq, req;
--    uv_dirent_t		dent;
--    uv_stat_t		*s;
-+    DIR			*dirp;
-+    struct dirent	*dent;
-+    struct stat		*s;
-+    struct stat		statbuf;
-     pmDiscover		*a;
-+    char		*suffix;
-     char		path[MAXNAMELEN];
--    char		basepath[MAXNAMELEN];
-     int			sep = pmPathSeparator();
-+    int			vol;
-+
-+    /*
-+     * note: pmDiscoverLookupAdd sets PM_DISCOVER_FLAGS_NEW
-+     * if this is a newly discovered archive or directory
-+     */
-+    a = pmDiscoverLookupAdd(dir, module, arg);
-+    a->flags |= PM_DISCOVER_FLAGS_DIRECTORY;
- 
--    if (uv_fs_scandir(NULL, &req, dir, 0, NULL) < 0)
-+    if ((dirp = opendir(dir)) == NULL) {
-+	if (pmDebugOptions.discovery)
-+	    fprintf(stderr, "pmDiscoverArchives: opendir %s failed %s: err %d\n", dir, path, errno);
- 	return -ESRCH;
-+    }
- 
--    a = pmDiscoverAdd(dir, module, arg);
--    a->flags |= PM_DISCOVER_FLAGS_DIRECTORY;
-+    while ((dent = readdir(dirp)) != NULL) {
-+	if (dent->d_name[0] == '.')
-+	    continue;
-+	pmsprintf(path, sizeof(path), "%s%c%s", dir, sep, dent->d_name);
-+
-+	if (pmDebugOptions.discovery)
-+	    fprintf(stderr, "pmDiscoverArchives: readdir found %s\n", path);
- 
--    while (uv_fs_scandir_next(&req, &dent) != UV_EOF) {
--	pmsprintf(path, sizeof(path), "%s%c%s", dir, sep, dent.name);
--	if (uv_fs_stat(NULL, &sreq, path, NULL) < 0)
-+	if (stat(path, &statbuf) < 0) {
-+	    if (pmDebugOptions.discovery)
-+		fprintf(stderr, "pmDiscoverArchives: stat failed %s, err %d\n", path, errno);
- 	    continue;
--	s = &sreq.statbuf;
--	strncpy(basepath, path, sizeof(basepath)); /* __pmLogBaseName modifies it's argument */
--	if (S_ISREG(s->st_mode) && __pmLogBaseName(basepath) != NULL) {
--	    /*
--	     * An archive file (index, meta or data vol). If compressed, then
--	     * it is read-only and we don't have to monitor it for growth.
--	     */
--	    a = pmDiscoverAdd(path, module, arg);
--	    a->flags &= ~PM_DISCOVER_FLAGS_DELETED;
-+	}
- 
--	    if (strstr(path, ".meta"))
--	    	a->flags |= PM_DISCOVER_FLAGS_META;
--	    else if (strstr(path, ".index"))
--	    	a->flags |= PM_DISCOVER_FLAGS_INDEX;
--	    else
--	    	a->flags |= PM_DISCOVER_FLAGS_DATAVOL;
--
--	    /* compare to libpcp io.c for suffix list */
--	    if (strstr(path, ".xz") || strstr(path, ".gz"))
--	    	a->flags |= PM_DISCOVER_FLAGS_COMPRESSED;
-+	s = &statbuf;
-+	if (S_ISREG(s->st_mode)) {
-+	    if ((suffix = strsuffix(path, ".meta")) != NULL) {
-+		/*
-+		 * An uncompressed PCP archive meta file. Track the meta
-+		 * file - the matching logvol filename varies because logvols
-+		 * are periodically rolled by pmlogger. Importantly, process all
-+		 * available metadata to EOF before processing any logvol data.
-+		 */
-+		*suffix = '\0'; /* strip suffix from path giving archive name */
-+		a = pmDiscoverLookupAdd(path, module, arg);
-+
-+		/*
-+		 * note: pmDiscoverLookupAdd sets PM_DISCOVER_FLAGS_NEW
-+		 * if this is a newly discovered archive, otherwise we're
-+		 * already tracking this archive.
-+		 */
-+		a->flags |= PM_DISCOVER_FLAGS_META;
-+	    }
-+	    else if ((suffix = __pmLogBaseNameVol(path, &vol)) != NULL && vol >= 0) {
-+		/*
-+		 * An archive logvol. This logvol may have been created since
-+		 * the context was first opened. Update the context maxvol
-+		 * to be sure pmFetchArchive can switch to it in due course.
-+		 */
-+		if ((a = pmDiscoverLookup(path)) != NULL) {
-+		    a->flags |= PM_DISCOVER_FLAGS_DATAVOL;
-+		    /* ensure archive context knows about this volume */
-+		    if (pmDebugOptions.discovery)
-+			fprintf(stderr, "pmDiscoverArchives: found logvol %s %s vol=%d\n",
-+			    a->context.name, pmDiscoverFlagsStr(a), vol);
-+		    if (a->ctx >= 0 && vol >= 0) {
-+			__pmContext *ctxp = __pmHandleToPtr(a->ctx);
-+			__pmArchCtl *acp = ctxp->c_archctl;
-+
-+		    	__pmLogAddVolume(acp, vol);
-+			PM_UNLOCK(ctxp->c_lock);
-+		    }
-+		    if (pmDebugOptions.discovery)
-+			fprintf(stderr, "pmDiscoverArchives: added logvol %s %s vol=%d\n",
-+			    a->context.name, pmDiscoverFlagsStr(a), vol);
-+		}
-+	    } else if (pmDebugOptions.discovery) {
-+		fprintf(stderr, "pmDiscoverArchives: ignored regular file %s\n", path);
-+	    }
- 	}
- 	else if (S_ISDIR(s->st_mode)) {
- 	    /*
-@@ -241,29 +340,117 @@ pmDiscoverArchives(const char *dir, pmDi
- 	     */
- 	    pmDiscoverArchives(path, module, arg);
- 	}
--	uv_fs_req_cleanup(&sreq);
-     }
--    uv_fs_req_cleanup(&req);
-+    if (dirp)
-+	closedir(dirp);
- 
-     /* success */
-     return 0;
- }
- 
-+/*
-+ * Return 1 if monitored path has been deleted.
-+ * For archives, we only check the meta file because
-+ * a logvol can be deleted (e.g. via compression when
-+ * the logvol is rolled to a new volume) without
-+ * actually deleting the archive.
-+ */
-+static int
-+is_deleted(pmDiscover *p, struct stat *sbuf)
-+{
-+    int			ret = 0;
-+
-+    if (p->flags & PM_DISCOVER_FLAGS_DIRECTORY) {
-+	if (stat(p->context.name, sbuf) < 0)
-+	    ret = 1; /* directory has been deleted */
-+    }
-+
-+    if (p->flags & (PM_DISCOVER_FLAGS_META|PM_DISCOVER_FLAGS_DATAVOL)) {
-+    	sds meta = sdsnew(p->context.name);
-+	meta = sdscat(meta, ".meta");
-+	if (stat(meta, sbuf) < 0) {
-+	    /*
-+	     * Archive metadata file has been deleted (or compressed)
-+	     * hence consider the archive to be deleted because there
-+	     * is no more data to logtail.
-+	     */
-+	    ret = 1;
-+	}
-+	sdsfree(meta);
-+    }
-+
-+    if (pmDebugOptions.discovery) {
-+	fprintf(stderr, "is_deleted: checking %s (%s) -> %s\n",
-+		p->context.name, pmDiscoverFlagsStr(p), ret ? "DELETED" : "no");
-+    }
-+
-+    return ret;
-+}
-+
-+static void
-+logdir_is_locked_callBack(pmDiscover *p, void *arg)
-+{
-+    int			*cntp = (int *)arg;
-+    char		sep = pmPathSeparator();
-+    char		path[MAXNAMELEN];
-+
-+    pmsprintf(path, sizeof(path), "%s%c%s", p->context.name, sep, "lock");
-+    if (access(path, F_OK) == 0)
-+    	(*cntp)++;
-+}
-+
-+static void
-+check_deleted(pmDiscover *p)
-+{
-+    struct stat sbuf;
-+    if (!(p->flags & PM_DISCOVER_FLAGS_DELETED) && is_deleted(p, &sbuf))
-+    	p->flags |= PM_DISCOVER_FLAGS_DELETED;
-+}
-+
- static void
- fs_change_callBack(uv_fs_event_t *handle, const char *filename, int events, int status)
- {
-     char		buffer[MAXNAMELEN];
-     size_t		bytes = sizeof(buffer) - 1;
-     pmDiscover		*p;
--    uv_fs_t		sreq;
-+    char		*s;
-     sds			path;
--    int			path_changed = 0;
-+    int			count = 0;
-+    struct stat		statbuf;
-+
-+    /*
-+     * check if logs are currently being rolled by pmlogger_daily et al
-+     * in any of the directories we are tracking. For mutex, the log control
-+     * scripts use a 'lock' file in each directory as it is processed.
-+     */
-+    pmDiscoverTraverseArg(PM_DISCOVER_FLAGS_DIRECTORY,
-+    	logdir_is_locked_callBack, (void *)&count);
-+
-+    if (lockcnt == 0 && count > 0) {
-+	/* log-rolling has started */
-+    	fprintf(stderr, "%s discovery callback ignored: log-rolling is now in progress\n", stamp());
-+	lockcnt = count;
-+	return;
-+    }
-+
-+    if (lockcnt > 0 && count > 0) {
-+	/* log-rolling is still in progress */
-+	lockcnt = count;
-+	return;
-+    }
-+
-+    if (lockcnt > 0 && count == 0) {
-+    	/* log-rolling is finished: check what got deleted, and then purge */
-+    	fprintf(stderr, "%s discovery callback: finished log-rolling\n", stamp());
-+	pmDiscoverTraverse(PM_DISCOVER_FLAGS_META|PM_DISCOVER_FLAGS_DATAVOL, check_deleted);
-+    }
-+    lockcnt = count;
- 
-     uv_fs_event_getpath(handle, buffer, &bytes);
-     path = sdsnewlen(buffer, bytes);
- 
-     if (pmDebugOptions.discovery) {
--	fprintf(stderr, "%s: event on %s -", "fs_change_callBack", path);
-+	fprintf(stderr, "fs_change_callBack: event on %s -", path);
- 	if (events & UV_RENAME)
- 	    fprintf(stderr, " renamed");
- 	if (events & UV_CHANGE)
-@@ -271,38 +458,40 @@ fs_change_callBack(uv_fs_event_t *handle
- 	fputc('\n', stderr);
-     }
- 
-+    	
-     /*
--     * Lookup the path, stat and update it's flags accordingly. If the
--     * path has been deleted, stop it's event monitor and free the req buffer.
--     * Then call the pmDiscovery callback.
-+     * Strip ".meta" suffix (if any) and lookup the path. stat and update it's
-+     * flags accordingly. If the path has been deleted, stop it's event monitor
-+     * and free the req buffer, else call the pmDiscovery callback.
-      */
--    if ((p = pmDiscoverLookup(path)) == NULL) {
-+    if ((s = strsuffix(path, ".meta")) != NULL)
-+	*s = '\0';
-+
-+    p = pmDiscoverLookup(path);
-+    if (p && pmDebugOptions.discovery) {
-+	fprintf(stderr, "fs_change_callBack: ---> found entry %s (%s)\n",
-+		p->context.name, pmDiscoverFlagsStr(p));
-+    }
-+
-+    if (p == NULL) {
- 	if (pmDebugOptions.discovery)
--	    fprintf(stderr, "%s: filename %s lookup failed\n",
--		    "fs_change_callBack", filename);
-+	    fprintf(stderr, "fs_change_callBack: %s lookup failed\n", filename);
-     }
--    else if (uv_fs_stat(NULL, &sreq, p->context.name, NULL) < 0) {
--    	p->flags |= PM_DISCOVER_FLAGS_DELETED;
--	if (p->event_handle) {
--	    uv_fs_event_stop(p->event_handle);
--	    free(p->event_handle);
--	    p->event_handle = NULL;
--	}
-+    else if (is_deleted(p, &statbuf)) {
- 	/* path has been deleted. statbuf is invalid */
-+    	p->flags |= PM_DISCOVER_FLAGS_DELETED;
- 	memset(&p->statbuf, 0, sizeof(p->statbuf));
--	path_changed = 1;
--    }
--    else {
--	/* avoid spurious events. only call the callBack if it really changed */
--	if (p->statbuf.st_mtim.tv_sec != sreq.statbuf.st_mtim.tv_sec ||
--	    p->statbuf.st_mtim.tv_nsec != sreq.statbuf.st_mtim.tv_nsec)
--	    path_changed = 1;
--	p->statbuf = sreq.statbuf; /* struct copy */
--	uv_fs_req_cleanup(&sreq);
-+	if (pmDebugOptions.discovery)
-+	    fprintf(stderr, "fs_change_callBack: %s (%s) has been deleted\n",
-+	    	p->context.name, pmDiscoverFlagsStr(p));
-     }
- 
--    if (p && p->changed && path_changed && !(p->flags & PM_DISCOVER_FLAGS_DELETED))
--	p->changed(p);
-+    /*
-+     * Something in the directory changed - a new or deleted archive, or
-+     * a tracked archive metadata file or log volume grew.
-+     */
-+    if (p)
-+	p->changed(p); /* returns immediately if PM_DISCOVER_FLAGS_DELETED */
- 
-     sdsfree(path);
- }
-@@ -316,9 +505,14 @@ pmDiscoverMonitor(sds path, void (*callb
- {
-     discoverModuleData	*data;
-     pmDiscover		*p;
-+    sds			eventfilename;
- 
--    if ((p = pmDiscoverLookup(path)) == NULL)
-+    if ((p = pmDiscoverLookup(path)) == NULL) {
-+	if (pmDebugOptions.discovery) {
-+	    fprintf(stderr, "pmDiscoverMonitor: lookup failed for %s\n", path);
-+	}
- 	return -ESRCH;
-+    }
-     data = getDiscoverModuleData(p->module);
- 
-     /* save the discovery callback to be invoked */
-@@ -330,9 +524,29 @@ pmDiscoverMonitor(sds path, void (*callb
- 	 * Start monitoring, using given uv loop. Up to the caller to create
- 	 * a PCP PMAPI context and to fetch/logtail in the changed callback.
- 	 */
-+	eventfilename = sdsnew(p->context.name);
- 	uv_fs_event_init(data->events, p->event_handle);
--	uv_fs_event_start(p->event_handle, fs_change_callBack, p->context.name,
-+
-+	if (p->flags & PM_DISCOVER_FLAGS_DIRECTORY) {
-+	    uv_fs_event_start(p->event_handle, fs_change_callBack, eventfilename,
-+			    UV_FS_EVENT_WATCH_ENTRY);
-+	}
-+	else {
-+	    /*
-+	     * Monitor an archive file. This tracks the archive meta file
-+	     * but the change callback processes both meta and logvol on
-+	     * every callback (meta before logvol).
-+	     */
-+	    eventfilename = sdscat(eventfilename, ".meta");
-+	    uv_fs_event_start(p->event_handle, fs_change_callBack, eventfilename,
- 			UV_FS_EVENT_WATCH_ENTRY);
-+	}
-+
-+	if (pmDebugOptions.discovery) {
-+	    fprintf(stderr, "pmDiscoverMonitor: added event for %s (%s)\n",
-+	    	eventfilename, pmDiscoverFlagsStr(p));
-+	}
-+	sdsfree(eventfilename);
-     }
- 
-     return 0;
-@@ -411,41 +625,23 @@ static void changed_callback(pmDiscover
- static void
- created_callback(pmDiscover *p)
- {
-+    if (p->flags & (PM_DISCOVER_FLAGS_COMPRESSED|PM_DISCOVER_FLAGS_INDEX))
-+    	return; /* compressed archives don't grow and we ignore archive index files */
-+
-     if (pmDebugOptions.discovery)
- 	fprintf(stderr, "CREATED %s, %s\n", p->context.name, pmDiscoverFlagsStr(p));
- 
--    p->flags &= ~PM_DISCOVER_FLAGS_NEW;
--
--    if (p->flags & PM_DISCOVER_FLAGS_COMPRESSED)
--    	return; /* compressed archives don't grow */
--
-     if (p->flags & PM_DISCOVER_FLAGS_DIRECTORY) {
- 	if (pmDebugOptions.discovery)
- 	    fprintf(stderr, "MONITOR directory %s\n", p->context.name);
- 	pmDiscoverMonitor(p->context.name, changed_callback);
-     }
--
--    if (p->flags & PM_DISCOVER_FLAGS_DATAVOL) {
-+    else if (p->flags & (PM_DISCOVER_FLAGS_META|PM_DISCOVER_FLAGS_DATAVOL)) {
- 	if (pmDebugOptions.discovery)
--	    fprintf(stderr, "MONITOR logvol %s\n", p->context.name);
-+	    fprintf(stderr, "MONITOR archive %s\n", p->context.name);
- 	pmDiscoverMonitor(p->context.name, changed_callback);
-     }
--
--    if (p->flags & PM_DISCOVER_FLAGS_META) {
--	if (pmDebugOptions.discovery)
--	    fprintf(stderr, "MONITOR metadata %s\n", p->context.name);
--	pmDiscoverMonitor(p->context.name, changed_callback);
--    }
--}
--
--static void
--deleted_callback(pmDiscover *p)
--{
--    if (pmDebugOptions.discovery)
--	fprintf(stderr, "DELETED %s (%s)\n", p->context.name,
--			pmDiscoverFlagsStr(p));
--    pmDiscoverDelete(p->context.name);
--    /* p is now no longer valid */
-+    p->flags &= ~PM_DISCOVER_FLAGS_NEW;
- }
- 
- static void
-@@ -509,37 +705,84 @@ static void
- pmDiscoverInvokeMetricCallBacks(pmDiscover *p, pmTimespec *ts, pmDesc *desc,
- 		int numnames, char **names)
- {
-+    discoverModuleData	*data = getDiscoverModuleData(p->module);
-     pmDiscoverCallBacks	*callbacks;
-     pmDiscoverEvent	event;
-     char		buf[32];
--    int			i;
-+    int			i, sts;
- 
-     if (pmDebugOptions.discovery) {
- 	fprintf(stderr, "%s[%s]: %s name%s", "pmDiscoverInvokeMetricCallBacks",
- 			timespec_str(ts, buf, sizeof(buf)),
- 			p->context.source, numnames > 0 ? " " : "(none)\n");
- 	for (i = 0; i < numnames; i++)
--	    printf("\"%s\"%s", names[i], i < numnames - 1 ? ", " : "\n");
-+	    fprintf(stderr, "[%u/%u] \"%s\"%s", i+1, numnames, names[i],
-+			    i < numnames - 1 ? ", " : "\n");
- 	pmPrintDesc(stderr, desc);
- 	if (pmDebugOptions.labels)
- 	    fprintf(stderr, "context labels %s\n", p->context.labelset->json);
-     }
- 
-+    if (data->pmids) {
-+	if (dictFind(data->pmids, &desc->pmid) != NULL)
-+	    goto out;	/* metric contains an already excluded PMID */
-+	for (i = 0; i < numnames; i++) {
-+	    if (regexec(&data->exclude_names, names[i], 0, NULL, 0) == 0)
-+		break;
-+	}
-+	if (i != numnames) {
-+	    if (pmDebugOptions.discovery)
-+		fprintf(stderr, "%s: excluding metric %s\n",
-+				"pmDiscoverInvokeMetricCallBacks", names[i]);
-+	    /* add this pmid to the exclusion list and return early */
-+	    dictAdd(data->pmids, &desc->pmid, NULL);
-+	    goto out;
-+	}
-+    }
-+    if (data->indoms) {
-+	if (dictFind(data->indoms, &desc->indom) != NULL)
-+	    goto out;	/* metric contains an already excluded InDom */
-+    }
-+
-+    if (p->ctx >= 0 && p->context.type == PM_CONTEXT_ARCHIVE) {
-+	__pmContext	*ctxp = __pmHandleToPtr(p->ctx);
-+	__pmArchCtl	*acp = ctxp->c_archctl;
-+	char		idstr[32];
-+
-+	if ((sts = __pmLogAddDesc(acp, desc)) < 0)
-+	    fprintf(stderr, "%s: failed to add metric descriptor for %s\n",
-+			    "pmDiscoverInvokeMetricCallBacks",
-+			    pmIDStr_r(desc->pmid, idstr, sizeof(idstr)));
-+	for (i = 0; i < numnames; i++) {
-+	    if ((sts = __pmLogAddPMNSNode(acp, desc->pmid, names[i])) < 0)
-+		fprintf(stderr, "%s: failed to add metric name %s for %s\n",
-+				"pmDiscoverInvokeMetricCallBacks", names[i],
-+				pmIDStr_r(desc->pmid, idstr, sizeof(idstr)));
-+	}
-+	PM_UNLOCK(ctxp->c_lock);
-+    }
-+
-     discover_event_init(p, ts, &event);
-     for (i = 0; i < discoverCallBackTableSize; i++) {
- 	if ((callbacks = discoverCallBackTable[i]) &&
- 	    callbacks->on_metric != NULL)
- 	    callbacks->on_metric(&event, desc, numnames, names, p->data);
-     }
-+
-+out:
-+    for (i = 0; i < numnames; i++)
-+	free(names[i]);
-+    free(names);
- }
- 
- static void
- pmDiscoverInvokeInDomCallBacks(pmDiscover *p, pmTimespec *ts, pmInResult *in)
- {
-+    discoverModuleData	*data = getDiscoverModuleData(p->module);
-     pmDiscoverCallBacks	*callbacks;
-     pmDiscoverEvent	event;
-     char		buf[32], inbuf[32];
--    int			i;
-+    int			i, sts = PMLOGPUTINDOM_DUP; /* free after callbacks */
- 
-     if (pmDebugOptions.discovery) {
- 	fprintf(stderr, "%s[%s]: %s numinst %d indom %s\n",
-@@ -551,22 +794,48 @@ pmDiscoverInvokeInDomCallBacks(pmDiscove
- 	    fprintf(stderr, "context labels %s\n", p->context.labelset->json);
-     }
- 
-+    if (data->indoms) {
-+	if (dictFind(data->indoms, &in->indom) != NULL)
-+	    goto out;	/* excluded InDom */
-+    }
-+
-+    if (p->ctx >= 0 && p->context.type == PM_CONTEXT_ARCHIVE) {
-+	__pmContext	*ctxp = __pmHandleToPtr(p->ctx);
-+	__pmArchCtl	*acp = ctxp->c_archctl;
-+	char		errmsg[PM_MAXERRMSGLEN];
-+
-+	if ((sts = __pmLogAddInDom(acp, ts, in, NULL, 0)) < 0)
-+	    fprintf(stderr, "%s: failed to add indom for %s: %s\n",
-+			"pmDiscoverInvokeInDomCallBacks", pmInDomStr(in->indom),
-+			pmErrStr_r(sts, errmsg, sizeof(errmsg)));
-+	PM_UNLOCK(ctxp->c_lock);
-+    }
-+
-     discover_event_init(p, ts, &event);
-     for (i = 0; i < discoverCallBackTableSize; i++) {
- 	if ((callbacks = discoverCallBackTable[i]) &&
- 	    callbacks->on_indom != NULL)
- 	    callbacks->on_indom(&event, in, p->data);
-     }
-+
-+out:
-+    if (sts == PMLOGPUTINDOM_DUP) {
-+	for (i = 0; i < in->numinst; i++)
-+	    free(in->namelist[i]);
-+	free(in->namelist);
-+	free(in->instlist);
-+    }
- }
- 
- static void
- pmDiscoverInvokeLabelsCallBacks(pmDiscover *p, pmTimespec *ts,
- 		int ident, int type, pmLabelSet *sets, int nsets)
- {
-+    discoverModuleData	*data = getDiscoverModuleData(p->module);
-     pmDiscoverCallBacks	*callbacks;
-     pmDiscoverEvent	event;
-     char		buf[32], idbuf[64];
--    int			i;
-+    int			i, sts = -EAGAIN; /* free labelsets after callbacks */
- 
-     if (pmDebugOptions.discovery) {
- 	__pmLabelIdentString(ident, type, idbuf, sizeof(idbuf));
-@@ -579,22 +848,48 @@ pmDiscoverInvokeLabelsCallBacks(pmDiscov
- 	    fprintf(stderr, "context labels %s\n", p->context.labelset->json);
-     }
- 
-+    if ((type & PM_LABEL_ITEM) && data->pmids) {
-+	if (dictFind(data->pmids, &ident) != NULL)
-+	    goto out;	/* labels for an already excluded metric */
-+    }
-+    if ((type & (PM_LABEL_INDOM|PM_LABEL_INSTANCES)) && data->indoms) {
-+	if (dictFind(data->indoms, &ident) != NULL)
-+	    goto out;	/* labels for an already excluded InDom */
-+    }
-+
-+    if (p->ctx >= 0 && p->context.type == PM_CONTEXT_ARCHIVE) {
-+	__pmContext	*ctxp = __pmHandleToPtr(p->ctx);
-+	__pmArchCtl	*acp = ctxp->c_archctl;
-+	char		errmsg[PM_MAXERRMSGLEN];
-+
-+	if ((sts = __pmLogAddLabelSets(acp, ts, type, ident, nsets, sets)) < 0)
-+	    fprintf(stderr, "%s: failed to add log labelset: %s\n",
-+			"pmDiscoverInvokeLabelsCallBacks",
-+			pmErrStr_r(sts, errmsg, sizeof(errmsg)));
-+	PM_UNLOCK(ctxp->c_lock);
-+    }
-+
-     discover_event_init(p, ts, &event);
-     for (i = 0; i < discoverCallBackTableSize; i++) {
- 	if ((callbacks = discoverCallBackTable[i]) &&
- 	    callbacks->on_labels != NULL)
- 	    callbacks->on_labels(&event, ident, type, sets, nsets, p->data);
-     }
-+
-+out:
-+    if (sts < 0)
-+	pmFreeLabelSets(sets, nsets);
- }
- 
- static void
- pmDiscoverInvokeTextCallBacks(pmDiscover *p, pmTimespec *ts,
- 		int ident, int type, char *text)
- {
-+    discoverModuleData	*data = getDiscoverModuleData(p->module);
-     pmDiscoverCallBacks	*callbacks;
-     pmDiscoverEvent	event;
-     char		buf[32];
--    int			i;
-+    int			i, sts;
- 
-     if (pmDebugOptions.discovery) {
- 	fprintf(stderr, "%s[%s]: %s ", "pmDiscoverInvokeTextCallBacks",
-@@ -612,12 +907,36 @@ pmDiscoverInvokeTextCallBacks(pmDiscover
- 	    fprintf(stderr, "context labels %s\n", p->context.labelset->json);
-     }
- 
-+    if ((type & PM_TEXT_PMID) && data->pmids) {
-+	if (dictFind(data->pmids, &ident) != NULL)
-+	    goto out;	/* text for an already excluded metric */
-+    }
-+    if ((type & PM_TEXT_INDOM) && data->indoms) {
-+	if (dictFind(data->indoms, &ident) != NULL)
-+	    goto out;	/* text for an already excluded InDom */
-+    }
-+
-+    if (p->ctx >= 0 && p->context.type == PM_CONTEXT_ARCHIVE) {
-+	__pmContext	*ctxp = __pmHandleToPtr(p->ctx);
-+	__pmArchCtl	*acp = ctxp->c_archctl;
-+	char		errmsg[PM_MAXERRMSGLEN];
-+
-+	if ((sts = __pmLogAddText(acp, ident, type, text)) < 0)
-+	    fprintf(stderr, "%s: failed to add %u text for %u: %s\n",
-+	    fprintf(stderr, "%s: failed to add text (type=%u) for ident=%u: %s\n",
-+			pmErrStr_r(sts, errmsg, sizeof(errmsg)));
-+	PM_UNLOCK(ctxp->c_lock);
-+    }
-+
-     discover_event_init(p, ts, &event);
-     for (i = 0; i < discoverCallBackTableSize; i++) {
- 	if ((callbacks = discoverCallBackTable[i]) &&
- 	    callbacks->on_text != NULL)
- 	    callbacks->on_text(&event, ident, type, text, p->data);
-     }
-+
-+out:
-+    free(text);
- }
- 
- static void
-@@ -645,8 +964,8 @@ pmDiscoverNewSource(pmDiscover *p, int c
-     p->context.labelset = labelset;
- 
-     /* use timestamp from file creation as starting time */
--    timestamp.tv_sec = p->statbuf.st_birthtim.tv_sec;
--    timestamp.tv_nsec = p->statbuf.st_birthtim.tv_nsec;
-+    timestamp.tv_sec = p->statbuf.st_ctim.tv_sec;
-+    timestamp.tv_nsec = p->statbuf.st_ctim.tv_nsec;
- 
-     /* inform utilities that a source has been discovered */
-     pmDiscoverInvokeSourceCallBacks(p, &timestamp);
-@@ -664,7 +983,7 @@ process_metadata(pmDiscover *p)
-     pmDesc		desc;
-     off_t		off;
-     char		*buffer;
--    int			e, i, nb, len, nsets;
-+    int			e, nb, len, nsets;
-     int			type, id; /* pmID or pmInDom */
-     int			nnames;
-     char		**names;
-@@ -674,6 +993,8 @@ process_metadata(pmDiscover *p)
-     __pmLogHdr		hdr;
-     sds			msg, source;
-     static uint32_t	*buf = NULL;
-+    int			deleted;
-+    struct stat		sbuf;
-     static int		buflen = 0;
- 
-     /*
-@@ -683,14 +1004,17 @@ process_metadata(pmDiscover *p)
-      */
-     p->flags |= PM_DISCOVER_FLAGS_META_IN_PROGRESS;
-     if (pmDebugOptions.discovery)
--	fprintf(stderr, "%s: in progress, flags=%s\n",
--			"process_metadata", pmDiscoverFlagsStr(p));
-+	fprintf(stderr, "process_metadata: %s in progress %s\n",
-+		p->context.name, pmDiscoverFlagsStr(p));
-     for (;;) {
- 	off = lseek(p->fd, 0, SEEK_CUR);
- 	nb = read(p->fd, &hdr, sizeof(__pmLogHdr));
- 
--	if (nb <= 0) {
--	    /* we're at EOF or an error. But may still be part way through a record */
-+	deleted = is_deleted(p, &sbuf);
-+	if (nb <= 0 || deleted) {
-+	    /* EOF, a read error, or deleted - but we may still be part way through a record */
-+	    if (deleted)
-+	    	p->flags |= PM_DISCOVER_FLAGS_DELETED;
- 	    break;
- 	}
- 
-@@ -750,10 +1074,6 @@ process_metadata(pmDiscover *p)
- 	    ts.tv_sec = p->statbuf.st_mtim.tv_sec;
- 	    ts.tv_nsec = p->statbuf.st_mtim.tv_nsec;
- 	    pmDiscoverInvokeMetricCallBacks(p, &ts, &desc, nnames, names);
--	    for (i = 0; i < nnames; i++)
--		free(names[i]);
--	    if (names)
--		free(names);
- 	    break;
- 
- 	case TYPE_INDOM:
-@@ -765,12 +1085,6 @@ process_metadata(pmDiscover *p)
- 		break;
- 	    }
- 	    pmDiscoverInvokeInDomCallBacks(p, &ts, &inresult);
--	    if (inresult.numinst > 0) {
--		for (i = 0; i < inresult.numinst; i++)
--		    free(inresult.namelist[i]);
--		free(inresult.namelist);
--		free(inresult.instlist);
--	    }
- 	    break;
- 
- 	case TYPE_LABEL:
-@@ -795,13 +1109,13 @@ process_metadata(pmDiscover *p)
- 		} else {
- 		    sdsfree(p->context.source);
- 		    p->context.source = source;
--		    p->context.labelset = labelset;
-+		    if (p->context.labelset)
-+			pmFreeLabelSets(p->context.labelset, 1);
-+		    p->context.labelset = __pmDupLabelSets(labelset, 1);
- 		    pmDiscoverInvokeSourceCallBacks(p, &ts);
- 		}
- 	    }
- 	    pmDiscoverInvokeLabelsCallBacks(p, &ts, id, type, labelset, nsets);
--	    if (labelset != p->context.labelset)
--		pmFreeLabelSets(labelset, nsets);
- 	    break;
- 
- 	case TYPE_TEXT:
-@@ -819,8 +1133,6 @@ process_metadata(pmDiscover *p)
- 	    ts.tv_sec = p->statbuf.st_mtim.tv_sec;
- 	    ts.tv_nsec = p->statbuf.st_mtim.tv_nsec;
- 	    pmDiscoverInvokeTextCallBacks(p, &ts, id, type, buffer);
--	    if (buffer)
--		free(buffer);
- 	    break;
- 
- 	default:
-@@ -833,38 +1145,89 @@ process_metadata(pmDiscover *p)
-     }
- 
-     if (partial == 0)
--	/* flag that all available metadata has been now been read */
-+	/* flag that all available metadata has now been read */
- 	p->flags &= ~PM_DISCOVER_FLAGS_META_IN_PROGRESS;
- 
-     if (pmDebugOptions.discovery)
--	fprintf(stderr, "%s : completed, partial=%d flags=%s\n",
--			"process_metadata", partial, pmDiscoverFlagsStr(p));
-+	fprintf(stderr, "%s: completed, partial=%d %s %s\n",
-+			"process_metadata", partial, p->context.name, pmDiscoverFlagsStr(p));
- }
- 
- /*
-- * fetch metric values to EOF and call all registered callbacks
-+ * Fetch metric values to EOF and call all registered callbacks.
-+ * Always process metadata through to EOF before any logvol data.
-  */
- static void
--process_logvol_callback(pmDiscover *p)
-+process_logvol(pmDiscover *p)
- {
-+    int			sts;
-     pmResult		*r;
-     pmTimespec		ts;
-+    int			oldcurvol;
-+    __pmContext		*ctxp;
-+    __pmArchCtl		*acp;
-+
-+    for (;;) {
-+	pmUseContext(p->ctx);
-+	ctxp = __pmHandleToPtr(p->ctx);
-+	acp = ctxp->c_archctl;
-+	oldcurvol = acp->ac_curvol;
-+	PM_UNLOCK(ctxp->c_lock);
-+
-+	if ((sts = pmFetchArchive(&r)) < 0) {
-+	    /* err handling to skip to the next vol */
-+	    ctxp = __pmHandleToPtr(p->ctx);
-+	    acp = ctxp->c_archctl;
-+	    if (oldcurvol < acp->ac_curvol) {
-+	    	__pmLogChangeVol(acp, acp->ac_curvol);
-+		acp->ac_offset = 0; /* __pmLogFetch will fix it up */
-+	    }
-+	    PM_UNLOCK(ctxp->c_lock);
-+
-+	    if (sts == PM_ERR_EOL) {
-+		if (pmDebugOptions.discovery)
-+		    fprintf(stderr, "process_logvol: %s end of archive reached\n",
-+		    	p->context.name);
-+
-+		/* succesfully processed to current end of log */
-+		/* successfully processed to current end of log */
-+	    } else {
-+		/* 
-+		 * This log vol was probably deleted (likely compressed)
-+		 * under our feet. Try to skip to the next volume.
-+		 * We hold the context lock during error recovery here.
-+		 */
-+		if (pmDebugOptions.discovery)
-+		    fprintf(stderr, "process_logvol: %s fetch failed: %s\n",
-+			p->context.name, pmErrStr(sts));
-+	    }
- 
--    pmUseContext(p->ctx);
--    while (pmFetchArchive(&r) == 0) {
-+	    /* we are done - return and wait for another callback */
-+	    break;
-+	}
-+
-+	/*
-+	 * Fetch succeeded - call the values callback and continue
-+	 */
- 	if (pmDebugOptions.discovery) {
- 	    char		tbuf[64], bufs[64];
- 
--	    fprintf(stderr, "FETCHED @%s [%s] %d metrics\n",
--		    timeval_str(&r->timestamp, tbuf, sizeof(tbuf)),
-+	    fprintf(stderr, "process_logvol: %s FETCHED @%s [%s] %d metrics\n",
-+		    p->context.name, timeval_str(&r->timestamp, tbuf, sizeof(tbuf)),
- 		    timeval_stream_str(&r->timestamp, bufs, sizeof(bufs)),
- 		    r->numpmid);
- 	}
-+
-+	/*
-+	 * TODO: persistently save current timestamp, so after being restarted,
-+	 * pmproxy can resume where it left off for each archive.
-+	 */
- 	ts.tv_sec = r->timestamp.tv_sec;
- 	ts.tv_nsec = r->timestamp.tv_usec * 1000;
- 	pmDiscoverInvokeValuesCallBack(p, &ts, r);
- 	pmFreeResult(r);
-     }
-+
-     /* datavol is now up-to-date and at EOF */
-     p->flags &= ~PM_DISCOVER_FLAGS_DATAVOL_READY;
- }
-@@ -874,12 +1237,13 @@ pmDiscoverInvokeCallBacks(pmDiscover *p)
- {
-     int			sts;
-     sds			msg;
-+    sds			metaname;
- 
-     if (p->ctx < 0) {
- 	/*
- 	 * once off initialization on the first event
- 	 */
--	if (p->flags & PM_DISCOVER_FLAGS_DATAVOL) {
-+	if (p->flags & (PM_DISCOVER_FLAGS_DATAVOL | PM_DISCOVER_FLAGS_META)) {
- 	    struct timeval	tvp;
- 
- 	    /* create the PMAPI context (once off) */
-@@ -898,28 +1262,25 @@ pmDiscoverInvokeCallBacks(pmDiscover *p)
- 		p->ctx = -1;
- 		return;
- 	    }
-+	    /* seek to end of archive for logvol data - see TODO in process_logvol() */
- 	    pmSetMode(PM_MODE_FORW, &tvp, 1);
--	    /* note: we do not scan pre-existing logvol data. */
--	}
--	else if (p->flags & PM_DISCOVER_FLAGS_META) {
--	    if ((sts = pmNewContext(p->context.type, p->context.name)) < 0) {
--		infofmt(msg, "pmNewContext failed for %s: %s\n",
--				p->context.name, pmErrStr(sts));
--		moduleinfo(p->module, PMLOG_ERROR, msg, p->data);
--		return;
--	    }
--	    pmDiscoverNewSource(p, sts);
- 
--	    /* for archive meta files, p->fd is the direct file descriptor */
--	    if ((p->fd = open(p->context.name, O_RDONLY)) < 0) {
--		infofmt(msg, "open failed for %s: %s\n", p->context.name,
--				osstrerror());
-+	    /*
-+	     * For archive meta files, p->fd is the direct file descriptor
-+	     * and we pre-scan existing metadata. Note: we do NOT scan
-+	     * pre-existing logvol data (see pmSetMode above)
-+	     */
-+	    metaname = sdsnew(p->context.name);
-+	    metaname = sdscat(metaname, ".meta");
-+	    if ((p->fd = open(metaname, O_RDONLY)) < 0) {
-+		infofmt(msg, "open failed for %s: %s\n", metaname, osstrerror());
- 		moduleinfo(p->module, PMLOG_ERROR, msg, p->data);
-+		sdsfree(metaname);
- 		return;
- 	    }
--
--	    /* process all existing metadata */
-+	    /* pre-process all existing metadata */
- 	    process_metadata(p);
-+	    sdsfree(metaname);
- 	}
-     }
- 
-@@ -943,15 +1304,61 @@ pmDiscoverInvokeCallBacks(pmDiscover *p)
-     }
- 
-     if (p->flags & PM_DISCOVER_FLAGS_META) {
--	/* process metadata */
-+	/* process new metadata, if any */
- 	process_metadata(p);
-     }
- 
--    /* process any unprocessed datavol callbacks */
--    pmDiscoverTraverse(PM_DISCOVER_FLAGS_DATAVOL_READY, process_logvol_callback);
-+    if ((p->flags & PM_DISCOVER_FLAGS_META_IN_PROGRESS) == 0) {
-+	/* no metadata read in progress, so process new datavol data, if any */
-+	process_logvol(p);
-+    }
-+}
-+
-+static void
-+print_callback(pmDiscover *p)
-+{
-+    if (p->flags & PM_DISCOVER_FLAGS_DIRECTORY) {
-+	fprintf(stderr, "    DIRECTORY %s %s\n",
-+	    p->context.name, pmDiscoverFlagsStr(p));
-+    }
-+    else {
-+	__pmContext *ctxp;
-+	__pmArchCtl *acp;
- 
--    /* finally, purge deleted entries, if any */
--    pmDiscoverPurgeDeleted();
-+	if (p->ctx >= 0 && (ctxp = __pmHandleToPtr(p->ctx)) != NULL) {
-+	    acp = ctxp->c_archctl;
-+	    fprintf(stderr, "    ARCHIVE %s fd=%d ctx=%d maxvol=%d ac_curvol=%d ac_offset=%ld %s\n",
-+		p->context.name, p->fd, p->ctx, acp->ac_log->l_maxvol, acp->ac_curvol,
-+		acp->ac_offset, pmDiscoverFlagsStr(p));
-+	    PM_UNLOCK(ctxp->c_lock);
-+	} else {
-+	    /* no context yet - probably PM_DISCOVER_FLAGS_NEW */
-+	    fprintf(stderr, "    ARCHIVE %s fd=%d ctx=%d %s\n",
-+		p->context.name, p->fd, p->ctx, pmDiscoverFlagsStr(p));
-+	}
-+    }
-+}
-+
-+/*
-+ * p is a tracked archive and arg is a directory path.
-+ * If p is in the directory, call its callbacks to
-+ * process metadata and logvol data. This allows better
-+ * scalability because we only process archives in the
-+ * directories that have changed.
-+ */
-+static void
-+directory_changed_cb(pmDiscover *p, void *arg)
-+{
-+    char *dirpath = (char *)arg;
-+    int dlen = strlen(dirpath);
-+
-+    if (strncmp(p->context.name, dirpath, dlen) == 0) {
-+    	/* this archive is in this directory - process its metadata and logvols */
-+	if (pmDebugOptions.discovery)
-+	    fprintf(stderr, "directory_changed_cb: archive %s is in dir %s\n",
-+		p->context.name, dirpath);
-+	pmDiscoverInvokeCallBacks(p);
-+    }
- }
- 
- static void
-@@ -962,27 +1369,46 @@ changed_callback(pmDiscover *p)
- 			pmDiscoverFlagsStr(p));
- 
-     if (p->flags & PM_DISCOVER_FLAGS_DELETED) {
--	/* path or directory has been deleted - remove from hash table */
--	deleted_callback(p);
--    }
--    else if (p->flags & PM_DISCOVER_FLAGS_DIRECTORY) {
- 	/*
--	 * A changed directory path means a new archive or subdirectory
--	 * has been created - traverse and update the hash table.
-+	 * Path has been deleted. Do nothing for now. Will be purged
-+	 * in due course by pmDiscoverPurgeDeleted.
- 	 */
--	pmDiscoverArchives(p->context.name, p->module, p->data);
--	pmDiscoverTraverse(PM_DISCOVER_FLAGS_NEW, created_callback);
-+	return;
-+	
-     }
--    else if (p->flags & PM_DISCOVER_FLAGS_COMPRESSED) {
-+
-+    if (p->flags & PM_DISCOVER_FLAGS_COMPRESSED) {
-     	/* we do not monitor compressed files - do nothing */
--	; /**/
-+	return;
-     }
--    else if (p->flags & (PM_DISCOVER_FLAGS_DATAVOL|PM_DISCOVER_FLAGS_META)) {
--    	/*
--	 * We only monitor uncompressed logvol and metadata paths. Fetch new data
--	 * (metadata or logvol) and call the registered callbacks.
-+
-+    if (p->flags & PM_DISCOVER_FLAGS_DIRECTORY) {
-+	/*
-+	 * A changed directory path means a new archive or subdirectory may have
-+	 * been created or deleted - traverse and update the hash table.
- 	 */
--	pmDiscoverInvokeCallBacks(p);
-+	if (pmDebugOptions.discovery) {
-+	    fprintf(stderr, "%s DIRECTORY CHANGED %s (%s)\n",
-+	    	stamp(), p->context.name, pmDiscoverFlagsStr(p));
-+	}
-+	pmDiscoverArchives(p->context.name, p->module, p->data);
-+	pmDiscoverTraverse(PM_DISCOVER_FLAGS_NEW, created_callback);
-+
-+	/*
-+	 * Walk directory and invoke callbacks for tracked archives in this
-+	 * directory that have changed
-+	 */
-+	pmDiscoverTraverseArg(PM_DISCOVER_FLAGS_DATAVOL|PM_DISCOVER_FLAGS_META,
-+	    directory_changed_cb, (void *)p->context.name);
-+
-+	/* finally, purge deleted entries (globally), if any */
-+	pmDiscoverPurgeDeleted();
-+    }
-+
-+    if (pmDebugOptions.discovery) {
-+	fprintf(stderr, "%s -- tracking status\n", stamp());
-+	pmDiscoverTraverse(PM_DISCOVER_FLAGS_ALL, print_callback);
-+	fprintf(stderr, "--\n");
-     }
- }
- 
-@@ -995,18 +1421,9 @@ dir_callback(pmDiscover *p)
- static void
- archive_callback(pmDiscover *p)
- {
--    if (p->flags & PM_DISCOVER_FLAGS_COMPRESSED)
--    	return; /* compressed archives don't grow */
--
--    if (p->flags & PM_DISCOVER_FLAGS_DATAVOL) {
--	if (pmDebugOptions.discovery)
--	    fprintf(stderr, "DISCOVERED ARCHIVE LOGVOL %s\n", p->context.name);
--	pmDiscoverMonitor(p->context.name, changed_callback);
--    }
--
-     if (p->flags & PM_DISCOVER_FLAGS_META) {
- 	if (pmDebugOptions.discovery)
--	    fprintf(stderr, "DISCOVERED ARCHIVE METADATA %s\n", p->context.name);
-+	    fprintf(stderr, "DISCOVERED ARCHIVE %s\n", p->context.name);
- 	pmDiscoverMonitor(p->context.name, changed_callback);
-     }
- }
-@@ -1048,9 +1465,9 @@ pmDiscoverRegister(const char *dir, pmDi
-     }
- 
-     if (pmDebugOptions.discovery) {
--	fprintf(stderr, "Now managing %d directories and %d archive files\n",
-+	fprintf(stderr, "Now tracking %d directories and %d archives\n",
- 	    pmDiscoverTraverse(PM_DISCOVER_FLAGS_DIRECTORY, NULL),
--	    pmDiscoverTraverse(PM_DISCOVER_FLAGS_DATAVOL, NULL));
-+	    pmDiscoverTraverse(PM_DISCOVER_FLAGS_DATAVOL|PM_DISCOVER_FLAGS_META, NULL));
-     }
- 
-     /* monitor the directories */
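For context, the monitoring added above in pmDiscoverMonitor() and fs_change_callBack() follows the standard libuv filesystem-event pattern: watch a path and react to UV_CHANGE/UV_RENAME events. A minimal stand-alone sketch of that pattern - illustrative only, not part of this patch - looks like:

    /* watch.c: minimal libuv file watcher, mirroring the pmDiscoverMonitor() pattern */
    #include <stdio.h>
    #include <uv.h>

    static void
    on_fs_event(uv_fs_event_t *handle, const char *filename, int events, int status)
    {
        char buffer[1024];
        size_t bytes = sizeof(buffer) - 1;

        if (status < 0)
            return;
        uv_fs_event_getpath(handle, buffer, &bytes);  /* path being watched */
        buffer[bytes] = '\0';
        printf("event on %s:%s%s\n", buffer,
            (events & UV_RENAME) ? " renamed" : "",
            (events & UV_CHANGE) ? " changed" : "");
    }

    int
    main(int argc, char **argv)
    {
        uv_loop_t *loop = uv_default_loop();
        uv_fs_event_t handle;

        /* e.g. pass an archive ".meta" path, as the discovery code above does */
        uv_fs_event_init(loop, &handle);
        uv_fs_event_start(&handle, on_fs_event, argc > 1 ? argv[1] : ".", 0);
        return uv_run(loop, UV_RUN_DEFAULT);
    }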
-diff -Naurp pcp-5.0.2.orig/src/libpcp_web/src/discover.h pcp-5.0.2/src/libpcp_web/src/discover.h
---- pcp-5.0.2.orig/src/libpcp_web/src/discover.h	2019-12-10 17:04:20.000000000 +1100
-+++ pcp-5.0.2/src/libpcp_web/src/discover.h	2020-02-03 13:36:09.904659047 +1100
-@@ -1,5 +1,5 @@
- /*
-- * Copyright (c) 2018-2019 Red Hat.
-+ * Copyright (c) 2018-2020 Red Hat.
-  *
-  * This library is free software; you can redistribute it and/or modify it
-  * under the terms of the GNU Lesser General Public License as published
-@@ -18,7 +18,9 @@
- #include "libpcp.h"
- #include "mmv_stats.h"
- #include "slots.h"
--
-+#ifdef HAVE_REGEX_H
-+#include <regex.h>
-+#endif
- #ifdef HAVE_LIBUV
- #include <uv.h>
- #else
-@@ -84,8 +86,8 @@ typedef struct pmDiscover {
-     int				fd;		/* meta file descriptor */
- #ifdef HAVE_LIBUV
-     uv_fs_event_t		*event_handle;	/* uv fs_notify event handle */ 
--    uv_stat_t			statbuf;	/* stat buffer from event CB */
- #endif
-+    struct stat			statbuf;	/* stat buffer */
-     void			*baton;		/* private internal lib data */
-     void			*data;		/* opaque user data pointer */
- } pmDiscover;
-@@ -115,6 +117,10 @@ typedef struct discoverModuleData {
-     struct dict			*config;	/* configuration dict */
-     uv_loop_t			*events;	/* event library loop */
-     redisSlots			*slots;		/* server slots data */
-+    regex_t			exclude_names;	/* metric names to exclude */
-+    struct dict			*pmids;		/* dict of excluded PMIDs */
-+    unsigned int		exclude_indoms;	/* exclude instance domains */
-+    struct dict			*indoms;	/* dict of excluded InDoms */
-     void			*data;		/* user-supplied pointer */
- } discoverModuleData;
- 
-diff -Naurp pcp-5.0.2.orig/src/libpcp_web/src/exports pcp-5.0.2/src/libpcp_web/src/exports
---- pcp-5.0.2.orig/src/libpcp_web/src/exports	2019-11-26 16:29:58.000000000 +1100
-+++ pcp-5.0.2/src/libpcp_web/src/exports	2020-02-03 13:23:15.264762900 +1100
-@@ -178,3 +178,8 @@ PCP_WEB_1.11 {
-   global:
-     pmSeriesLabelValues;
- } PCP_WEB_1.10;
-+
-+PCP_WEB_1.12 {
-+  global:
-+    SDS_NOINIT;
-+} PCP_WEB_1.11;
-diff -Naurp pcp-5.0.2.orig/src/libpcp_web/src/load.c pcp-5.0.2/src/libpcp_web/src/load.c
---- pcp-5.0.2.orig/src/libpcp_web/src/load.c	2019-12-11 14:01:53.000000000 +1100
-+++ pcp-5.0.2/src/libpcp_web/src/load.c	2020-02-03 13:36:03.947721365 +1100
-@@ -1,5 +1,5 @@
- /*
-- * Copyright (c) 2017-2019 Red Hat.
-+ * Copyright (c) 2017-2020 Red Hat.
-  *
-  * This library is free software; you can redistribute it and/or modify it
-  * under the terms of the GNU Lesser General Public License as published
-@@ -112,22 +112,41 @@ load_prepare_metric(const char *name, vo
-  * Iterate over an instance domain and extract names and labels
-  * for each instance.
-  */
--static unsigned int
--get_instance_metadata(seriesLoadBaton *baton, pmInDom indom)
-+static void
-+get_instance_metadata(seriesLoadBaton *baton, pmInDom indom, int force_refresh)
- {
-     context_t		*cp = &baton->pmapi.context;
--    unsigned int	count = 0;
-     domain_t		*dp;
-     indom_t		*ip;
- 
-     if (indom != PM_INDOM_NULL) {
- 	if ((dp = pmwebapi_add_domain(cp, pmInDom_domain(indom))))
- 	    pmwebapi_add_domain_labels(cp, dp);
--	if ((ip = pmwebapi_add_indom(cp, dp, indom)) &&
--	    (count = pmwebapi_add_indom_instances(cp, ip)) > 0)
--	    pmwebapi_add_instances_labels(cp, ip);
-+	if ((ip = pmwebapi_add_indom(cp, dp, indom)) != NULL) {
-+	    if (force_refresh)
-+		ip->updated = 1;
-+	    if (ip->updated) {
-+		pmwebapi_add_indom_instances(cp, ip);
-+		pmwebapi_add_instances_labels(cp, ip);
-+	    }
-+	}
-     }
--    return count;
-+}
-+
-+static void
-+get_metric_metadata(seriesLoadBaton *baton, metric_t *metric)
-+{
-+    context_t		*context = &baton->pmapi.context;
-+
-+    if (metric->cluster) {
-+	if (metric->cluster->domain)
-+	    pmwebapi_add_domain_labels(context, metric->cluster->domain);
-+	pmwebapi_add_cluster_labels(context, metric->cluster);
-+    }
-+    if (metric->indom)
-+	pmwebapi_add_instances_labels(context, metric->indom);
-+    pmwebapi_add_item_labels(context, metric);
-+    pmwebapi_metric_hash(metric);
- }
- 
- static metric_t *
-@@ -140,18 +159,25 @@ new_metric(seriesLoadBaton *baton, pmVal
-     char		**nameall = NULL;
-     int			count, sts, i;
- 
--    if ((sts = pmLookupDesc(vsp->pmid, &desc)) < 0) {
-+    if ((sts = pmUseContext(context->context)) < 0) {
-+	fprintf(stderr, "%s: failed to use context for PMID %s: %s\n",
-+		"new_metric",
-+		pmIDStr_r(vsp->pmid, idbuf, sizeof(idbuf)),
-+		pmErrStr_r(sts, errmsg, sizeof(errmsg)));
-+    } else if ((sts = pmLookupDesc(vsp->pmid, &desc)) < 0) {
- 	if (sts == PM_ERR_IPC)
- 	    context->setup = 0;
- 	if (pmDebugOptions.series)
--	    fprintf(stderr, "failed to lookup metric %s descriptor: %s",
-+	    fprintf(stderr, "%s: failed to lookup metric %s descriptor: %s\n",
-+		"new_metric",
- 		pmIDStr_r(vsp->pmid, idbuf, sizeof(idbuf)),
- 		pmErrStr_r(sts, errmsg, sizeof(errmsg)));
-     } else if ((sts = count = pmNameAll(vsp->pmid, &nameall)) < 0) {
- 	if (sts == PM_ERR_IPC)
- 	    context->setup = 0;
- 	if (pmDebugOptions.series)
--	    fprintf(stderr, "failed to lookup metric %s names: %s",
-+	    fprintf(stderr, "%s: failed to lookup metric %s names: %s\n",
-+		"new_metric",
- 		pmIDStr_r(vsp->pmid, idbuf, sizeof(idbuf)),
- 		pmErrStr_r(sts, errmsg, sizeof(errmsg)));
-     }
-@@ -160,18 +186,10 @@ new_metric(seriesLoadBaton *baton, pmVal
- 
-     if ((metric = pmwebapi_new_metric(context, NULL, &desc, count, nameall)) == NULL)
- 	return NULL;
--    if (metric->cluster) {
--	if (metric->cluster->domain)
--	    pmwebapi_add_domain_labels(context, metric->cluster->domain);
--	pmwebapi_add_cluster_labels(context, metric->cluster);
--    }
--    if (metric->indom)
--	pmwebapi_add_instances_labels(context, metric->indom);
--    pmwebapi_add_item_labels(context, metric);
--    pmwebapi_metric_hash(metric);
-+    get_metric_metadata(baton, metric);
- 
-     if (pmDebugOptions.series) {
--	fprintf(stderr, "new_metric [%s] names:",
-+	fprintf(stderr, "%s [%s] names:\n", "new_metric",
- 		pmIDStr_r(vsp->pmid, idbuf, sizeof(idbuf)));
- 	for (i = 0; i < count; i++) {
- 	    pmwebapi_hash_str(metric->names[i].hash, idbuf, sizeof(idbuf));
-@@ -409,7 +427,7 @@ pmwebapi_add_valueset(metric_t *metric,
- }
- 
- static void
--series_cache_update(seriesLoadBaton *baton)
-+series_cache_update(seriesLoadBaton *baton, struct dict *exclude)
- {
-     seriesGetContext	*context = &baton->pmapi;
-     context_t		*cp = &context->context;
-@@ -418,7 +436,7 @@ series_cache_update(seriesLoadBaton *bat
-     metric_t		*metric = NULL;
-     char		ts[64];
-     sds			timestamp;
--    int			i, write_meta, write_data;
-+    int			i, write_meta, write_inst, write_data;
- 
-     timestamp = sdsnew(timeval_stream_str(&result->timestamp, ts, sizeof(ts)));
-     write_data = (!(baton->flags & PM_SERIES_FLAG_METADATA));
-@@ -441,6 +459,12 @@ series_cache_update(seriesLoadBaton *bat
- 	    dictFetchValue(baton->wanted, &vsp->pmid) == NULL)
- 	    continue;
- 
-+	/* check if metric to be skipped (optional metric exclusion) */
-+	if (exclude && (dictFind(exclude, &vsp->pmid)) != NULL)
-+	    continue;
-+
-+	write_meta = write_inst = 0;
-+
- 	/* check if pmid already in hash list */
- 	if ((metric = dictFetchValue(cp->pmids, &vsp->pmid)) == NULL) {
- 	    /* create a new metric, and add it to load context */
-@@ -448,21 +472,22 @@ series_cache_update(seriesLoadBaton *bat
- 		continue;
- 	    write_meta = 1;
- 	} else {	/* pmid already observed */
--	    write_meta = 0;
-+	    if ((write_meta = metric->cached) == 0)
-+		get_metric_metadata(baton, metric);
- 	}
- 
- 	/* iterate through result instances and ensure metric_t is complete */
- 	if (metric->error == 0 && vsp->numval < 0)
- 	    write_meta = 1;
- 	if (pmwebapi_add_valueset(metric, vsp) != 0)
--	    write_meta = 1;
-+	    write_meta = write_inst = 1;
- 
- 	/* record the error code in the cache */
- 	metric->error = (vsp->numval < 0) ? vsp->numval : 0;
- 
- 	/* make PMAPI calls to cache metadata */
--	if (write_meta && get_instance_metadata(baton, metric->desc.indom) != 0)
--	    continue;
-+	if (write_meta)
-+	    get_instance_metadata(baton, metric->desc.indom, write_inst);
- 
- 	/* initiate writes to backend caching servers (Redis) */
- 	server_cache_metric(baton, metric, timestamp, write_meta, write_data);
-@@ -549,7 +574,7 @@ server_cache_window(void *arg)
- 	    (finish->tv_sec == result->timestamp.tv_sec &&
- 	     finish->tv_usec >= result->timestamp.tv_usec)) {
- 	    context->done = server_cache_update_done;
--	    series_cache_update(baton);
-+	    series_cache_update(baton, NULL);
- 	}
- 	else {
- 	    if (pmDebugOptions.series)
-@@ -1023,7 +1048,7 @@ pmSeriesDiscoverSource(pmDiscoverEvent *
-     sds			msg;
-     int			i;
- 
--    if (data == NULL || data->slots == NULL)
-+    if (data == NULL || data->slots == NULL || data->slots->setup == 0)
- 	return;
- 
-     baton = (seriesLoadBaton *)calloc(1, sizeof(seriesLoadBaton));
-@@ -1032,22 +1057,31 @@ pmSeriesDiscoverSource(pmDiscoverEvent *
- 	moduleinfo(module, PMLOG_ERROR, msg, arg);
- 	return;
-     }
-+    if ((set = pmwebapi_labelsetdup(p->context.labelset)) == NULL) {
-+	infofmt(msg, "%s: out of memory for labels", "pmSeriesDiscoverSource");
-+	moduleinfo(module, PMLOG_ERROR, msg, arg);
-+	free(baton);
-+	return;
-+    }
-+
-     initSeriesLoadBaton(baton, module, 0 /*flags*/,
- 			module->on_info, series_discover_done,
- 			data->slots, arg);
-     initSeriesGetContext(&baton->pmapi, baton);
-     p->baton = baton;
- 
-+    cp = &baton->pmapi.context;
-+
-     if (pmDebugOptions.discovery)
--	fprintf(stderr, "%s: new source %s context=%d\n",
--			"pmSeriesDiscoverSource", p->context.name, p->ctx);
-+	fprintf(stderr, "%s: new source %s context=%p ctxid=%d\n",
-+			"pmSeriesDiscoverSource", p->context.name, cp, p->ctx);
- 
--    cp = &baton->pmapi.context;
-     cp->context = p->ctx;
-     cp->type = p->context.type;
-     cp->name.sds = sdsdup(p->context.name);
--    cp->host = p->context.hostname;
--    cp->labelset = set = p->context.labelset;
-+    cp->host = sdsdup(p->context.hostname);
-+    cp->labelset = set;
-+
-     pmwebapi_source_hash(cp->name.hash, set->json, set->jsonlen);
-     pmwebapi_setup_context(cp);
-     set_source_origin(cp);
-@@ -1095,21 +1129,22 @@ pmSeriesDiscoverLabels(pmDiscoverEvent *
-     sds			msg;
-     int			i, id;
- 
-+    if (baton == NULL || baton->slots == NULL || baton->slots->setup == 0)
-+	return;
-+
-     switch (type) {
-     case PM_LABEL_CONTEXT:
- 	if (pmDebugOptions.discovery)
- 	    fprintf(stderr, "%s: context\n", "pmSeriesDiscoverLabels");
- 
- 	if ((labels = pmwebapi_labelsetdup(sets)) != NULL) {
--#if 0 /* PCP GH#800 do not free this labelset - it's owned by the discover code */
- 	    if (cp->labelset)
- 		pmFreeLabelSets(cp->labelset, 1);
--#endif
- 	    cp->labelset = labels;
- 	    pmwebapi_locate_context(cp);
- 	    cp->updated = 1;
- 	} else {
--	    infofmt(msg, "failed to duplicate label set");
-+	    infofmt(msg, "failed to duplicate %s label set", "context");
- 	    moduleinfo(event->module, PMLOG_ERROR, msg, arg);
- 	}
- 	break;
-@@ -1125,8 +1160,8 @@ pmSeriesDiscoverLabels(pmDiscoverEvent *
- 		pmFreeLabelSets(domain->labelset, 1);
- 	    domain->labelset = labels;
- 	    domain->updated = 1;
--	} else {
--	    infofmt(msg, "failed to duplicate label set");
-+	} else if (domain) {
-+	    infofmt(msg, "failed to duplicate %s label set", "domain");
- 	    moduleinfo(event->module, PMLOG_ERROR, msg, arg);
- 	}
- 	break;
-@@ -1142,8 +1177,8 @@ pmSeriesDiscoverLabels(pmDiscoverEvent *
- 		pmFreeLabelSets(cluster->labelset, 1);
- 	    cluster->labelset = labels;
- 	    cluster->updated = 1;
--	} else {
--	    infofmt(msg, "failed to duplicate label set");
-+	} else if (cluster) {
-+	    infofmt(msg, "failed to duplicate %s label set", "cluster");
- 	    moduleinfo(event->module, PMLOG_ERROR, msg, arg);
- 	}
- 	break;
-@@ -1159,8 +1194,8 @@ pmSeriesDiscoverLabels(pmDiscoverEvent *
- 		pmFreeLabelSets(metric->labelset, 1);
- 	    metric->labelset = labels;
- 	    metric->updated = 1;
--	} else {
--	    infofmt(msg, "failed to duplicate label set");
-+	} else if (metric) {
-+	    infofmt(msg, "failed to duplicate %s label set", "item");
- 	    moduleinfo(event->module, PMLOG_ERROR, msg, arg);
- 	}
- 	break;
-@@ -1177,8 +1212,8 @@ pmSeriesDiscoverLabels(pmDiscoverEvent *
- 		pmFreeLabelSets(indom->labelset, 1);
- 		    indom->labelset = labels;
- 	    indom->updated = 1;
--	} else {
--	    infofmt(msg, "failed to duplicate label set");
-+	} else if (indom) {
-+	    infofmt(msg, "failed to duplicate %s label set", "indom");
- 	    moduleinfo(event->module, PMLOG_ERROR, msg, arg);
- 	}
- 	break;
-@@ -1196,7 +1231,7 @@ pmSeriesDiscoverLabels(pmDiscoverEvent *
- 	    if ((instance = dictFetchValue(indom->insts, &id)) == NULL)
- 		continue;
- 	    if ((labels = pmwebapi_labelsetdup(&sets[i])) == NULL) {
--		infofmt(msg, "failed to dup %s instance labels: %s",
-+		infofmt(msg, "failed to dup indom %s instance label set: %s",
- 			pmInDomStr_r(indom->indom, idbuf, sizeof(idbuf)),
- 			pmErrStr_r(-ENOMEM, errmsg, sizeof(errmsg)));
- 		moduleinfo(event->module, PMLOG_ERROR, msg, arg);
-@@ -1229,10 +1264,13 @@ pmSeriesDiscoverMetric(pmDiscoverEvent *
- 
-     if (pmDebugOptions.discovery) {
- 	for (i = 0; i < numnames; i++)
--	    fprintf(stderr, "pmSeriesDiscoverMetric: [%d/%d] %s - %s\n",
-+	    fprintf(stderr, "%s: [%d/%d] %s - %s\n", "pmSeriesDiscoverMetric",
- 			i + 1, numnames, pmIDStr(desc->pmid), names[i]);
-     }
- 
-+    if (baton == NULL || baton->slots == NULL || baton->slots->setup == 0)
-+	return;
-+
-     if ((metric = pmwebapi_add_metric(&baton->pmapi.context,
- 				NULL, desc, numnames, names)) == NULL) {
- 	infofmt(msg, "%s: failed metric discovery", "pmSeriesDiscoverMetric");
-@@ -1244,18 +1282,23 @@ pmSeriesDiscoverMetric(pmDiscoverEvent *
- void
- pmSeriesDiscoverValues(pmDiscoverEvent *event, pmResult *result, void *arg)
- {
-+    pmDiscoverModule	*module = event->module;
-     pmDiscover		*p = (pmDiscover *)event->data;
-     seriesLoadBaton	*baton = p->baton;
-     seriesGetContext	*context = &baton->pmapi;
-+    discoverModuleData	*data = getDiscoverModuleData(module);
- 
-     if (pmDebugOptions.discovery)
- 	fprintf(stderr, "%s: result numpmids=%d\n", "pmSeriesDiscoverValues", result->numpmid);
- 
-+    if (baton == NULL || baton->slots == NULL || baton->slots->setup == 0)
-+	return;
-+
-     seriesBatonReference(context, "pmSeriesDiscoverValues");
-     baton->arg = arg;
-     context->result = result;
- 
--    series_cache_update(baton);
-+    series_cache_update(baton, data->pmids);
- }
- 
- void
-@@ -1271,7 +1314,10 @@ pmSeriesDiscoverInDom(pmDiscoverEvent *e
-     int			i;
- 
-     if (pmDebugOptions.discovery)
--	fprintf(stderr, "pmSeriesDiscoverInDom: %s\n", pmInDomStr(id));
-+	fprintf(stderr, "%s: %s\n", "pmSeriesDiscoverInDom", pmInDomStr(id));
-+
-+    if (baton == NULL || baton->slots == NULL || baton->slots->setup == 0)
-+	return;
- 
-     if ((domain = pmwebapi_add_domain(context, pmInDom_domain(id))) == NULL) {
- 	infofmt(msg, "%s: failed indom discovery (domain %u)",
-@@ -1303,11 +1349,10 @@ pmSeriesDiscoverText(pmDiscoverEvent *ev
-     pmDiscover		*p = (pmDiscover *)event->data;
-     seriesLoadBaton	*baton = p->baton;
- 
--    (void)baton;
--    (void)ident;
--    (void)type;
--    (void)text;
--    (void)arg;
-+    if (pmDebugOptions.discovery)
-+	fprintf(stderr, "%s: ident=%u type=%u arg=%p\n",
-+			"pmSeriesDiscoverText", ident, type, arg);
- 
--    /* for Redis, help text will need special handling (RediSearch) */
-+    if (baton == NULL || baton->slots == NULL || baton->slots->setup == 0)
-+	return;
- }
-diff -Naurp pcp-5.0.2.orig/src/libpcp_web/src/query.c pcp-5.0.2/src/libpcp_web/src/query.c
---- pcp-5.0.2.orig/src/libpcp_web/src/query.c	2019-12-05 17:29:43.000000000 +1100
-+++ pcp-5.0.2/src/libpcp_web/src/query.c	2020-02-03 13:23:15.265762890 +1100
-@@ -1,5 +1,5 @@
- /*
-- * Copyright (c) 2017-2019 Red Hat.
-+ * Copyright (c) 2017-2020 Red Hat.
-  *
-  * This library is free software; you can redistribute it and/or modify it
-  * under the terms of the GNU Lesser General Public License as published
-@@ -1243,24 +1243,43 @@ series_prepare_time_reply(
-     series_query_end_phase(baton);
- }
- 
-+unsigned int
-+series_value_count_only(timing_t *tp)
-+{
-+    if (tp->window.range || tp->window.delta ||
-+	tp->window.start || tp->window.end)
-+	return 0;
-+    return tp->count;
-+}
-+
- static void
- series_prepare_time(seriesQueryBaton *baton, series_set_t *result)
- {
-     timing_t		*tp = &baton->u.query.timing;
-     unsigned char	*series = result->series;
-     seriesGetSID	*sid;
--    char		buffer[64];
-+    char		buffer[64], revbuf[64];
-     sds			start, end, key, cmd;
--    unsigned int	i;
-+    unsigned int	i, revlen = 0, reverse = 0;
-+
-+    /* if only 'count' is requested, work back from most recent value */
-+    if ((reverse = series_value_count_only(tp)) != 0) {
-+	revlen = pmsprintf(revbuf, sizeof(revbuf), "%u", reverse);
-+	start = sdsnew("+");
-+    } else {
-+	start = sdsnew(timeval_stream_str(&tp->start, buffer, sizeof(buffer)));
-+    }
- 
--    start = sdsnew(timeval_stream_str(&tp->start, buffer, sizeof(buffer)));
-     if (pmDebugOptions.series)
- 	fprintf(stderr, "START: %s\n", start);
- 
--    if (tp->end.tv_sec)
-+    if (reverse)
-+	end = sdsnew("-");
-+    else if (tp->end.tv_sec)
- 	end = sdsnew(timeval_stream_str(&tp->end, buffer, sizeof(buffer)));
-     else
- 	end = sdsnew("+");	/* "+" means "no end" - to the most recent */
-+
-     if (pmDebugOptions.series)
- 	fprintf(stderr, "END: %s\n", end);
- 
-@@ -1277,12 +1296,21 @@ series_prepare_time(seriesQueryBaton *ba
- 
- 	key = sdscatfmt(sdsempty(), "pcp:values:series:%S", sid->name);
- 
--	/* XRANGE key t1 t2 */
--	cmd = redis_command(4);
--	cmd = redis_param_str(cmd, XRANGE, XRANGE_LEN);
-+	/* X[REV]RANGE key t1 t2 [count N] */
-+	if (reverse) {
-+	    cmd = redis_command(6);
-+	    cmd = redis_param_str(cmd, XREVRANGE, XREVRANGE_LEN);
-+	} else {
-+	    cmd = redis_command(4);
-+	    cmd = redis_param_str(cmd, XRANGE, XRANGE_LEN);
-+	}
- 	cmd = redis_param_sds(cmd, key);
- 	cmd = redis_param_sds(cmd, start);
- 	cmd = redis_param_sds(cmd, end);
-+	if (reverse) {
-+	    cmd = redis_param_str(cmd, "COUNT", sizeof("COUNT")-1);
-+	    cmd = redis_param_str(cmd, revbuf, revlen);
-+	}
- 	redisSlotsRequest(baton->slots, XRANGE, key, cmd,
- 				series_prepare_time_reply, sid);
-     }
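For reference (illustration only, not part of the patch), the two request forms built above correspond to the following Redis stream commands, with the series hash shown as a placeholder:

    XRANGE    pcp:values:series:<series-hash> <start> <end>
    XREVRANGE pcp:values:series:<series-hash> + - COUNT <n>

XREVRANGE expects the newer bound first, which is why start and end become "+" and "-" when only a sample count is requested; the COUNT argument then limits the reply to the most recent <n> entries.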
-diff -Naurp pcp-5.0.2.orig/src/libpcp_web/src/schema.c pcp-5.0.2/src/libpcp_web/src/schema.c
---- pcp-5.0.2.orig/src/libpcp_web/src/schema.c	2019-11-18 19:35:11.000000000 +1100
-+++ pcp-5.0.2/src/libpcp_web/src/schema.c	2020-02-03 13:36:03.948721355 +1100
-@@ -1,5 +1,5 @@
- /*
-- * Copyright (c) 2017-2019 Red Hat.
-+ * Copyright (c) 2017-2020 Red Hat.
-  *
-  * This library is free software; you can redistribute it and/or modify it
-  * under the terms of the GNU Lesser General Public License as published
-@@ -819,7 +819,7 @@ redis_series_metric(redisSlots *slots, m
-      */
- 
-     /* ensure all metric name strings are mapped */
--    for (i = 0; i < metric->numnames; i++) {
-+    for (i = 0; metric->cached == 0 && i < metric->numnames; i++) {
- 	assert(metric->names[i].sds != NULL);
- 	seriesBatonReference(baton, "redis_series_metric");
- 	redisGetMap(slots,
-@@ -830,7 +830,8 @@ redis_series_metric(redisSlots *slots, m
- 
-     /* ensure all metric or instance label strings are mapped */
-     if (metric->desc.indom == PM_INDOM_NULL || metric->u.vlist == NULL) {
--	series_metric_label_mapping(metric, baton);
-+	if (metric->cached == 0)
-+	    series_metric_label_mapping(metric, baton);
-     } else {
- 	for (i = 0; i < metric->u.vlist->listcount; i++) {
- 	    value = &metric->u.vlist->value[i];
-@@ -847,7 +848,8 @@ redis_series_metric(redisSlots *slots, m
- 			series_name_mapping_callback,
- 			baton->info, baton->userdata, baton);
- 
--	    series_instance_label_mapping(metric, instance, baton);
-+	    if (instance->cached == 0)
-+		series_instance_label_mapping(metric, instance, baton);
- 	}
-     }
- 
-@@ -941,6 +943,9 @@ redis_series_metadata(context_t *context
-     sds				cmd, key;
-     int				i;
- 
-+    if (metric->cached)
-+	goto check_instances;
-+
-     indom = pmwebapi_indom_str(metric, ibuf, sizeof(ibuf));
-     pmid = pmwebapi_pmid_str(metric, pbuf, sizeof(pbuf));
-     sem = pmwebapi_semantics_str(metric, sbuf, sizeof(sbuf));
-@@ -1000,16 +1005,24 @@ redis_series_metadata(context_t *context
- 	cmd = redis_param_sha(cmd, metric->names[i].hash);
-     redisSlotsRequest(slots, SADD, key, cmd, redis_series_source_callback, arg);
- 
-+check_instances:
-     if (metric->desc.indom == PM_INDOM_NULL || metric->u.vlist == NULL) {
--	redis_series_labelset(slots, metric, NULL, baton);
-+	if (metric->cached == 0) {
-+	    redis_series_labelset(slots, metric, NULL, baton);
-+	    metric->cached = 1;
-+	}
-     } else {
- 	for (i = 0; i < metric->u.vlist->listcount; i++) {
- 	    value = &metric->u.vlist->value[i];
- 	    if ((instance = dictFetchValue(metric->indom->insts, &value->inst)) == NULL)
- 		continue;
--	    redis_series_instance(slots, metric, instance, baton);
--	    redis_series_labelset(slots, metric, instance, baton);
-+	    if (instance->cached == 0 || metric->cached == 0) {
-+		redis_series_instance(slots, metric, instance, baton);
-+		redis_series_labelset(slots, metric, instance, baton);
-+	    }
-+	    instance->cached = 1;
- 	}
-+	metric->cached = 1;
-     }
- }
- 
-@@ -1210,7 +1223,6 @@ redis_series_stream(redisSlots *slots, s
- 
-     redisSlotsRequest(slots, XADD, key, cmd, redis_series_stream_callback, baton);
- 
--
-     key = sdscatfmt(sdsempty(), "pcp:values:series:%s", hash);
-     cmd = redis_command(3);	/* EXPIRE key timer */
-     cmd = redis_param_str(cmd, EXPIRE, EXPIRE_LEN);
-@@ -1228,9 +1240,6 @@ redis_series_streamed(sds stamp, metric_
-     char			hashbuf[42];
-     int				i;
- 
--    if (metric->updated == 0)
--	return;
--
-     for (i = 0; i < metric->numnames; i++) {
- 	pmwebapi_hash_str(metric->names[i].hash, hashbuf, sizeof(hashbuf));
- 	redis_series_stream(slots, stamp, metric, hashbuf, arg);
-@@ -1545,7 +1554,10 @@ redis_load_slots_callback(
-     redisSlots		*slots = baton->slots;
- 
-     seriesBatonCheckMagic(baton, MAGIC_SLOTS, "redis_load_slots_callback");
-+
-+    slots->setup = 1;	/* we've received initial response from Redis */
-     slots->refresh = 0;	/* we're processing CLUSTER SLOTS command now */
-+
-     /* no cluster redirection checking is needed for this callback */
-     sdsfree(cmd);
- 
-@@ -1832,12 +1844,47 @@ pmDiscoverSetup(pmDiscoverModule *module
-     const char		fallback[] = "/var/log/pcp";
-     const char		*paths[] = { "pmlogger", "pmmgr" };
-     const char		*logdir = pmGetOptionalConfig("PCP_LOG_DIR");
-+    struct dict		*config;
-+    unsigned int	domain, serial;
-+    pmInDom		indom;
-     char		path[MAXPATHLEN];
-     char		sep = pmPathSeparator();
--    int			i, sts, count = 0;
-+    sds			option, *ids;
-+    int			i, sts, nids, count = 0;
- 
-     if (data == NULL)
- 	return -ENOMEM;
-+    config = data->config;
-+
-+    /* double-check that we are supposed to be in here */
-+    if ((option = pmIniFileLookup(config, "discover", "enabled"))) {
-+	if (strcasecmp(option, "false") == 0)
-+	    return 0;
-+    }
-+
-+    /* prepare for optional metric and indom exclusion */
-+    if ((option = pmIniFileLookup(config, "discover", "exclude.metrics"))) {
-+	if ((data->pmids = dictCreate(&intKeyDictCallBacks, NULL)) == NULL)
-+	    return -ENOMEM;
-+	/* parse regular expression string for matching on metric names */
-+	regcomp(&data->exclude_names, option, REG_EXTENDED|REG_NOSUB);
-+    }
-+    if ((option = pmIniFileLookup(config, "discover", "exclude.indoms"))) {
-+	if ((data->indoms = dictCreate(&intKeyDictCallBacks, NULL)) == NULL)
-+	    return -ENOMEM;
-+	/* parse comma-separated indoms in 'option', convert to pmInDom */
-+	if ((ids = sdssplitlen(option, sdslen(option), ",", 1, &nids))) {
-+	    data->exclude_indoms = nids;
-+	    for (i = 0; i < nids; i++) {
-+		if (sscanf(ids[i], "%u.%u", &domain, &serial) == 2) {
-+		    indom = pmInDom_build(domain, serial);
-+		    dictAdd(data->indoms, &indom, NULL);
-+		}
-+		sdsfree(ids[i]);
-+	    }
-+	    free(ids);
-+	}
-+    }
- 
-     /* create global EVAL hashes and string map caches */
-     redisGlobalsInit(data->config);
-diff -Naurp pcp-5.0.2.orig/src/libpcp_web/src/schema.h pcp-5.0.2/src/libpcp_web/src/schema.h
---- pcp-5.0.2.orig/src/libpcp_web/src/schema.h	2019-10-11 17:16:29.000000000 +1100
-+++ pcp-5.0.2/src/libpcp_web/src/schema.h	2020-02-03 13:23:15.266762879 +1100
-@@ -1,5 +1,5 @@
- /*
-- * Copyright (c) 2017-2018 Red Hat.
-+ * Copyright (c) 2017-2020 Red Hat.
-  * 
-  * This library is free software; you can redistribute it and/or modify it
-  * under the terms of the GNU Lesser General Public License as published
-@@ -51,6 +51,10 @@
- #define HSET_LEN	(sizeof(HSET)-1)
- #define HVALS		"HVALS"
- #define HVALS_LEN	(sizeof(HVALS)-1)
-+#define INFO		"INFO"
-+#define INFO_LEN	(sizeof(INFO)-1)
-+#define PING		"PING"
-+#define PING_LEN	(sizeof(PING)-1)
- #define PUBLISH		"PUBLISH"
- #define PUBLISH_LEN	(sizeof(PUBLISH)-1)
- #define SADD		"SADD"
-@@ -63,6 +67,8 @@
- #define XADD_LEN	(sizeof(XADD)-1)
- #define XRANGE		"XRANGE"
- #define XRANGE_LEN	(sizeof(XRANGE)-1)
-+#define XREVRANGE	"XREVRANGE"
-+#define XREVRANGE_LEN	(sizeof(XREVRANGE)-1)
- 
- /* create a Redis protocol command (e.g. XADD, SMEMBER) */
- static inline sds
-diff -Naurp pcp-5.0.2.orig/src/libpcp_web/src/slots.c pcp-5.0.2/src/libpcp_web/src/slots.c
---- pcp-5.0.2.orig/src/libpcp_web/src/slots.c	2019-10-11 17:16:29.000000000 +1100
-+++ pcp-5.0.2/src/libpcp_web/src/slots.c	2020-02-03 13:23:15.266762879 +1100
-@@ -356,6 +356,21 @@ redisSlotsRequest(redisSlots *slots, con
- 
-     if (UNLIKELY(pmDebugOptions.desperate))
- 	fputs(cmd, stderr);
-+    if (UNLIKELY(!key && !slots->setup)) {
-+	/*
-+	 * The first request must be CLUSTER, PING, or similar - regular
-+	 * requests must not be allowed until these have completed.
-+	 * This is because the lower layers accumulate async requests
-+	 * until the connection is established, which might never happen.
-+	 * Over time this becomes a memory leak if we never establish
-+	 * an initial connection.
-+	 */
-+	if (strcmp(topic, CLUSTER) != 0 &&
-+	    strcmp(topic, PING) != 0 && strcmp(topic, INFO) != 0) {
-+	    sdsfree(cmd);
-+	    return -ENOTCONN;
-+	}
-+    }
- 
-     sts = redisAsyncFormattedCommand(context, callback, cmd, arg);
-     if (key)
-diff -Naurp pcp-5.0.2.orig/src/libpcp_web/src/slots.h pcp-5.0.2/src/libpcp_web/src/slots.h
---- pcp-5.0.2.orig/src/libpcp_web/src/slots.h	2019-04-08 09:11:00.000000000 +1000
-+++ pcp-5.0.2/src/libpcp_web/src/slots.h	2020-02-03 13:23:15.266762879 +1100
-@@ -44,10 +44,11 @@ typedef struct redisSlotRange {
- typedef struct redisSlots {
-     unsigned int	counter;
-     unsigned int	nslots;
-+    unsigned int	setup;		/* slots info all successfully setup */
-+    unsigned int	refresh;	/* do slot refresh whenever possible */
-     redisSlotRange	*slots;		/* all instances; e.g. CLUSTER SLOTS */
-     redisMap		*keymap;	/* map command names to key position */
-     dict		*contexts;	/* async contexts access by hostspec */
--    unsigned int	refresh;	/* do slot refresh whenever possible */
-     void		*events;
- } redisSlots;
- 
-diff -Naurp pcp-5.0.2.orig/src/libpcp_web/src/util.c pcp-5.0.2/src/libpcp_web/src/util.c
---- pcp-5.0.2.orig/src/libpcp_web/src/util.c	2019-12-10 17:39:49.000000000 +1100
-+++ pcp-5.0.2/src/libpcp_web/src/util.c	2020-02-03 13:23:15.266762879 +1100
-@@ -1,5 +1,5 @@
- /*
-- * Copyright (c) 2017-2019 Red Hat.
-+ * Copyright (c) 2017-2020 Red Hat.
-  *
-  * This library is free software; you can redistribute it and/or modify it
-  * under the terms of the GNU Lesser General Public License as published
-@@ -535,6 +535,8 @@ pmwebapi_metric_hash(metric_t *metric)
- 	sdsclear(identifier);
-     }
-     sdsfree(identifier);
-+
-+    metric->cached = 0;
- }
- 
- void
-@@ -574,6 +576,8 @@ pmwebapi_instance_hash(indom_t *ip, inst
-     SHA1Update(&shactx, (unsigned char *)identifier, sdslen(identifier));
-     SHA1Final(instance->name.hash, &shactx);
-     sdsfree(identifier);
-+
-+    instance->cached = 0;
- }
- 
- sds
-@@ -1046,7 +1050,6 @@ pmwebapi_add_instance(struct indom *indo
- 	    instance->name.sds = sdscatlen(instance->name.sds, name, length);
- 	    pmwebapi_string_hash(instance->name.id, name, length);
- 	    pmwebapi_instance_hash(indom, instance);
--	    instance->cached = 0;
- 	}
- 	return instance;
-     }
-@@ -1202,12 +1205,14 @@ struct metric *
- pmwebapi_add_metric(context_t *cp, const sds base, pmDesc *desc, int numnames, char **names)
- {
-     struct metric	*metric;
--    sds			name = sdsempty();
-+    sds			name;
-     int			i;
- 
-     /* search for a match on any of the given names */
-     if (base && (metric = dictFetchValue(cp->metrics, base)) != NULL)
- 	return metric;
-+
-+    name = sdsempty();
-     for (i = 0; i < numnames; i++) {
- 	sdsclear(name);
- 	name = sdscat(name, names[i]);
-@@ -1217,6 +1222,7 @@ pmwebapi_add_metric(context_t *cp, const
- 	}
-     }
-     sdsfree(name);
-+
-     return pmwebapi_new_metric(cp, base, desc, numnames, names);
- }
- 
-@@ -1230,21 +1236,24 @@ pmwebapi_new_pmid(context_t *cp, const s
-     int			sts, numnames;
- 
-     if ((sts = pmUseContext(cp->context)) < 0) {
--	fprintf(stderr, "failed to use context for PMID %s: %s",
-+	fprintf(stderr, "%s: failed to use context for PMID %s: %s\n",
-+		"pmwebapi_new_pmid",
- 		pmIDStr_r(pmid, buffer, sizeof(buffer)),
- 		pmErrStr_r(sts, errmsg, sizeof(errmsg)));
-     } else if ((sts = pmLookupDesc(pmid, &desc)) < 0) {
- 	if (sts == PM_ERR_IPC)
- 	    cp->setup = 0;
- 	if (pmDebugOptions.series)
--	    fprintf(stderr, "failed to lookup metric %s descriptor: %s",
-+	    fprintf(stderr, "%s: failed to lookup metric %s descriptor: %s\n",
-+		    "pmwebapi_new_pmid",
- 		    pmIDStr_r(pmid, buffer, sizeof(buffer)),
- 		    pmErrStr_r(sts, errmsg, sizeof(errmsg)));
-     } else if ((numnames = sts = pmNameAll(pmid, &names)) < 0) {
- 	if (sts == PM_ERR_IPC)
- 	    cp->setup = 0;
- 	if (pmDebugOptions.series)
--	    fprintf(stderr, "failed to lookup metric %s names: %s",
-+	    fprintf(stderr, "%s: failed to lookup metric %s names: %s\n",
-+		    "pmwebapi_new_pmid",
- 		    pmIDStr_r(pmid, buffer, sizeof(buffer)),
- 		    pmErrStr_r(sts, errmsg, sizeof(errmsg)));
-     } else {
-diff -Naurp pcp-5.0.2.orig/src/pmproxy/pmproxy.conf pcp-5.0.2/src/pmproxy/pmproxy.conf
---- pcp-5.0.2.orig/src/pmproxy/pmproxy.conf	2019-08-09 15:50:17.000000000 +1000
-+++ pcp-5.0.2/src/pmproxy/pmproxy.conf	2020-02-03 13:36:03.948721355 +1100
-@@ -43,6 +43,11 @@ secure.enabled = true
- # propagate archives from pmlogger(1) into Redis querying
- enabled = true
- 
-+# metric name regex to skip during discovery (e.g. due to high volume)
-+exclude.metrics = proc.*
-+
-+# comma-separated list of instance domains to skip during discovery
-+exclude.indoms = 3.9,79.7
- 
- #####################################################################
- ## settings for fast, scalable time series querying via Redis
-diff -Naurp pcp-5.0.2.orig/src/pmproxy/src/redis.c pcp-5.0.2/src/pmproxy/src/redis.c
---- pcp-5.0.2.orig/src/pmproxy/src/redis.c	2019-12-02 16:39:33.000000000 +1100
-+++ pcp-5.0.2/src/pmproxy/src/redis.c	2020-02-03 13:36:13.585620539 +1100
-@@ -145,11 +145,11 @@ setup_redis_module(struct proxy *proxy)
- 	proxy->slots = redisSlotsConnect(proxy->config,
- 			flags, proxylog, on_redis_connected,
- 			proxy, proxy->events, proxy);
--	if (archive_discovery)
-+	if (archive_discovery && series_queries)
- 	    pmDiscoverSetSlots(&redis_discover.module, proxy->slots);
-     }
- 
--    if (archive_discovery) {
-+    if (archive_discovery && series_queries) {
- 	pmDiscoverSetEventLoop(&redis_discover.module, proxy->events);
- 	pmDiscoverSetConfiguration(&redis_discover.module, proxy->config);
- 	pmDiscoverSetMetricRegistry(&redis_discover.module, metric_registry);
diff --git a/SOURCES/multilib-pcp-devel.patch b/SOURCES/multilib-pcp-devel.patch
deleted file mode 100644
index 74bfaf3..0000000
--- a/SOURCES/multilib-pcp-devel.patch
+++ /dev/null
@@ -1,22 +0,0 @@
-commit 43cc36abff0fbaa5b5b434ca17b4b74f45dad98a
-Author: Nathan Scott <nathans@redhat.com>
-Date:   Thu Jan 9 16:01:18 2020 +1100
-
-    build: ensure generated demo Makefile matches on 32/64 Linux
-    
-    This corrects a multilib issue with the pcp-devel RPMs.
-    Resolves Red Hat bugzilla #1788119
-
-diff --git a/src/pmdas/trace/GNUmakefile b/src/pmdas/trace/GNUmakefile
-index b7087d017..b5e0589ad 100644
---- a/src/pmdas/trace/GNUmakefile
-+++ b/src/pmdas/trace/GNUmakefile
-@@ -101,7 +101,7 @@ MY_INC_DIR	= -I$(PCP_INC_DIR)/..
- else
- MY_INC_DIR	=
- endif
--ifneq "$(PCP_LIB_DIR)" "/usr/lib"
-+ifeq "$(findstring $(PCP_LIB_DIR), /usr/lib /usr/lib64)" ""
- # for ld add -L<run-time-lib-dir> and include -rpath when
- # $(PCP_LIB_DIR) may not be on the default ld search path.
- #
diff --git a/SOURCES/redhat-bugzilla-1541406.patch b/SOURCES/redhat-bugzilla-1541406.patch
new file mode 100644
index 0000000..9841486
--- /dev/null
+++ b/SOURCES/redhat-bugzilla-1541406.patch
@@ -0,0 +1,83 @@
+91dd4ae6b logutil: use $PCP_TMPFILE_DIR for intermediate pmlogger_merge archives
+b0c90d858 packaging: activate pmlogger_rewrite on upgrades
+b5e602187 packaging: revert pcp_archive_dir substitution in build/rpm/GNUmakefile
+
+--- a/src/pmlogger/pmlogger_merge.sh	2018-06-18 16:24:25.000000000 +1000
++++ b/src/pmlogger/pmlogger_merge.sh	2020-06-11 13:10:57.401576513 +1000
+@@ -26,8 +26,9 @@
+ 
+ prog=`basename $0`
+ tmp=`mktemp -d /tmp/pcp.XXXXXXXXX` || exit 1
++tmpmerge=`mktemp -d $PCP_TMPFILE_DIR/pcp.XXXXXXXXX` || exit 1
+ status=0
+-trap "rm -rf $tmp; exit \$status" 0 1 2 3 15
++trap "rm -rf $tmp $tmpmerge; exit \$status" 0 1 2 3 15
+ 
+ force=false
+ VERBOSE=false
+@@ -229,8 +230,8 @@
+ 	    # output = 108 file descriptors which should be well below any
+ 	    # shell-imposed or system-imposed limits
+ 	    #
+-	    $VERBOSE && echo "		-> partial merge to $tmp/$part"
+-	    cmd="pmlogextract $list $tmp/$part"
++	    $VERBOSE && echo "		-> partial merge to $tmpmerge/$part"
++	    cmd="pmlogextract $list $tmpmerge/$part"
+ 	    if $SHOWME
+ 	    then
+ 		echo "+ $cmd"
+@@ -239,13 +240,13 @@
+ 		then
+ 		    :
+ 		else
+-		    $VERBOSE || echo "		-> partial merge to $tmp/$part"
++		    $VERBOSE || echo "		-> partial merge to $tmpmerge/$part"
+ 		    echo "$prog: Directory: `pwd`"
+-		    echo "$prog: Failed: pmlogextract $list $tmp/$part"
++		    echo "$prog: Failed: pmlogextract $list $tmpmerge/$part"
+ 		    _warning
+ 		fi
+ 	    fi
+-	    list=$tmp/$part
++	    list=$tmpmerge/$part
+ 	    part=`expr $part + 1`
+ 	    i=0
+ 	fi
+--- a/build/rpm/fedora.spec	2020-05-29 09:15:44.000000000 +1000
++++ b/build/rpm/fedora.spec	2020-06-11 13:10:57.402576528 +1000
+@@ -2814,6 +2814,7 @@
+ chown -R pcp:pcp %{_logsdir}/pmie 2>/dev/null
+ chown -R pcp:pcp %{_logsdir}/pmproxy 2>/dev/null
+ %{install_file "$PCP_PMNS_DIR" .NeedRebuild}
++%{install_file "$PCP_LOG_DIR/pmlogger" .NeedRewrite}
+ %if !%{disable_systemd}
+     %systemd_postun_with_restart pmcd.service
+     %systemd_post pmcd.service
+--- a/build/rpm/pcp.spec.in	2020-05-29 09:16:19.000000000 +1000
++++ b/build/rpm/pcp.spec.in	2020-06-11 13:10:57.402576528 +1000
+@@ -3149,6 +3149,7 @@
+ chown -R pcp:pcp "$PCP_LOG_DIR/pmie" 2>/dev/null
+ chown -R pcp:pcp "$PCP_LOG_DIR/pmproxy" 2>/dev/null
+ %{install_file "$PCP_PMNS_DIR" .NeedRebuild}
++%{install_file "$PCP_LOG_DIR/pmlogger" .NeedRewrite}
+ %if "@enable_systemd@" == "true"
+     %systemd_postun_with_restart pmcd.service
+     %systemd_post pmcd.service
+--- a/debian/pcp.postinst.tail	2019-06-13 09:59:16.000000000 +1000
++++ b/debian/pcp.postinst.tail	2020-06-11 13:10:57.402576528 +1000
+@@ -6,6 +6,8 @@
+ 
+ touch /var/lib/pcp/pmns/.NeedRebuild
+ chmod 644 /var/lib/pcp/pmns/.NeedRebuild
++touch /var/log/pcp/pmlogger/.NeedRewrite
++chmod 644 /var/log/pcp/pmlogger/.NeedRewrite
+ 
+ getent group pcp >/dev/null || groupadd -r pcp
+ getent passwd pcp >/dev/null || \
+--- a/debian/pcp.prerm	2017-08-17 10:54:50.000000000 +1000
++++ b/debian/pcp.prerm	2020-06-11 13:10:57.402576528 +1000
+@@ -24,3 +24,4 @@
+     fi
+ fi
+ rm -f /var/lib/pcp/pmns/.NeedRebuild
++rm -f /var/log/pcp/pmlogger/.NeedRewrite
diff --git a/SOURCES/redhat-bugzilla-1790433.patch b/SOURCES/redhat-bugzilla-1790433.patch
new file mode 100644
index 0000000..31643bd
--- /dev/null
+++ b/SOURCES/redhat-bugzilla-1790433.patch
@@ -0,0 +1,21 @@
+BZ 1790433 - Missing dependency of pcp-pmda-snmp on net-snmp-perl
+bc4abb291 pmdasnmp: add Install checks for the required Net::SNMP module
+
+diff --git a/src/pmdas/snmp/Install b/src/pmdas/snmp/Install
+index 7fe4193e4..21a76ab56 100755
+--- a/src/pmdas/snmp/Install
++++ b/src/pmdas/snmp/Install
+@@ -22,6 +22,13 @@ iam=snmp
+ perl_opt=true
+ daemon_opt=false
+ 
++perl -e "use Net::SNMP" 2>/dev/null
++if test $? -ne 0; then
++    echo "Net::SNMP (Simple Network Management Protocol) perl module is not installed"
++    status=1
++    exit
++fi
++
+ pmdaSetup
+ pmdaInstall
+ exit
diff --git a/SOURCES/redhat-bugzilla-1790452.patch b/SOURCES/redhat-bugzilla-1790452.patch
new file mode 100644
index 0000000..ef28a8f
--- /dev/null
+++ b/SOURCES/redhat-bugzilla-1790452.patch
@@ -0,0 +1,45 @@
+BZ 1790452 - Installation of pcp-pmda-samba causes SELinux issues
+73772a60f selinux: fix pmdasamba(1) operating with selinux enforcing
+
+--- a/qa/917.out.in	2020-05-19 20:34:46.000000000 +1000
++++ pcp-5.1.1/qa/917.out.in	2020-06-22 17:29:14.346713826 +1000
+@@ -34,6 +34,8 @@
+ ! allow [pcp_pmcd_t] [unreserved_port_t] : [tcp_socket] { name_bind name_connect };
+ ! allow [pcp_pmcd_t] [unreserved_port_t] : [udp_socket] { name_bind };
+ ! allow [pcp_pmlogger_t] [unreserved_port_t] : [tcp_socket] { name_bind };
++  allow [pcp_pmcd_t] [samba_var_t] : [dir] { add_name write };
++  allow [pcp_pmcd_t] [samba_var_t] : [file] { create };
+   allow [pcp_pmcd_t] [websm_port_t] : [tcp_socket] { name_connect };
+ ! allow [pcp_pmcd_t] [pcp_tmp_t] : [file] { execute execute_no_trans map };
+   allow [pcp_pmcd_t] [hostname_exec_t] : [file] { execute execute_no_trans getattr open read };
+--- a/src/pmdas/samba/pmdasamba.pl	2020-02-04 14:51:57.000000000 +1100
++++ pcp-5.1.1/src/pmdas/samba/pmdasamba.pl	2020-06-22 17:29:14.346713826 +1000
+@@ -41,6 +41,7 @@
+ 	$pmda->err("pmdasamba failed to open $smbstats pipe: $!");
+ 
+     while (<STATS>) {
++	$_ =~ s/"//g;
+ 	if (m/^\*\*\*\*\s+(\w+[^*]*)\**$/) {
+ 	    my $heading = $1;
+ 	    $heading =~ s/ +$//g;
+--- a/src/selinux/pcpupstream.te.in	2020-05-19 20:34:32.000000000 +1000
++++ pcp-5.1.1/src/selinux/pcpupstream.te.in	2020-06-22 17:29:14.347713837 +1000
+@@ -22,6 +22,7 @@
+ 	type pcp_pmie_exec_t; # pmda.summary
+ 	type ping_exec_t; # pmda.netcheck
+ 	type openvswitch_exec_t; # pmda.openvswitch
++	type samba_var_t; # pmda.samba
+ 	type websm_port_t; # pmda.openmetrics
+         type system_cronjob_t;
+         type user_home_t;
+@@ -151,6 +152,10 @@
+ #type=AVC msg=audit(YYY.94): avc: denied { name_bind } for pid=9365 comm=pmlogger src=4332 scontext=system_u:system_r:pcp_pmlogger_t:s0 tcontext=system_u:object_r:unreserved_port_t:s0 tclass=tcp_socket permissive=0
+ @PCP_UNRESERVED_PORT_RULE_PMLOGGER@
+ 
++#type=AVC msg=audit(YYY.97): avc: denied { write } for pid=3507787 comm="smbstatus" name="msg.lock" dev="dm-0" ino=283321 scontext=system_u:system_r:pcp_pmcd_t:s0 tcontext=system_u:object_r:samba_var_t:s0 tclass=dir permissive=0
++allow pcp_pmcd_t samba_var_t:dir { add_name write }; # pmda.samba
++allow pcp_pmcd_t samba_var_t:file { create }; # pmda.samba
++
+ #type=AVC msg=audit(YYY.15): avc:  denied  { name_connect } for  pid=13816 comm="python3" dest=9090 scontext=system_u:system_r:pcp_pmcd_t:s0 tcontext=system_u:object_r:websm_port_t:s0 tclass=tcp_socket permissive=0
+ allow pcp_pmcd_t websm_port_t:tcp_socket name_connect; # pmda.openmetrics
+ 
diff --git a/SOURCES/redhat-bugzilla-1792971.patch b/SOURCES/redhat-bugzilla-1792971.patch
new file mode 100644
index 0000000..37e6e69
--- /dev/null
+++ b/SOURCES/redhat-bugzilla-1792971.patch
@@ -0,0 +1,1351 @@
+5af58c8af pmdastatsd: fix minor sizeof issues found by Coverity scan
+b3f78dc82 pmlogconf: fix resource leak found by coverity scan
+8a3ed1b26 pmdastatsd: initialize stack variable to keep Coverity happy
+6902959e5 pmdastatsd: fix Coverity LOCK issues on error paths
+548cad8c5 libpcp_web: ensure context is freed only after timer is fully closed
+01e8bb436 services: pmlogger and pmie services want pmcd on boot
+20959e794 Fix of 1845241 - Intermittent pmlogconf core dumps
+32d6febf4 pcp-atop: resolve other paths of potential null task pointer dereference
+cda567efe pmproxy: improve diagnostics, particularly relating to http requests
+e0bb9e66c pmproxy: cleanup, remove unused flags and dead code in http encoding
+9da331eb8 pmproxy: support the OPTIONS protocol in HTTP 1.1
+1d84081af libpcp_web: add resilience to descriptor lookup paths
+
+--- a/src/pmdas/statsd/src/aggregator-metric-duration-exact.c	2019-08-21 11:33:26.000000000 +1000
++++ b/src/pmdas/statsd/src/aggregator-metric-duration-exact.c	2020-06-11 13:10:57.393576397 +1000
+@@ -45,7 +45,7 @@
+     double** new_values = realloc(collection->values, sizeof(double*) * new_length);
+     ALLOC_CHECK("Unable to allocate memory for collection value.");
+     collection->values = new_values;
+-    collection->values[collection->length] = (double*) malloc(sizeof(double*));
++    collection->values[collection->length] = (double*) malloc(sizeof(double));
+     ALLOC_CHECK("Unable to allocate memory for duration collection value.");
+     *(collection->values[collection->length]) = value;
+     collection->length = new_length;
+--- a/src/pmdas/statsd/src/aggregator-metric-labels.c	2020-02-18 16:32:40.000000000 +1100
++++ b/src/pmdas/statsd/src/aggregator-metric-labels.c	2020-06-11 13:10:57.393576397 +1000
+@@ -140,7 +140,7 @@
+ 
+ static char*
+ create_instance_label_segment_str(char* tags) {
+-    char buffer[JSON_BUFFER_SIZE];
++    char buffer[JSON_BUFFER_SIZE] = {'\0'};
+     size_t tags_length = strlen(tags) + 1;
+     if (tags_length > JSON_BUFFER_SIZE) {
+         return NULL;
+@@ -197,7 +197,7 @@
+     ALLOC_CHECK("Unable to allocate memory for labels string in metric label record.");
+     memcpy((*out)->labels, datagram->tags, labels_length);
+     struct metric_label_metadata* meta = 
+-        (struct metric_label_metadata*) malloc(sizeof(struct metric_label_metadata*));
++        (struct metric_label_metadata*) malloc(sizeof(struct metric_label_metadata));
+     ALLOC_CHECK("Unable to allocate memory for metric label metadata.");
+     (*out)->meta = meta;
+     (*out)->type = METRIC_TYPE_NONE;
+--- a/src/pmdas/statsd/src/network-listener.c	2019-08-27 11:09:16.000000000 +1000
++++ b/src/pmdas/statsd/src/network-listener.c	2020-06-11 13:10:57.393576397 +1000
+@@ -68,7 +68,7 @@
+     struct timeval tv;
+     freeaddrinfo(res);
+     int max_udp_packet_size = config->max_udp_packet_size;
+-    char *buffer = (char *) malloc(max_udp_packet_size * sizeof(char*));
++    char *buffer = (char *) malloc(max_udp_packet_size * sizeof(char));
+     struct sockaddr_storage src_addr;
+     socklen_t src_addr_len = sizeof(src_addr);
+     int rv;
+--- a/src/pmlogconf/pmlogconf.c	2020-05-23 13:33:27.000000000 +1000
++++ b/src/pmlogconf/pmlogconf.c	2020-06-11 13:10:57.394576411 +1000
+@@ -735,7 +735,7 @@
+ static int
+ evaluate_number_values(group_t *group, int type, numeric_cmp_t compare)
+ {
+-    unsigned int	i, found;
++    int			i, found;
+     pmValueSet		*vsp;
+     pmValue		*vp;
+     pmAtomValue		atom;
+@@ -769,7 +769,7 @@
+ static int
+ evaluate_string_values(group_t *group, string_cmp_t compare)
+ {
+-    unsigned int	i, found;
++    int			i, found;
+     pmValueSet		*vsp;
+     pmValue		*vp;
+     pmAtomValue		atom;
+@@ -828,7 +828,7 @@
+ static int
+ evaluate_string_regexp(group_t *group, regex_cmp_t compare)
+ {
+-    unsigned int	i, found;
++    int			i, found;
+     pmValueSet		*vsp;
+     pmValue		*vp;
+     pmAtomValue		atom;
+@@ -1478,6 +1478,10 @@
+ 	} else if (strncmp("#+ groupdir ", bytes, 12) == 0) {
+ 	    group_dircheck(bytes + 12);
+ 	} else if (strncmp("#+ ", bytes, 3) == 0) {
++	    if (group) {
++		/* reported by COVERITY RESOURCE LEAK */
++	    	group_free(group);
++	    }
+ 	    group = group_create(bytes + 3, line);
+ 	    head = 0;
+ 	} else if (group) {
+--- a/src/pmdas/statsd/src/aggregator-metrics.c	2020-02-18 16:32:40.000000000 +1100
++++ b/src/pmdas/statsd/src/aggregator-metrics.c	2020-06-11 13:10:57.394576411 +1000
+@@ -212,7 +212,10 @@
+     VERBOSE_LOG(0, "Writing metrics to file...");
+     pthread_mutex_lock(&container->mutex);
+     metrics* m = container->metrics;
+-    if (strlen(config->debug_output_filename) == 0) return; 
++    if (strlen(config->debug_output_filename) == 0) {
++        pthread_mutex_unlock(&container->mutex);
++        return; 
++    }
+     int sep = pmPathSeparator();
+     char debug_output[MAXPATHLEN];
+     pmsprintf(
+--- a/src/pmdas/statsd/src/aggregator-stats.c	2020-02-18 16:32:40.000000000 +1100
++++ b/src/pmdas/statsd/src/aggregator-stats.c	2020-06-11 13:10:57.394576411 +1000
+@@ -141,7 +141,10 @@
+ write_stats_to_file(struct agent_config* config, struct pmda_stats_container* stats) {
+     VERBOSE_LOG(0, "Writing stats to file...");
+     pthread_mutex_lock(&stats->mutex);
+-    if (strlen(config->debug_output_filename) == 0) return; 
++    if (strlen(config->debug_output_filename) == 0) {
++        pthread_mutex_unlock(&stats->mutex);
++        return; 
++    }
+     int sep = pmPathSeparator();
+     char debug_output[MAXPATHLEN];
+     pmsprintf(
+--- a/src/libpcp_web/src/webgroup.c	2020-05-22 11:29:27.000000000 +1000
++++ b/src/libpcp_web/src/webgroup.c	2020-06-11 13:10:57.394576411 +1000
+@@ -56,17 +56,28 @@
+ }
+ 
+ static void
++webgroup_release_context(uv_handle_t *handle)
++{
++    struct context	*context = (struct context *)handle->data;
++
++    if (pmDebugOptions.http)
++	fprintf(stderr, "releasing context %p\n", context);
++
++    pmwebapi_free_context(context);
++}
++
++static void
+ webgroup_destroy_context(struct context *context, struct webgroups *groups)
+ {
+     context->garbage = 1;
+ 
+     if (pmDebugOptions.http)
+-	fprintf(stderr, "freeing context %p\n", context);
++	fprintf(stderr, "destroying context %p\n", context);
+ 
+     uv_timer_stop(&context->timer);
+     if (groups)
+ 	dictUnlink(groups->contexts, &context->randomid);
+-    pmwebapi_free_context(context);
++    uv_close((uv_handle_t *)&context->timer, webgroup_release_context);
+ }
+ 
+ static void
+--- a/src/pmie/pmie.service.in	2020-05-27 13:36:47.000000000 +1000
++++ b/src/pmie/pmie.service.in	2020-06-11 13:10:57.394576411 +1000
+@@ -4,6 +4,7 @@
+ After=network-online.target pmcd.service
+ After=pmie_check.timer pmie_check.path pmie_daily.timer
+ BindsTo=pmie_check.timer pmie_check.path pmie_daily.timer
++Wants=pmcd.service
+ 
+ [Service]
+ Type=notify
+--- a/src/pmlogger/pmlogger.service.in	2020-05-22 16:48:32.000000000 +1000
++++ b/src/pmlogger/pmlogger.service.in	2020-06-11 13:10:57.394576411 +1000
+@@ -4,6 +4,7 @@
+ After=network-online.target pmcd.service
+ After=pmlogger_check.timer pmlogger_check.path pmlogger_daily.timer pmlogger_daily-poll.timer
+ BindsTo=pmlogger_check.timer pmlogger_check.path pmlogger_daily.timer pmlogger_daily-poll.timer
++Wants=pmcd.service
+ 
+ [Service]
+ Type=notify
+--- a/src/pcp/atop/showgeneric.c	2020-03-30 12:13:55.000000000 +1100
++++ b/src/pcp/atop/showgeneric.c	2020-06-11 13:10:57.395576426 +1000
+@@ -2024,6 +2024,9 @@
+ 	*/
+ 	for (numusers=i=0; i < numprocs; i++, curprocs++)
+ 	{
++	        if (*curprocs == NULL)
++		        continue;
++		
+ 		if (procsuppress(*curprocs, &procsel))
+ 			continue;
+ 
+@@ -2069,6 +2072,9 @@
+ 	*/
+ 	for (numprogs=i=0; i < numprocs; i++, curprocs++)
+ 	{
++	        if (*curprocs == NULL)
++		        continue;
++		
+ 		if (procsuppress(*curprocs, &procsel))
+ 			continue;
+ 
+@@ -2112,6 +2118,9 @@
+ 	*/
+ 	for (numconts=i=0; i < numprocs; i++, curprocs++)
+ 	{
++	        if (*curprocs == NULL)
++		        continue;
++		
+ 		if (procsuppress(*curprocs, &procsel))
+ 			continue;
+ 
+--- a/src/libpcp_web/src/exports	2020-05-22 15:38:47.000000000 +1000
++++ b/src/libpcp_web/src/exports	2020-06-11 13:10:57.397576455 +1000
+@@ -189,3 +189,14 @@
+     pmWebGroupDestroy;
+     sdsKeyDictCallBacks;
+ } PCP_WEB_1.12;
++
++PCP_WEB_1.14 {
++  global:
++    dictFetchValue;
++    http_method_str;
++    http_body_is_final;
++    http_parser_version;
++    http_parser_url_init;
++    http_parser_parse_url;
++    http_parser_settings_init;
++} PCP_WEB_1.13;
+--- a/src/pmproxy/src/http.c	2020-03-23 09:47:47.000000000 +1100
++++ b/src/pmproxy/src/http.c	2020-06-11 13:10:57.398576470 +1000
+@@ -21,6 +21,18 @@
+ static int chunked_transfer_size; /* pmproxy.chunksize, pagesize by default */
+ static int smallest_buffer_size = 128;
+ 
++#define MAX_PARAMS_SIZE 4096
++#define MAX_HEADERS_SIZE 128
++
++static sds HEADER_ACCESS_CONTROL_REQUEST_HEADERS,
++	   HEADER_ACCESS_CONTROL_REQUEST_METHOD,
++	   HEADER_ACCESS_CONTROL_ALLOW_METHODS,
++	   HEADER_ACCESS_CONTROL_ALLOW_HEADERS,
++	   HEADER_ACCESS_CONTROL_ALLOW_ORIGIN,
++	   HEADER_ACCESS_CONTROL_ALLOWED_HEADERS,
++	   HEADER_CONNECTION, HEADER_CONTENT_LENGTH,
++	   HEADER_ORIGIN, HEADER_WWW_AUTHENTICATE;
++
+ /*
+  * Simple helpers to manage the cumulative addition of JSON
+  * (arrays and/or objects) to a buffer.
+@@ -121,45 +133,9 @@
+ 	return "text/html";
+     if (flags & HTTP_FLAG_TEXT)
+ 	return "text/plain";
+-    if (flags & HTTP_FLAG_JS)
+-	return "text/javascript";
+-    if (flags & HTTP_FLAG_CSS)
+-	return "text/css";
+-    if (flags & HTTP_FLAG_ICO)
+-	return "image/x-icon";
+-    if (flags & HTTP_FLAG_JPG)
+-	return "image/jpeg";
+-    if (flags & HTTP_FLAG_PNG)
+-	return "image/png";
+-    if (flags & HTTP_FLAG_GIF)
+-	return "image/gif";
+     return "application/octet-stream";
+ }
+ 
+-http_flags
+-http_suffix_type(const char *suffix)
+-{
+-    if (strcmp(suffix, "js") == 0)
+-	return HTTP_FLAG_JS;
+-    if (strcmp(suffix, "ico") == 0)
+-	return HTTP_FLAG_ICO;
+-    if (strcmp(suffix, "css") == 0)
+-	return HTTP_FLAG_CSS;
+-    if (strcmp(suffix, "png") == 0)
+-	return HTTP_FLAG_PNG;
+-    if (strcmp(suffix, "gif") == 0)
+-	return HTTP_FLAG_GIF;
+-    if (strcmp(suffix, "jpg") == 0)
+-	return HTTP_FLAG_JPG;
+-    if (strcmp(suffix, "jpeg") == 0)
+-	return HTTP_FLAG_JPG;
+-    if (strcmp(suffix, "html") == 0)
+-	return HTTP_FLAG_HTML;
+-    if (strcmp(suffix, "txt") == 0)
+-	return HTTP_FLAG_TEXT;
+-    return 0;
+-}
+-
+ static const char * const
+ http_content_encoding(http_flags flags)
+ {
+@@ -259,26 +235,28 @@
+ 
+     header = sdscatfmt(sdsempty(),
+ 		"HTTP/%u.%u %u %s\r\n"
+-		"Connection: Keep-Alive\r\n"
+-		"Access-Control-Allow-Origin: *\r\n"
+-		"Access-Control-Allow-Headers: Accept, Accept-Language, Content-Language, Content-Type\r\n",
++		"%S: Keep-Alive\r\n",
+ 		parser->http_major, parser->http_minor,
+-		sts, http_status_mapping(sts));
++		sts, http_status_mapping(sts), HEADER_CONNECTION);
++    header = sdscatfmt(header,
++		"%S: *\r\n"
++		"%S: %S\r\n",
++		HEADER_ACCESS_CONTROL_ALLOW_ORIGIN,
++		HEADER_ACCESS_CONTROL_ALLOW_HEADERS,
++		HEADER_ACCESS_CONTROL_ALLOWED_HEADERS);
+ 
+     if (sts == HTTP_STATUS_UNAUTHORIZED && client->u.http.realm)
+-	header = sdscatfmt(header, "WWW-Authenticate: Basic realm=\"%S\"\r\n",
+-				client->u.http.realm);
++	header = sdscatfmt(header, "%S: Basic realm=\"%S\"\r\n",
++				HEADER_WWW_AUTHENTICATE, client->u.http.realm);
+ 
+-    if ((flags & HTTP_FLAG_STREAMING))
+-	header = sdscatfmt(header, "Transfer-encoding: %s\r\n", "chunked");
+-
+-    if (!(flags & HTTP_FLAG_STREAMING))
+-	header = sdscatfmt(header, "Content-Length: %u\r\n", length);
++    if ((flags & (HTTP_FLAG_STREAMING | HTTP_FLAG_NO_BODY)))
++	header = sdscatfmt(header, "Transfer-encoding: chunked\r\n");
++    else
++	header = sdscatfmt(header, "%S: %u\r\n", HEADER_CONTENT_LENGTH, length);
+ 
+-    header = sdscatfmt(header,
+-		"Content-Type: %s%s\r\n"
+-		"Date: %s\r\n\r\n",
+-		http_content_type(flags), http_content_encoding(flags),
++    header = sdscatfmt(header, "Content-Type: %s%s\r\n",
++		http_content_type(flags), http_content_encoding(flags));
++    header = sdscatfmt(header, "Date: %s\r\n\r\n",
+ 		http_date_string(time(NULL), date, sizeof(date)));
+ 
+     if (pmDebugOptions.http && pmDebugOptions.desperate) {
+@@ -288,8 +266,130 @@
+     return header;
+ }
+ 
++static sds
++http_header_value(struct client *client, sds header)
++{
++    if (client->u.http.headers == NULL)
++	return NULL;
++    return (sds)dictFetchValue(client->u.http.headers, header);
++}
++
++static sds
++http_headers_allowed(sds headers)
++{
++    (void)headers;
++    return sdsdup(HEADER_ACCESS_CONTROL_ALLOWED_HEADERS);
++}
++
++/* check whether the (preflight) method being proposed is acceptable */
++static int
++http_method_allowed(sds value, http_options options)
++{
++    if (strcmp(value, "GET") == 0 && (options & HTTP_OPT_GET))
++	return 1;
++    if (strcmp(value, "PUT") == 0 && (options & HTTP_OPT_PUT))
++	return 1;
++    if (strcmp(value, "POST") == 0 && (options & HTTP_OPT_POST))
++	return 1;
++    if (strcmp(value, "HEAD") == 0 && (options & HTTP_OPT_HEAD))
++	return 1;
++    if (strcmp(value, "TRACE") == 0 && (options & HTTP_OPT_TRACE))
++	return 1;
++    return 0;
++}
++
++static char *
++http_methods_string(char *buffer, size_t length, http_options options)
++{
++    char		*p = buffer;
++
++    /* ensure room for all options, spaces and comma separation */
++    if (!options || length < 48)
++	return NULL;
++
++    memset(buffer, 0, length);
++    if (options & HTTP_OPT_GET)
++	strcat(p, ", GET");
++    if (options & HTTP_OPT_PUT)
++	strcat(p, ", PUT");
++    if (options & HTTP_OPT_HEAD)
++	strcat(p, ", HEAD");
++    if (options & HTTP_OPT_POST)
++	strcat(p, ", POST");
++    if (options & HTTP_OPT_TRACE)
++	strcat(p, ", TRACE");
++    if (options & HTTP_OPT_OPTIONS)
++	strcat(p, ", OPTIONS");
++    return p + 2; /* skip leading comma+space */
++}
++
++static sds
++http_response_trace(struct client *client)
++{
++    dictIterator	*iterator;
++    dictEntry		*entry;
++    sds			result = sdsempty();
++
++    iterator = dictGetSafeIterator(client->u.http.headers);
++    while ((entry = dictNext(iterator)) != NULL)
++	result = sdscatfmt("%S: %S\r\n", dictGetKey(entry), dictGetVal(entry));
++    dictReleaseIterator(iterator);
++    return result;
++}
++
++static sds
++http_response_access(struct client *client, http_code sts, http_options options)
++{
++    struct http_parser	*parser = &client->u.http.parser;
++    char		buffer[64];
++    sds			header, value, result;
++
++    value = http_header_value(client, HEADER_ACCESS_CONTROL_REQUEST_METHOD);
++    if (value && http_method_allowed(value, options) == 0)
++	sts = HTTP_STATUS_METHOD_NOT_ALLOWED;
++
++    parser->http_major = parser->http_minor = 1;
++
++    header = sdscatfmt(sdsempty(),
++		"HTTP/%u.%u %u %s\r\n"
++		"%S: Keep-Alive\r\n",
++		parser->http_major, parser->http_minor,
++		sts, http_status_mapping(sts), HEADER_CONNECTION);
++    header = sdscatfmt(header, "%S: %u\r\n", HEADER_CONTENT_LENGTH, 0);
++
++    if (sts >= HTTP_STATUS_OK && sts < HTTP_STATUS_BAD_REQUEST) {
++	if ((value = http_header_value(client, HEADER_ORIGIN)))
++	    header = sdscatfmt(header, "%S: %S\r\n",
++			        HEADER_ACCESS_CONTROL_ALLOW_ORIGIN, value);
++
++	header = sdscatfmt(header, "%S: %s\r\n",
++			    HEADER_ACCESS_CONTROL_ALLOW_METHODS,
++			    http_methods_string(buffer, sizeof(buffer), options));
++
++	value = http_header_value(client, HEADER_ACCESS_CONTROL_REQUEST_HEADERS);
++	if (value && (result = http_headers_allowed(value)) != NULL) {
++	    header = sdscatfmt(header, "%S: %S\r\n",
++				HEADER_ACCESS_CONTROL_ALLOW_HEADERS, result);
++	    sdsfree(result);
++	}
++    }
++    if (sts == HTTP_STATUS_UNAUTHORIZED && client->u.http.realm)
++	header = sdscatfmt(header, "%S: Basic realm=\"%S\"\r\n",
++			    HEADER_WWW_AUTHENTICATE, client->u.http.realm);
++
++    header = sdscatfmt(header, "Date: %s\r\n\r\n",
++		http_date_string(time(NULL), buffer, sizeof(buffer)));
++
++    if (pmDebugOptions.http && pmDebugOptions.desperate) {
++	fprintf(stderr, "access response to client %p\n", client);
++	fputs(header, stderr);
++    }
++    return header;
++}
++
+ void
+-http_reply(struct client *client, sds message, http_code sts, http_flags type)
++http_reply(struct client *client, sds message,
++		http_code sts, http_flags type, http_options options)
+ {
+     http_flags		flags = client->u.http.flags;
+     char		length[32]; /* hex length */
+@@ -313,6 +413,15 @@
+ 
+ 	suffix = sdsnewlen("0\r\n\r\n", 5);		/* chunked suffix */
+ 	client->u.http.flags &= ~HTTP_FLAG_STREAMING;	/* end of stream! */
++
++    } else if (flags & HTTP_FLAG_NO_BODY) {
++	if (client->u.http.parser.method == HTTP_OPTIONS)
++	    buffer = http_response_access(client, sts, options);
++	else if (client->u.http.parser.method == HTTP_TRACE)
++	    buffer = http_response_trace(client);
++	else	/* HTTP_HEAD */
++	    buffer = http_response_header(client, 0, sts, type);
++	suffix = NULL;
+     } else {	/* regular non-chunked response - headers + response body */
+ 	if (client->buffer == NULL) {
+ 	    suffix = message;
+@@ -326,10 +435,11 @@
+ 	buffer = http_response_header(client, sdslen(suffix), sts, type);
+     }
+ 
+-    if (pmDebugOptions.http) {
+-	fprintf(stderr, "HTTP response (client=%p)\n%s%s",
+-			client, buffer, suffix);
+-    }
++    if (pmDebugOptions.http)
++	fprintf(stderr, "HTTP %s response (client=%p)\n%s%s",
++			http_method_str(client->u.http.parser.method),
++			client, buffer, suffix ? suffix : "");
++
+     client_write(client, buffer, suffix);
+ }
+ 
+@@ -363,7 +473,7 @@
+ 	if (pmDebugOptions.desperate)
+ 	    fputs(message, stderr);
+     }
+-    http_reply(client, message, status, HTTP_FLAG_HTML);
++    http_reply(client, message, status, HTTP_FLAG_HTML, 0);
+ }
+ 
+ void
+@@ -371,6 +481,7 @@
+ {
+     struct http_parser	*parser = &client->u.http.parser;
+     http_flags		flags = client->u.http.flags;
++    const char		*method;
+     sds			buffer, suffix;
+ 
+     /* If the client buffer length is now beyond a set maximum size,
+@@ -390,16 +501,18 @@
+ 		buffer = sdsempty();
+ 	    }
+ 	    /* prepend a chunked transfer encoding message length (hex) */
+-	    buffer = sdscatprintf(buffer, "%lX\r\n", (unsigned long)sdslen(client->buffer));
++	    buffer = sdscatprintf(buffer, "%lX\r\n",
++				 (unsigned long)sdslen(client->buffer));
+ 	    suffix = sdscatfmt(client->buffer, "\r\n");
+ 	    /* reset for next call - original released on I/O completion */
+ 	    client->buffer = NULL;	/* safe, as now held in 'suffix' */
+ 
+ 	    if (pmDebugOptions.http) {
+-		fprintf(stderr, "HTTP chunked buffer (client %p, len=%lu)\n%s"
+-				"HTTP chunked suffix (client %p, len=%lu)\n%s",
+-				client, (unsigned long)sdslen(buffer), buffer,
+-				client, (unsigned long)sdslen(suffix), suffix);
++		method = http_method_str(client->u.http.parser.method);
++		fprintf(stderr, "HTTP %s chunk buffer (client %p, len=%lu)\n%s"
++				"HTTP %s chunk suffix (client %p, len=%lu)\n%s",
++			method, client, (unsigned long)sdslen(buffer), buffer,
++			method, client, (unsigned long)sdslen(suffix), suffix);
+ 	    }
+ 	    client_write(client, buffer, suffix);
+ 
+@@ -527,6 +640,8 @@
+ 
+     if (length == 0)
+ 	return NULL;
++    if (length > MAX_PARAMS_SIZE)
++	return NULL;
+     for (p = url; p < end; p++) {
+ 	if (*p == '\0')
+ 	    break;
+@@ -558,6 +673,11 @@
+     struct servlet	*servlet;
+     sds			url;
+ 
++    if (pmDebugOptions.http || pmDebugOptions.appl0)
++	fprintf(stderr, "HTTP %s %.*s\n",
++			http_method_str(client->u.http.parser.method),
++			(int)length, offset);
++
+     if (!(url = http_url_decode(offset, length, &client->u.http.parameters)))
+ 	return NULL;
+     for (servlet = proxy->servlets; servlet != NULL; servlet = servlet->next) {
+@@ -576,13 +696,24 @@
+ {
+     struct client	*client = (struct client *)request->data;
+     struct servlet	*servlet;
++    sds			buffer;
+     int			sts;
+ 
+     http_client_release(client);	/* new URL, clean slate */
+-
+-    if ((servlet = servlet_lookup(client, offset, length)) != NULL) {
++    /* server options - https://tools.ietf.org/html/rfc7231#section-4.3.7 */
++    if (length == 1 && *offset == '*' &&
++	client->u.http.parser.method == HTTP_OPTIONS) {
++	buffer = http_response_access(client, HTTP_STATUS_OK, HTTP_SERVER_OPTIONS);
++	client_write(client, buffer, NULL);
++    } else if ((servlet = servlet_lookup(client, offset, length)) != NULL) {
+ 	client->u.http.servlet = servlet;
+ 	if ((sts = client->u.http.parser.status_code) == 0) {
++	    if (client->u.http.parser.method == HTTP_OPTIONS ||
++		client->u.http.parser.method == HTTP_TRACE ||
++		client->u.http.parser.method == HTTP_HEAD)
++		client->u.http.flags |= HTTP_FLAG_NO_BODY;
++	    else
++		client->u.http.flags &= ~HTTP_FLAG_NO_BODY;
+ 	    client->u.http.headers = dictCreate(&sdsOwnDictCallBacks, NULL);
+ 	    return 0;
+ 	}
+@@ -616,6 +747,11 @@
+ 
+     if (client->u.http.parser.status_code || !client->u.http.headers)
+ 	return 0;	/* already in process of failing connection */
++    if (dictSize(client->u.http.headers) >= MAX_HEADERS_SIZE) {
++	client->u.http.parser.status_code =
++		HTTP_STATUS_REQUEST_HEADER_FIELDS_TOO_LARGE;
++	return 0;
++    }
+ 
+     field = sdsnewlen(offset, length);
+     if (pmDebugOptions.http)
+@@ -826,6 +962,17 @@
+     if (chunked_transfer_size < smallest_buffer_size)
+ 	chunked_transfer_size = smallest_buffer_size;
+ 
++    HEADER_ACCESS_CONTROL_REQUEST_HEADERS = sdsnew("Access-Control-Request-Headers");
++    HEADER_ACCESS_CONTROL_REQUEST_METHOD = sdsnew("Access-Control-Request-Method");
++    HEADER_ACCESS_CONTROL_ALLOW_METHODS = sdsnew("Access-Control-Allow-Methods");
++    HEADER_ACCESS_CONTROL_ALLOW_HEADERS = sdsnew("Access-Control-Allow-Headers");
++    HEADER_ACCESS_CONTROL_ALLOW_ORIGIN = sdsnew("Access-Control-Allow-Origin");
++    HEADER_ACCESS_CONTROL_ALLOWED_HEADERS = sdsnew("Accept, Accept-Language, Content-Language, Content-Type");
++    HEADER_CONNECTION = sdsnew("Connection");
++    HEADER_CONTENT_LENGTH = sdsnew("Content-Length");
++    HEADER_ORIGIN = sdsnew("Origin");
++    HEADER_WWW_AUTHENTICATE = sdsnew("WWW-Authenticate");
++
+     register_servlet(proxy, &pmseries_servlet);
+     register_servlet(proxy, &pmwebapi_servlet);
+ }
+@@ -839,4 +986,15 @@
+ 	servlet->close(proxy);
+ 
+     proxymetrics_close(proxy, METRICS_HTTP);
++
++    sdsfree(HEADER_ACCESS_CONTROL_REQUEST_HEADERS);
++    sdsfree(HEADER_ACCESS_CONTROL_REQUEST_METHOD);
++    sdsfree(HEADER_ACCESS_CONTROL_ALLOW_METHODS);
++    sdsfree(HEADER_ACCESS_CONTROL_ALLOW_HEADERS);
++    sdsfree(HEADER_ACCESS_CONTROL_ALLOW_ORIGIN);
++    sdsfree(HEADER_ACCESS_CONTROL_ALLOWED_HEADERS);
++    sdsfree(HEADER_CONNECTION);
++    sdsfree(HEADER_CONTENT_LENGTH);
++    sdsfree(HEADER_ORIGIN);
++    sdsfree(HEADER_WWW_AUTHENTICATE);
+ }
+--- a/src/pmproxy/src/series.c	2020-02-25 17:47:56.000000000 +1100
++++ b/src/pmproxy/src/series.c	2020-06-11 13:10:57.398576470 +1000
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright (c) 2019 Red Hat.
++ * Copyright (c) 2019-2020 Red Hat.
+  *
+  * This program is free software; you can redistribute it and/or modify it
+  * under the terms of the GNU Lesser General Public License as published
+@@ -15,8 +15,7 @@
+ #include <assert.h>
+ 
+ typedef enum pmSeriesRestKey {
+-    RESTKEY_NONE	= 0,
+-    RESTKEY_SOURCE,
++    RESTKEY_SOURCE	= 1,
+     RESTKEY_DESC,
+     RESTKEY_INSTS,
+     RESTKEY_LABELS,
+@@ -29,7 +28,8 @@
+ 
+ typedef struct pmSeriesRestCommand {
+     const char		*name;
+-    unsigned int	size;
++    unsigned int	namelen : 16;
++    unsigned int	options : 16;
+     pmSeriesRestKey	key;
+ } pmSeriesRestCommand;
+ 
+@@ -39,7 +39,8 @@
+     pmSeriesFlags	flags;
+     pmSeriesTimeWindow	window;
+     uv_work_t		loading;
+-    unsigned int	working;
++    unsigned int	working : 1;
++    unsigned int	options : 16;
+     int			nsids;
+     pmSID		*sids;
+     pmSID		sid;
+@@ -55,16 +56,25 @@
+ } pmSeriesBaton;
+ 
+ static pmSeriesRestCommand commands[] = {
+-    { .key = RESTKEY_QUERY, .name = "query", .size = sizeof("query")-1 },
+-    { .key = RESTKEY_DESC,  .name = "descs",  .size = sizeof("descs")-1 },
+-    { .key = RESTKEY_INSTS, .name = "instances", .size = sizeof("instances")-1 },
+-    { .key = RESTKEY_LABELS, .name = "labels", .size = sizeof("labels")-1 },
+-    { .key = RESTKEY_METRIC, .name = "metrics", .size = sizeof("metrics")-1 },
+-    { .key = RESTKEY_SOURCE, .name = "sources", .size = sizeof("sources")-1 },
+-    { .key = RESTKEY_VALUES, .name = "values", .size = sizeof("values")-1 },
+-    { .key = RESTKEY_LOAD, .name = "load", .size = sizeof("load")-1 },
+-    { .key = RESTKEY_PING, .name = "ping", .size = sizeof("ping")-1 },
+-    { .key = RESTKEY_NONE }
++    { .key = RESTKEY_QUERY, .options = HTTP_OPTIONS_GET | HTTP_OPTIONS_POST,
++	    .name = "query", .namelen = sizeof("query")-1 },
++    { .key = RESTKEY_DESC, .options = HTTP_OPTIONS_GET | HTTP_OPTIONS_POST,
++	    .name = "descs", .namelen = sizeof("descs")-1 },
++    { .key = RESTKEY_INSTS, .options = HTTP_OPTIONS_GET | HTTP_OPTIONS_POST,
++	    .name = "instances", .namelen = sizeof("instances")-1 },
++    { .key = RESTKEY_LABELS, .options = HTTP_OPTIONS_GET | HTTP_OPTIONS_POST,
++	    .name = "labels", .namelen = sizeof("labels")-1 },
++    { .key = RESTKEY_METRIC, .options = HTTP_OPTIONS_GET | HTTP_OPTIONS_POST,
++	    .name = "metrics", .namelen = sizeof("metrics")-1 },
++    { .key = RESTKEY_SOURCE, .options = HTTP_OPTIONS_GET | HTTP_OPTIONS_POST,
++	    .name = "sources", .namelen = sizeof("sources")-1 },
++    { .key = RESTKEY_VALUES, .options = HTTP_OPTIONS_GET | HTTP_OPTIONS_POST,
++	    .name = "values", .namelen = sizeof("values")-1 },
++    { .key = RESTKEY_LOAD, .options = HTTP_OPTIONS_GET | HTTP_OPTIONS_POST,
++	    .name = "load", .namelen = sizeof("load")-1 },
++    { .key = RESTKEY_PING, .options = HTTP_OPTIONS_GET,
++	    .name = "ping", .namelen = sizeof("ping")-1 },
++    { .name = NULL }	/* sentinel */
+ };
+ 
+ /* constant string keys (initialized during servlet setup) */
+@@ -78,8 +88,8 @@
+ static const char pmseries_success[] = "{\"success\":true}\r\n";
+ static const char pmseries_failure[] = "{\"success\":false}\r\n";
+ 
+-static pmSeriesRestKey
+-pmseries_lookup_restkey(sds url)
++static pmSeriesRestCommand *
++pmseries_lookup_rest_command(sds url)
+ {
+     pmSeriesRestCommand	*cp;
+     const char		*name;
+@@ -88,11 +98,11 @@
+ 	strncmp(url, "/series/", sizeof("/series/") - 1) == 0) {
+ 	name = (const char *)url + sizeof("/series/") - 1;
+ 	for (cp = &commands[0]; cp->name; cp++) {
+-	    if (strncmp(cp->name, name, cp->size) == 0)
+-		return cp->key;
++	    if (strncmp(cp->name, name, cp->namelen) == 0)
++		return cp;
+ 	}
+     }
+-    return RESTKEY_NONE;
++    return NULL;
+ }
+ 
+ static void
+@@ -518,6 +528,7 @@
+ {
+     pmSeriesBaton	*baton = (pmSeriesBaton *)arg;
+     struct client	*client = baton->client;
++    http_options	options = baton->options;
+     http_flags		flags = client->u.http.flags;
+     http_code		code;
+     sds			msg;
+@@ -545,7 +556,7 @@
+ 	    msg = sdsnewlen(pmseries_failure, sizeof(pmseries_failure) - 1);
+ 	flags |= HTTP_FLAG_JSON;
+     }
+-    http_reply(client, msg, code, flags);
++    http_reply(client, msg, code, flags, options);
+ }
+ 
+ static void
+@@ -555,6 +566,14 @@
+ 	fprintf(stderr, "series module setup (arg=%p)\n", arg);
+ }
+ 
++static void
++pmseries_log(pmLogLevel level, sds message, void *arg)
++{
++    pmSeriesBaton	*baton = (pmSeriesBaton *)arg;
++
++    proxylog(level, message, baton->client->proxy);
++}
++
+ static pmSeriesSettings pmseries_settings = {
+     .callbacks.on_match		= on_pmseries_match,
+     .callbacks.on_desc		= on_pmseries_desc,
+@@ -567,7 +586,7 @@
+     .callbacks.on_label		= on_pmseries_label,
+     .callbacks.on_done		= on_pmseries_done,
+     .module.on_setup		= pmseries_setup,
+-    .module.on_info		= proxylog,
++    .module.on_info		= pmseries_log,
+ };
+ 
+ static void
+@@ -686,7 +705,6 @@
+     case RESTKEY_PING:
+ 	break;
+ 
+-    case RESTKEY_NONE:
+     default:
+ 	client->u.http.parser.status_code = HTTP_STATUS_BAD_REQUEST;
+ 	break;
+@@ -702,15 +720,16 @@
+ pmseries_request_url(struct client *client, sds url, dict *parameters)
+ {
+     pmSeriesBaton	*baton;
+-    pmSeriesRestKey	key;
++    pmSeriesRestCommand	*command;
+ 
+-    if ((key = pmseries_lookup_restkey(url)) == RESTKEY_NONE)
++    if ((command = pmseries_lookup_rest_command(url)) == NULL)
+ 	return 0;
+ 
+     if ((baton = calloc(1, sizeof(*baton))) != NULL) {
+ 	client->u.http.data = baton;
+ 	baton->client = client;
+-	baton->restkey = key;
++	baton->restkey = command->key;
++	baton->options = command->options;
+ 	pmseries_setup_request_parameters(client, baton, parameters);
+     } else {
+ 	client->u.http.parser.status_code = HTTP_STATUS_INTERNAL_SERVER_ERROR;
+@@ -794,10 +813,12 @@
+ 
+     if (baton->query == NULL) {
+ 	message = sdsnewlen(failed, sizeof(failed) - 1);
+-	http_reply(client, message, HTTP_STATUS_BAD_REQUEST, HTTP_FLAG_JSON);
++	http_reply(client, message, HTTP_STATUS_BAD_REQUEST,
++			HTTP_FLAG_JSON, baton->options);
+     } else if (baton->working) {
+ 	message = sdsnewlen(loading, sizeof(loading) - 1);
+-	http_reply(client, message, HTTP_STATUS_CONFLICT, HTTP_FLAG_JSON);
++	http_reply(client, message, HTTP_STATUS_CONFLICT,
++			HTTP_FLAG_JSON, baton->options);
+     } else {
+ 	uv_queue_work(client->proxy->events, &baton->loading,
+ 			pmseries_load_work, pmseries_load_done);
+@@ -810,8 +831,17 @@
+     pmSeriesBaton	*baton = (pmSeriesBaton *)client->u.http.data;
+     int			sts;
+ 
+-    if (client->u.http.parser.status_code)
++    if (client->u.http.parser.status_code) {
++	on_pmseries_done(-EINVAL, baton);
++	return 1;
++    }
++
++    if (client->u.http.parser.method == HTTP_OPTIONS ||
++	client->u.http.parser.method == HTTP_TRACE ||
++	client->u.http.parser.method == HTTP_HEAD) {
++	on_pmseries_done(0, baton);
+ 	return 0;
++    }
+ 
+     switch (baton->restkey) {
+     case RESTKEY_QUERY:
+--- a/src/pmproxy/src/webapi.c	2020-04-17 15:39:17.000000000 +1000
++++ b/src/pmproxy/src/webapi.c	2020-06-11 13:10:57.399576484 +1000
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright (c) 2019 Red Hat.
++ * Copyright (c) 2019-2020 Red Hat.
+  *
+  * This program is free software; you can redistribute it and/or modify it
+  * under the terms of the GNU Lesser General Public License as published
+@@ -18,8 +18,7 @@
+ #include "util.h"
+ 
+ typedef enum pmWebRestKey {
+-    RESTKEY_NONE	= 0,
+-    RESTKEY_CONTEXT,
++    RESTKEY_CONTEXT	= 1,
+     RESTKEY_METRIC,
+     RESTKEY_FETCH,
+     RESTKEY_INDOM,
+@@ -32,7 +31,8 @@
+ 
+ typedef struct pmWebRestCommand {
+     const char		*name;
+-    unsigned int	size;
++    unsigned int	namelen : 16;
++    unsigned int	options : 16;
+     pmWebRestKey	key;
+ } pmWebRestCommand;
+ 
+@@ -47,6 +47,7 @@
+     sds			password;	/* from basic auth header */
+     unsigned int	times : 1;
+     unsigned int	compat : 1;
++    unsigned int	options : 16;
+     unsigned int	numpmids;
+     unsigned int	numvsets;
+     unsigned int	numinsts;
+@@ -56,21 +57,31 @@
+ } pmWebGroupBaton;
+ 
+ static pmWebRestCommand commands[] = {
+-    { .key = RESTKEY_CONTEXT, .name = "context", .size = sizeof("context")-1 },
+-    { .key = RESTKEY_PROFILE, .name = "profile", .size = sizeof("profile")-1 },
+-    { .key = RESTKEY_SCRAPE, .name = "metrics", .size = sizeof("metrics")-1 },
+-    { .key = RESTKEY_METRIC, .name = "metric", .size = sizeof("metric")-1 },
+-    { .key = RESTKEY_DERIVE, .name = "derive", .size = sizeof("derive")-1 },
+-    { .key = RESTKEY_FETCH, .name = "fetch", .size = sizeof("fetch")-1 },
+-    { .key = RESTKEY_INDOM, .name = "indom", .size = sizeof("indom")-1 },
+-    { .key = RESTKEY_STORE, .name = "store", .size = sizeof("store")-1 },
+-    { .key = RESTKEY_CHILD, .name = "children", .size = sizeof("children")-1 },
+-    { .key = RESTKEY_NONE }
++    { .key = RESTKEY_CONTEXT, .options = HTTP_OPTIONS_GET,
++	    .name = "context", .namelen = sizeof("context")-1 },
++    { .key = RESTKEY_PROFILE, .options = HTTP_OPTIONS_GET,
++	    .name = "profile", .namelen = sizeof("profile")-1 },
++    { .key = RESTKEY_SCRAPE, .options = HTTP_OPTIONS_GET,
++	    .name = "metrics", .namelen = sizeof("metrics")-1 },
++    { .key = RESTKEY_METRIC, .options = HTTP_OPTIONS_GET,
++	    .name = "metric", .namelen = sizeof("metric")-1 },
++    { .key = RESTKEY_DERIVE, .options = HTTP_OPTIONS_GET | HTTP_OPTIONS_POST,
++	    .name = "derive", .namelen = sizeof("derive")-1 },
++    { .key = RESTKEY_FETCH, .options = HTTP_OPTIONS_GET,
++	    .name = "fetch", .namelen = sizeof("fetch")-1 },
++    { .key = RESTKEY_INDOM, .options = HTTP_OPTIONS_GET,
++	    .name = "indom", .namelen = sizeof("indom")-1 },
++    { .key = RESTKEY_STORE, .options = HTTP_OPTIONS_GET,
++	    .name = "store", .namelen = sizeof("store")-1 },
++    { .key = RESTKEY_CHILD, .options = HTTP_OPTIONS_GET,
++	    .name = "children", .namelen = sizeof("children")-1 },
++    { .name = NULL }	/* sentinel */
+ };
+ 
+ static pmWebRestCommand openmetrics[] = {
+-    { .key = RESTKEY_SCRAPE, .name = "/metrics", .size = sizeof("/metrics")-1 },
+-    { .key = RESTKEY_NONE }
++    { .key = RESTKEY_SCRAPE, .options = HTTP_OPTIONS_GET,
++	    .name = "/metrics", .namelen = sizeof("/metrics")-1 },
++    { .name = NULL }	/* sentinel */
+ };
+ 
+ static sds PARAM_NAMES, PARAM_NAME, PARAM_PMIDS, PARAM_PMID,
+@@ -78,8 +89,8 @@
+ 	   PARAM_CONTEXT, PARAM_CLIENT;
+ 
+ 
+-static pmWebRestKey
+-pmwebapi_lookup_restkey(sds url, unsigned int *compat, sds *context)
++static pmWebRestCommand *
++pmwebapi_lookup_rest_command(sds url, unsigned int *compat, sds *context)
+ {
+     pmWebRestCommand	*cp;
+     const char		*name, *ctxid = NULL;
+@@ -94,7 +105,7 @@
+ 		name++;
+ 	    } while (isdigit((int)(*name)));
+ 	    if (*name++ != '/')
+-		return RESTKEY_NONE;
++		return NULL;
+ 	    *context = sdsnewlen(ctxid, name - ctxid - 1);
+ 	}
+ 	if (*name == '_') {
+@@ -102,13 +113,13 @@
+ 	    *compat = 1;	/* backward-compatibility mode */
+ 	}
+ 	for (cp = &commands[0]; cp->name; cp++)
+-	    if (strncmp(cp->name, name, cp->size) == 0)
+-		return cp->key;
++	    if (strncmp(cp->name, name, cp->namelen) == 0)
++		return cp;
+     }
+     for (cp = &openmetrics[0]; cp->name; cp++)
+-	if (strncmp(cp->name, url, cp->size) == 0)
+-	    return cp->key;
+-    return RESTKEY_NONE;
++	if (strncmp(cp->name, url, cp->namelen) == 0)
++	    return cp;
++    return NULL;
+ }
+ 
+ static void
+@@ -584,9 +595,10 @@
+ {
+     pmWebGroupBaton	*baton = (pmWebGroupBaton *)arg;
+     struct client	*client = (struct client *)baton->client;
+-    sds			quoted, msg;
++    http_options	options = baton->options;
+     http_flags		flags = client->u.http.flags;
+     http_code		code;
++    sds			quoted, msg;
+ 
+     if (pmDebugOptions.series)
+ 	fprintf(stderr, "%s: client=%p (sts=%d,msg=%s)\n", "on_pmwebapi_done",
+@@ -596,7 +608,9 @@
+ 	code = HTTP_STATUS_OK;
+ 	/* complete current response with JSON suffix if needed */
+ 	if ((msg = baton->suffix) == NULL) {	/* empty OK response */
+-	    if (flags & HTTP_FLAG_JSON) {
++	    if (flags & HTTP_FLAG_NO_BODY) {
++		msg = sdsempty();
++	    } else if (flags & HTTP_FLAG_JSON) {
+ 		msg = sdsnewlen("{", 1);
+ 		if (context)
+ 		    msg = sdscatfmt(msg, "\"context\":%S,", context);
+@@ -628,10 +642,18 @@
+ 	sdsfree(quoted);
+     }
+ 
+-    http_reply(client, msg, code, flags);
++    http_reply(client, msg, code, flags, options);
+     client_put(client);
+ }
+ 
++static void
++on_pmwebapi_info(pmLogLevel level, sds message, void *arg)
++{
++    pmWebGroupBaton	*baton = (pmWebGroupBaton *)arg;
++
++    proxylog(level, message, baton->client->proxy);
++}
++
+ static pmWebGroupSettings pmwebapi_settings = {
+     .callbacks.on_context	= on_pmwebapi_context,
+     .callbacks.on_metric	= on_pmwebapi_metric,
+@@ -645,7 +667,7 @@
+     .callbacks.on_scrape_labels	= on_pmwebapi_scrape_labels,
+     .callbacks.on_check		= on_pmwebapi_check,
+     .callbacks.on_done		= on_pmwebapi_done,
+-    .module.on_info		= proxylog,
++    .module.on_info		= on_pmwebapi_info,
+ };
+ 
+ /*
+@@ -734,7 +756,6 @@
+ 	client->u.http.flags |= HTTP_FLAG_JSON;
+ 	break;
+ 
+-    case RESTKEY_NONE:
+     default:
+ 	client->u.http.parser.status_code = HTTP_STATUS_BAD_REQUEST;
+ 	break;
+@@ -750,11 +771,11 @@
+ pmwebapi_request_url(struct client *client, sds url, dict *parameters)
+ {
+     pmWebGroupBaton	*baton;
+-    pmWebRestKey	key;
++    pmWebRestCommand	*command;
+     unsigned int	compat = 0;
+     sds			context = NULL;
+ 
+-    if ((key = pmwebapi_lookup_restkey(url, &compat, &context)) == RESTKEY_NONE) {
++    if (!(command = pmwebapi_lookup_rest_command(url, &compat, &context))) {
+ 	sdsfree(context);
+ 	return 0;
+     }
+@@ -762,7 +783,8 @@
+     if ((baton = calloc(1, sizeof(*baton))) != NULL) {
+ 	client->u.http.data = baton;
+ 	baton->client = client;
+-	baton->restkey = key;
++	baton->restkey = command->key;
++	baton->options = command->options;
+ 	baton->compat = compat;
+ 	baton->context = context;
+ 	pmwebapi_setup_request_parameters(client, baton, parameters);
+@@ -885,17 +907,27 @@
+     uv_loop_t		*loop = client->proxy->events;
+     uv_work_t		*work;
+ 
+-    /* fail early if something has already gone wrong */
+-    if (client->u.http.parser.status_code != 0)
++    /* take a reference on the client to prevent freeing races on close */
++    client_get(client);
++
++    if (client->u.http.parser.status_code) {
++	on_pmwebapi_done(NULL, -EINVAL, NULL, baton);
+ 	return 1;
++    }
++
++    if (client->u.http.parser.method == HTTP_OPTIONS ||
++	client->u.http.parser.method == HTTP_TRACE ||
++	client->u.http.parser.method == HTTP_HEAD) {
++	on_pmwebapi_done(NULL, 0, NULL, baton);
++	return 0;
++    }
+ 
+-    if ((work = (uv_work_t *)calloc(1, sizeof(uv_work_t))) == NULL)
++    if ((work = (uv_work_t *)calloc(1, sizeof(uv_work_t))) == NULL) {
++	client_put(client);
+ 	return 1;
++    }
+     work->data = baton;
+ 
+-    /* take a reference on the client to prevent freeing races on close */
+-    client_get(client);
+-
+     /* submit command request to worker thread */
+     switch (baton->restkey) {
+     case RESTKEY_CONTEXT:
+@@ -925,11 +957,10 @@
+     case RESTKEY_SCRAPE:
+ 	uv_queue_work(loop, work, pmwebapi_scrape, pmwebapi_work_done);
+ 	break;
+-    case RESTKEY_NONE:
+     default:
++	pmwebapi_work_done(work, -EINVAL);
+ 	client->u.http.parser.status_code = HTTP_STATUS_BAD_REQUEST;
+-	client_put(client);
+-	free(work);
++	on_pmwebapi_done(NULL, -EINVAL, NULL, baton);
+ 	return 1;
+     }
+     return 0;
+--- a/src/pmproxy/src/http.h	2019-12-02 16:43:20.000000000 +1100
++++ b/src/pmproxy/src/http.h	2020-06-11 13:10:57.398576470 +1000
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright (c) 2019 Red Hat.
++ * Copyright (c) 2019-2020 Red Hat.
+  * 
+  * This program is free software; you can redistribute it and/or modify it
+  * under the terms of the GNU Lesser General Public License as published
+@@ -34,29 +34,39 @@
+     HTTP_FLAG_JSON	= (1<<0),
+     HTTP_FLAG_TEXT	= (1<<1),
+     HTTP_FLAG_HTML	= (1<<2),
+-    HTTP_FLAG_JS	= (1<<3),
+-    HTTP_FLAG_CSS	= (1<<4),
+-    HTTP_FLAG_ICO	= (1<<5),
+-    HTTP_FLAG_JPG	= (1<<6),
+-    HTTP_FLAG_PNG	= (1<<7),
+-    HTTP_FLAG_GIF	= (1<<8),
+     HTTP_FLAG_UTF8	= (1<<10),
+     HTTP_FLAG_UTF16	= (1<<11),
++    HTTP_FLAG_NO_BODY	= (1<<13),
+     HTTP_FLAG_COMPRESS	= (1<<14),
+     HTTP_FLAG_STREAMING	= (1<<15),
+     /* maximum 16 for server.h */
+ } http_flags;
+ 
++typedef enum http_options {
++    HTTP_OPT_GET	= (1 << HTTP_GET),
++    HTTP_OPT_PUT	= (1 << HTTP_PUT),
++    HTTP_OPT_HEAD	= (1 << HTTP_HEAD),
++    HTTP_OPT_POST	= (1 << HTTP_POST),
++    HTTP_OPT_TRACE	= (1 << HTTP_TRACE),
++    HTTP_OPT_OPTIONS	= (1 << HTTP_OPTIONS),
++    /* maximum 16 in command opts fields */
++} http_options;
++
++#define HTTP_COMMON_OPTIONS (HTTP_OPT_HEAD | HTTP_OPT_TRACE | HTTP_OPT_OPTIONS)
++#define HTTP_OPTIONS_GET    (HTTP_COMMON_OPTIONS | HTTP_OPT_GET)
++#define HTTP_OPTIONS_PUT    (HTTP_COMMON_OPTIONS | HTTP_OPT_PUT)
++#define HTTP_OPTIONS_POST   (HTTP_COMMON_OPTIONS | HTTP_OPT_POST)
++#define HTTP_SERVER_OPTIONS (HTTP_OPTIONS_GET | HTTP_OPT_PUT | HTTP_OPT_POST)
++
+ typedef unsigned int http_code;
+ 
+ extern void http_transfer(struct client *);
+-extern void http_reply(struct client *, sds, http_code, http_flags);
++extern void http_reply(struct client *, sds, http_code, http_flags, http_options);
+ extern void http_error(struct client *, http_code, const char *);
+ 
+ extern int http_decode(const char *, size_t, sds);
+ extern const char *http_status_mapping(http_code);
+ extern const char *http_content_type(http_flags);
+-extern http_flags http_suffix_type(const char *);
+ 
+ extern sds http_get_buffer(struct client *);
+ extern void http_set_buffer(struct client *, sds, http_flags);
+--- a/qa/1837	1970-01-01 10:00:00.000000000 +1000
++++ b/qa/1837	2020-06-11 13:10:57.396576440 +1000
+@@ -0,0 +1,55 @@
++#!/bin/sh
++# PCP QA Test No. 1837
++# Exercise PMWEBAPI handling server OPTIONS.
++#
++# Copyright (c) 2020 Red Hat.  All Rights Reserved.
++#
++
++seq=`basename $0`
++echo "QA output created by $seq"
++
++# get standard environment, filters and checks
++. ./common.product
++. ./common.filter
++. ./common.check
++
++_check_series
++which curl >/dev/null 2>&1 || _notrun "No curl binary installed"
++curl --request-targets 2>&1 | grep -q 'requires parameter' && \
++	_notrun "Test requires curl --request-targets option"
++
++status=1	# failure is the default!
++$sudo rm -rf $tmp.* $seq.full
++trap "cd $here; _cleanup; exit \$status" 0 1 2 3 15
++
++pmproxy_was_running=false
++[ -f $PCP_RUN_DIR/pmproxy.pid ] && pmproxy_was_running=true
++echo "pmproxy_was_running=$pmproxy_was_running" >>$here/$seq.full
++
++_cleanup()
++{
++    if $pmproxy_was_running
++    then
++	echo "Restart pmproxy ..." >>$here/$seq.full
++	_service pmproxy restart >>$here/$seq.full 2>&1
++	_wait_for_pmproxy
++    else
++	echo "Stopping pmproxy ..." >>$here/$seq.full
++	_service pmproxy stop >>$here/$seq.full 2>&1
++    fi
++    $sudo rm -f $tmp.*
++}
++
++# real QA test starts here
++_service pmproxy restart >/dev/null 2>&1
++
++curl -isS --request-target "*" -X OPTIONS http://localhost:44322 \
++	2>&1 | tee -a $here/$seq.full | _webapi_header_filter
++
++echo >>$here/$seq.full
++echo "=== pmproxy log ===" >>$here/$seq.full
++cat $PCP_LOG_DIR/pmproxy/pmproxy.log >>$here/$seq.full
++
++# success, all done
++status=0
++exit
+--- a/qa/1837.out	1970-01-01 10:00:00.000000000 +1000
++++ b/qa/1837.out	2020-06-11 13:10:57.397576455 +1000
+@@ -0,0 +1,6 @@
++QA output created by 1837
++
++Access-Control-Allow-Methods: GET, PUT, HEAD, POST, TRACE, OPTIONS
++Content-Length: 0
++Date: DATE
++HTTP/1.1 200 OK
+--- a/qa/780	2020-04-14 14:41:41.000000000 +1000
++++ b/qa/780	2020-06-11 13:10:57.397576455 +1000
+@@ -1,8 +1,8 @@
+ #!/bin/sh
+ # PCP QA Test No. 780
+-# Exercise PMWEBAPI Access-Control-Allow-Origin HTTP header.
++# Exercise PMWEBAPI CORS headers.
+ #
+-# Copyright (c) 2014,2019 Red Hat.
++# Copyright (c) 2014,2019-2020 Red Hat.
+ #
+ 
+ seq=`basename $0`
+@@ -16,7 +16,6 @@
+ _check_series
+ which curl >/dev/null 2>&1 || _notrun "No curl binary installed"
+ 
+-signal=$PCP_BINADM_DIR/pmsignal
+ status=1	# failure is the default!
+ $sudo rm -rf $tmp.* $seq.full
+ trap "cd $here; _cleanup; exit \$status" 0 1 2 3 15
+@@ -39,13 +38,21 @@
+     $sudo rm -f $tmp.*
+ }
+ 
+-unset http_proxy
+-unset HTTP_PROXY
+-
+ # real QA test starts here
+ _service pmproxy restart >/dev/null 2>&1
+ 
+-curl -s -S "http://localhost:44323/pmapi/context" -I | _webapi_header_filter
++echo "=== Basic" | tee -a $here/$seq.full
++curl -IsS "http://localhost:44323/pmapi/context" | _webapi_header_filter
++
++echo "=== Preflight" | tee -a $here/$seq.full
++curl -isS -X OPTIONS "http://localhost:44323/series/query?expr=hinv*" | _webapi_header_filter
++
++echo "=== OK Request Method" | tee -a $here/$seq.full
++curl -isS -X OPTIONS -H "Origin: http://example.com" -H "Access-Control-Request-Method: GET" "http://localhost:44323/pmapi/context" | _webapi_header_filter
++
++echo "=== Bad Request Method" | tee -a $here/$seq.full
++curl -isS -X OPTIONS -H "Origin: http://example.com" -H "Access-Control-Request-Method: BAD" "http://localhost:44323/pmapi/context" | _webapi_header_filter
++
+ echo >>$here/$seq.full
+ echo "=== pmproxy log ===" >>$here/$seq.full
+ cat $PCP_LOG_DIR/pmproxy/pmproxy.log >>$here/$seq.full
+--- a/qa/780.out	2020-03-23 09:47:47.000000000 +1100
++++ b/qa/780.out	2020-06-11 13:10:57.397576455 +1000
+@@ -1,8 +1,27 @@
+ QA output created by 780
++=== Basic
+ 
+ Access-Control-Allow-Headers: Accept, Accept-Language, Content-Language, Content-Type
+ Access-Control-Allow-Origin: *
+-Content-Length: SIZE
+ Content-Type: application/json
+ Date: DATE
+ HTTP/1.1 200 OK
++Transfer-encoding: chunked
++=== Preflight
++
++Access-Control-Allow-Methods: GET, HEAD, POST, TRACE, OPTIONS
++Content-Length: 0
++Date: DATE
++HTTP/1.1 200 OK
++=== OK Request Method
++
++Access-Control-Allow-Methods: GET, HEAD, TRACE, OPTIONS
++Access-Control-Allow-Origin: http://example.com
++Content-Length: 0
++Date: DATE
++HTTP/1.1 200 OK
++=== Bad Request Method
++
++Content-Length: 0
++Date: DATE
++HTTP/1.1 405 Method Not Allowed
+--- a/qa/common.check	2020-05-20 10:51:37.000000000 +1000
++++ b/qa/common.check	2020-06-11 13:10:57.397576455 +1000
+@@ -2696,7 +2696,7 @@
+     tee -a $here/$seq.full \
+     | col -b \
+     | sed \
+-	-e 's/^\(Content-Length:\) [0-9][0-9]*/\1 SIZE/g' \
++	-e 's/^\(Content-Length:\) [1-9][0-9]*/\1 SIZE/g' \
+ 	-e 's/^\(Date:\).*/\1 DATE/g' \
+ 	-e 's/\(\"context\":\) [0-9][0-9]*/\1 CTXID/g' \
+ 	-e '/^Connection: Keep-Alive/d' \
+--- a/qa/group	2020-05-28 09:15:22.000000000 +1000
++++ b/qa/group	2020-06-11 13:10:57.397576455 +1000
+@@ -1757,6 +1757,7 @@
+ 1724 pmda.bpftrace local python
+ 1768 pmfind local
+ 1793 pmrep pcp2xxx python local
++1837 pmproxy local
+ 1855 pmda.rabbitmq local
+ 1896 pmlogger logutil pmlc local
+ 4751 libpcp threads valgrind local pcp
+--- a/qa/1211.out	2020-01-20 16:53:42.000000000 +1100
++++ b/qa/1211.out	2020-06-11 13:10:57.399576484 +1000
+@@ -507,9 +507,11 @@
+ Perform simple source-based query ...
+ 
+ Error handling - descriptor for bad series identifier
+-pmseries: [Error] no descriptor for series identifier no.such.identifier
+ 
+ no.such.identifier
++    PMID: PM_ID_NULL
++    Data Type: ???  InDom: unknown 0xffffffff
++    Semantics: unknown  Units: unknown
+ 
+ Error handling - metric name for bad series identifier
+ 
+--- a/src/libpcp_web/src/query.c	2020-01-20 15:43:31.000000000 +1100
++++ b/src/libpcp_web/src/query.c	2020-06-11 13:10:57.399576484 +1000
+@@ -1938,11 +1938,15 @@
+ 	return -EPROTO;
+     }
+ 
+-    /* sanity check - were we given an invalid series identifier? */
++    /* were we given a non-metric series identifier? (e.g. an instance) */
+     if (elements[0]->type == REDIS_REPLY_NIL) {
+-	infofmt(msg, "no descriptor for series identifier %s", series);
+-	batoninfo(baton, PMLOG_ERROR, msg);
+-	return -EINVAL;
++	desc->indom = sdscpylen(desc->indom, "unknown", 7);
++	desc->pmid = sdscpylen(desc->pmid, "PM_ID_NULL", 10);
++	desc->semantics = sdscpylen(desc->semantics, "unknown", 7);
++	desc->source = sdscpylen(desc->source, "unknown", 7);
++	desc->type = sdscpylen(desc->type, "unknown", 7);
++	desc->units = sdscpylen(desc->units, "unknown", 7);
++	return 0;
+     }
+ 
+     if (extract_string(baton, series, elements[0], &desc->indom, "indom") < 0)
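
As a rough illustration (not part of the patch), the qa/1211.out change above matches the
new libpcp_web behaviour: asking for the descriptor of a series identifier that is not a
metric (an instance series, say) now reports placeholder values rather than an error.
Assuming a pmseries/pmproxy setup like the QA test's, and using the same deliberately
bogus identifier:

    # previously: pmseries: [Error] no descriptor for series identifier no.such.identifier
    # now: PM_ID_NULL with unknown type, indom, semantics and units is reported
    pmseries --desc no.such.identifier
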
diff --git a/SOURCES/redhat-bugzilla-1846705.patch b/SOURCES/redhat-bugzilla-1846705.patch
new file mode 100644
index 0000000..6504334
--- /dev/null
+++ b/SOURCES/redhat-bugzilla-1846705.patch
@@ -0,0 +1,36 @@
+BZ 1846705 - Possible memory leak detected in pcp-atop
+f30aff90b qa: add valgrind suppressions needed for valgrind 3.16
+
+diff --git a/qa/valgrind-suppress-3.16.0 b/qa/valgrind-suppress-3.16.0
+new file mode 100644
+index 000000000..515591747
+--- /dev/null
++++ b/qa/valgrind-suppress-3.16.0
+@@ -0,0 +1,27 @@
++# qa/1080 and qa/490 and qa/386 and qa/459 on Fedora 32
++# at 0x483880B: malloc (vg_replace_malloc.c:299)
++# by 0x4A0D490: tsearch (in /usr/lib64/libc-2.28.so)
++# by 0x4871EA6: __pmFindPDUBuf (pdubuf.c:126)
++# ...
++{
++   tsearch
++   Memcheck:Leak
++   match-leak-kinds: possible
++   fun:malloc
++   fun:tsearch
++   fun:__pmFindPDUBuf
++   ...
++}
++
++# qa/1080 and qa/490 and qa/386 and qa/459 on Fedora 32
++# at 0x483880B: malloc (vg_replace_malloc.c:299)
++# by 0x4871E5F: __pmFindPDUBuf (pdubuf.c:115)
++# ...
++{
++   findpdubuf
++   Memcheck:Leak
++   match-leak-kinds: possible
++   fun:malloc
++   fun:__pmFindPDUBuf
++   ...
++}
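
These suppressions are meant to be picked up by the QA valgrind wrapper; as a standalone
sketch (the command and metric below are illustrative only, not part of the patch), the
file can equally be passed straight to valgrind:

    valgrind --tool=memcheck --leak-check=full \
             --suppressions=qa/valgrind-suppress-3.16.0 \
             pminfo -f pmcd.numclients
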
diff --git a/SOURCES/redhat-bugzilla-1846711.patch b/SOURCES/redhat-bugzilla-1846711.patch
new file mode 100644
index 0000000..1c57ff2
--- /dev/null
+++ b/SOURCES/redhat-bugzilla-1846711.patch
@@ -0,0 +1,268 @@
+BZ 1846711 - pcp-pmda-openmetrics produces warnings querying grafana in its default configuration
+0b2ef2d79 pmdaopenmetrics: add control.status metrics, de-verbosify the log, QA updates
+63605e3db qa/1102: tweak openmetrics QA to be more deterministic
+649a0c3a2 qa: improve _filter_pmda_remove() in common.filter
+
+commit 0b2ef2d79686d1e44901263093edeb9e1b9b5f77
+Author: Mark Goodwin <mgoodwin@redhat.com>
+Date:   Fri Jun 19 12:18:47 2020 +1000
+
+    pmdaopenmetrics: add control.status metrics, de-verbosify the log, QA updates
+    
+    Resolves: RHBZ#1846711
+    
+    Add openmetrics.control.status (string status per configured URL
+    of the last fetch) and openmetrics.control.status_code, which
+    is the integer response code (e.g. 200 is success) with discrete
+    semantics.
+    
+    In addition, we now only spam the PMDA log and systemd journal
+    when a URL fetch fails if openmetrics.control.debug is non-zero.
+    Users can instead rely on the new status metrics, which can also
+    be used for service availability monitoring. These metrics
+    complement the openmetrics.control.parse_time, fetch_time and
+    calls counters.
+    
+    Includes QA updates and pmdaopenmetrics(1) doc updates.
+
+diff --git a/qa/1321.out b/qa/1321.out
+index cee072cd2..4533bccd8 100644
+--- a/qa/1321.out
++++ b/qa/1321.out
+@@ -13,6 +13,8 @@ openmetrics.control.calls
+ openmetrics.control.debug
+ openmetrics.control.fetch_time
+ openmetrics.control.parse_time
++openmetrics.control.status
++openmetrics.control.status_code
+ openmetrics.source1.metric1
+ 
+ == Created URL file /var/lib/pcp/pmdas/openmetrics/config.d/source2.url
+@@ -22,6 +24,8 @@ openmetrics.control.calls
+ openmetrics.control.debug
+ openmetrics.control.fetch_time
+ openmetrics.control.parse_time
++openmetrics.control.status
++openmetrics.control.status_code
+ openmetrics.source1.metric1
+ openmetrics.source2.metric1
+ openmetrics.source2.metric2
+@@ -33,6 +37,8 @@ openmetrics.control.calls
+ openmetrics.control.debug
+ openmetrics.control.fetch_time
+ openmetrics.control.parse_time
++openmetrics.control.status
++openmetrics.control.status_code
+ openmetrics.source1.metric1
+ openmetrics.source2.metric1
+ openmetrics.source2.metric2
+@@ -47,6 +53,8 @@ openmetrics.control.calls
+ openmetrics.control.debug
+ openmetrics.control.fetch_time
+ openmetrics.control.parse_time
++openmetrics.control.status
++openmetrics.control.status_code
+ openmetrics.source1.metric1
+ openmetrics.source2.metric1
+ openmetrics.source2.metric2
+@@ -63,6 +71,8 @@ openmetrics.control.calls
+ openmetrics.control.debug
+ openmetrics.control.fetch_time
+ openmetrics.control.parse_time
++openmetrics.control.status
++openmetrics.control.status_code
+ openmetrics.source1.metric1
+ openmetrics.source2.metric1
+ openmetrics.source2.metric2
+diff --git a/src/pmdas/openmetrics/pmdaopenmetrics.1 b/src/pmdas/openmetrics/pmdaopenmetrics.1
+index d3c7aa85f..0c92e2a11 100644
+--- a/src/pmdas/openmetrics/pmdaopenmetrics.1
++++ b/src/pmdas/openmetrics/pmdaopenmetrics.1
+@@ -413,10 +413,37 @@ log mandatory on 2 second {
+ The PMDA maintains special control metrics, as described below.
+ Apart from
+ .BR openmetrics.control.debug ,
+-each of these metrics is a counter and has one instance for each configured metric source.
+-The instance domain is adjusted dynamically as new sources are discovered.
++each of these metrics has one instance for each configured metric source.
++All of these metrics have integer values with counter semantics, except
++.BR openmetrics.control.status ,
++which has a string value.
++It is important to note that fetching any of the
++.B openmetrics.control
++metrics will only update the counters and status values if the corresponding URL is actually fetched.
++If the source URL is not fetched, the control metric values do not trigger a refresh and the control
++values reported represent the most recent fetch of each corresponding source.
++.PP
++The instance domain for the
++.B openmetrics.control
++metrics is adjusted dynamically as new sources are discovered.
+ If there are no sources configured, the metric names are still defined
+ but the instance domain will be empty and a fetch will return no values.
++.IP \fBopenmetrics.control.status\fP
++A string representing the status of the last fetch of the corresponding source.
++This will generally be
++.B success
++for an http response code of 200.
++This metric can be used for service availability monitoring - provided, as stated above,
++the corresponding source URL is fetched too.
++.IP \fBopenmetrics.control.status_code\fP
++This metric is similar to
++.B openmetrics.control.status
++except that it is the integer response code of the last fetch.
++A value of
++.B 200
++usually signifies success and any other value failure.
++This metric can also be used for service availability monitoring, with the same caveats as
++.BR openmetrics.control.status .
+ .IP \fBopenmetrics.control.calls\fP
+ total number of times each configured metric source has been fetched (if it's a URL)
+ or executed (if it's a script), since the PMDA started.
+diff --git a/src/pmdas/openmetrics/pmdaopenmetrics.python b/src/pmdas/openmetrics/pmdaopenmetrics.python
+index a5ed22f13..1486ed676 100755
+--- a/src/pmdas/openmetrics/pmdaopenmetrics.python
++++ b/src/pmdas/openmetrics/pmdaopenmetrics.python
+@@ -1,6 +1,6 @@
+ #!/usr/bin/env pmpython
+ #
+-# Copyright (c) 2017-2019 Red Hat.
++# Copyright (c) 2017-2020 Red Hat.
+ # Copyright (c) 2017 Ronak Jain.
+ #
+ # This program is free software; you can redistribute it and/or modify it
+@@ -704,6 +704,7 @@ class Source(object):
+             return
+ 
+         # fetch the document
++        status_code = 0
+         try:
+             if self.is_scripted:
+                 # Execute file, expecting openmetrics metric data on stdout.
+@@ -715,6 +716,7 @@ class Source(object):
+                     self.document = open(self.url[7:], 'r').read()
+                 else:
+                     r = self.requests.get(self.url, headers=self.headers, timeout=timeout)
++                    status_code = r.status_code
+                     r.raise_for_status() # non-200?  ERROR
+                     # NB: the requests package automatically enables http keep-alive and compression
+                     self.document = r.text
+@@ -723,9 +725,13 @@ class Source(object):
+             incr = int(1000 * (time.time() - fetch_time))
+             self.pmda.stats_fetch_time[self.cluster] += incr
+             self.pmda.stats_fetch_time[0] += incr # total for all sources
++            self.pmda.stats_status[self.cluster] = "success"
++            self.pmda.stats_status_code[self.cluster] = status_code
+ 
+         except Exception as e:
+-            self.pmda.err('Warning: cannot fetch URL or execute script %s: %s' % (self.path, e))
++            self.pmda.stats_status[self.cluster] = 'failed to fetch URL or execute script %s: %s' % (self.path, e)
++            self.pmda.stats_status_code[self.cluster] = status_code
++            self.pmda.debug('Warning: cannot fetch URL or execute script %s: %s' % (self.path, e)) if self.pmda.dbg else None
+             return
+ 
+     def refresh2(self, timeout):
+@@ -844,6 +850,20 @@ class OpenMetricsPMDA(PMDA):
+             pmUnits(0, 0, 0, 0, 0, 0)),
+             'debug flag to enable verbose log messages, to enable: pmstore %s.control.debug 1' % self.pmda_name)
+ 
++        # response status string, per-source end-point
++        self.stats_status = {0:"none"} # status string, keyed by cluster number
++        self.add_metric('%s.control.status' % self.pmda_name, pmdaMetric(self.pmid(0, 5),
++            c_api.PM_TYPE_STRING, self.sources_indom, c_api.PM_SEM_INSTANT,
++            pmUnits(0, 0, 0, 0, 0, 0)), # no units
++            'per-end-point source URL response status after the most recent fetch')
++
++        # response status code, per-source end-point
++        self.stats_status_code = {0:0} # status code, keyed by cluster number
++        self.add_metric('%s.control.status_code' % self.pmda_name, pmdaMetric(self.pmid(0, 6),
++            c_api.PM_TYPE_32, self.sources_indom, c_api.PM_SEM_DISCRETE,
++            pmUnits(0, 0, 0, 0, 0, 0)), # no units
++            'per-end-point source URL response status code after the most recent fetch')
++
+         # schedule a refresh
+         self.set_need_refresh()
+ 
+@@ -961,6 +981,8 @@ class OpenMetricsPMDA(PMDA):
+                     self.stats_fetch_calls[cluster] = 0
+                     self.stats_fetch_time[cluster] = 0
+                     self.stats_parse_time[cluster] = 0
++                    self.stats_status[cluster] = "unknown"
++                    self.stats_status_code[cluster] = 0
+ 
+                     save_cluster_table = True
+                     self.log("Found source %s cluster %d" % (name, cluster))
+@@ -996,6 +1018,10 @@ class OpenMetricsPMDA(PMDA):
+                 return [self.stats_parse_time[inst], 1] if inst in self.stats_parse_time else [c_api.PM_ERR_INST, 0]
+             elif item == 4: # $(pmda_name).control.debug
+                 return [self.dbg, 1]
++            elif item == 5: # per-source status string
++                return [self.stats_status[inst], 1] if inst in self.stats_status else [c_api.PM_ERR_INST, 0]
++            elif item == 6: # per-source status code
++                return [self.stats_status_code[inst], 1] if inst in self.stats_status_code else [c_api.PM_ERR_INST, 0]
+             return [c_api.PM_ERR_PMID, 0]
+ 
+         self.assert_source_invariants(cluster=cluster)
+
+commit 63605e3db4b2821df2a6ffb21507af91d97f3a8b
+Author: Mark Goodwin <mgoodwin@redhat.com>
+Date:   Fri Jun 19 10:02:04 2020 +1000
+
+    qa/1102: tweak openmetrics QA to be more deterministic
+    
+    Now that pmdaopenmetrics is Installed by default with the localhost
+    grafana metrics URL configured, after _pmdaopenmetrics_save_config
+    we need to _pmdaopenmetrics_remove before _pmdaopenmetrics_install
+    to make qa/1102 deterministic.
+
+diff --git a/qa/1102 b/qa/1102
+index f573d14f4..98ff61f5e 100755
+--- a/qa/1102
++++ b/qa/1102
+@@ -46,6 +46,7 @@ trap "_cleanup; exit \$status" 0 1 2 3 15
+ _stop_auto_restart pmcd
+ 
+ _pmdaopenmetrics_save_config
++_pmdaopenmetrics_remove
+ _pmdaopenmetrics_install
+ 
+ port=`_find_free_port 10000`
+diff --git a/qa/1102.out b/qa/1102.out
+index 5094e4a82..aa74abe44 100644
+--- a/qa/1102.out
++++ b/qa/1102.out
+@@ -1,5 +1,12 @@
+ QA output created by 1102
+ 
++=== remove openmetrics agent ===
++Culling the Performance Metrics Name Space ...
++openmetrics ... done
++Updating the PMCD control file, and notifying PMCD ...
++[...removing files...]
++Check openmetrics metrics have gone away ... OK
++
+ === openmetrics agent installation ===
+ Fetch and desc openmetrics metrics: success
+ 
+
+commit 649a0c3a2745f549b139ce1250e38a1e90308426
+Author: Mark Goodwin <mgoodwin@redhat.com>
+Date:   Fri Jun 19 09:55:58 2020 +1000
+
+    qa: improve _filter_pmda_remove() in common.filter
+    
+    Filter "Job for pmcd.service canceled" in _filter_pmda_remove.
+    Systemd sometimes (uncommonly) prints this if a PMDA is still
+    starting when a QA test ./Removes it.
+
+diff --git a/qa/common.filter b/qa/common.filter
+index a53d4a49d..b327abedc 100644
+--- a/qa/common.filter
++++ b/qa/common.filter
+@@ -760,6 +760,7 @@ _filter_pmda_remove()
+     _filter_pmda_install |
+     sed \
+ 	-e '/Removing files/d' \
++	-e '/Job for pmcd.service canceled/d' \
+ 	-e '/Updating the PMCD control file/c\
+ Updating the PMCD control file, and notifying PMCD ...\
+ [...removing files...]'
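
To see how the new control metrics are intended to be read (a sketch only; the source1
name comes from the QA output above, not a real deployment): fetch a metric from the
source so its URL is actually refreshed, then read the status recorded for that fetch:

    pminfo -f openmetrics.source1.metric1                                 # triggers the URL fetch
    pminfo -f openmetrics.control.status openmetrics.control.status_code  # result of that fetch
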
diff --git a/SOURCES/redhat-bugzilla-1848995.patch b/SOURCES/redhat-bugzilla-1848995.patch
new file mode 100644
index 0000000..edabc1a
--- /dev/null
+++ b/SOURCES/redhat-bugzilla-1848995.patch
@@ -0,0 +1,17 @@
+BZ 1848995 - Intermittent pminfo crashes (core dumps)
+51bb36a84 libpcp: minor clarification to previous fix to use __pmHashInit
+d0df00ad1 derive_parser.y.in: fix of incomplete initialization of __pmHashCtl struct for F_REGEX node
+
+diff --git a/src/libpcp/src/derive_parser.y.in b/src/libpcp/src/derive_parser.y.in
+index 9ed375e01..6756daa77 100644
+--- a/src/libpcp/src/derive_parser.y.in
++++ b/src/libpcp/src/derive_parser.y.in
+@@ -2598,6 +2598,8 @@ regexpr	: opt_bang L_PATTERN
+ 		      return -1;
+ 		  }
+ 		  np->data.pattern->invert = $1;
++		  np->data.pattern->used = 0;
++		  __pmHashInit(&np->data.pattern->hash);
+ 		  $$ = np;
+ 		}
+ 
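
The code path fixed here builds the regex ("pattern") node used by derived metrics; a
minimal sketch of an expression that reaches it, assuming the standard matchinst()
derived-metric syntax (the metric name and pattern below are illustrative only):

    cat >/tmp/derived.conf <<'EOF'
    qa.example.cpu = matchinst(/cpu[01]/, kernel.percpu.cpu.user)
    EOF
    PCP_DERIVED_CONFIG=/tmp/derived.conf pminfo -f qa.example.cpu
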
diff --git a/SOURCES/redhat-bugzilla-1849511.patch b/SOURCES/redhat-bugzilla-1849511.patch
new file mode 100644
index 0000000..864a91d
--- /dev/null
+++ b/SOURCES/redhat-bugzilla-1849511.patch
@@ -0,0 +1,996 @@
+BZ 1849511 - resolve covscan and other issues from upstream QA 
+f7f1dd546 pmproxy: complete handling of HTTP/1.1 TRACE requests
+cc662872b qa: add pcp-free-tera archive to pcp-testsuite package
+80639d05b pmlogger_check.sh: major overhaul (diags and systemd fixups)
+460b7ac2a src/pmlogger/rc_pmlogger: use --quick to pmlogger_check
+0b3b4d4ee src/pmlogger/pmlogger_check.service.in: add --skip-primary arg to pmlogger_check
+3a68366a8 src/pmlogger/pmlogger.service.in: change ancillary services from After to Before
+5d65a6035 src/pmlogger/pmlogger_daily.sh: similar changes to pmlogger_check.sh
+ace576907 src/pmlogger/pmlogger_check.sh: fix locking snarfoo
+2b2c3db11 src/pmlogger/pmlogger_daily.sh: fix diagnostic spaghetti
+4cc54287f pmproxy: allow URLs up to 8k in length
+
+diff -auNr pcp-5.1.1-004/man/man1/pmlogger_check.1 pcp-5.1.1-005/man/man1/pmlogger_check.1
+--- pcp-5.1.1-004/man/man1/pmlogger_check.1	2020-04-07 13:31:03.000000000 +1000
++++ pcp-5.1.1-005/man/man1/pmlogger_check.1	2020-06-22 20:08:18.454403788 +1000
+@@ -19,7 +19,7 @@
+ \f3pmlogger_daily\f1 \- administration of Performance Co-Pilot archive log files
+ .SH SYNOPSIS
+ .B $PCP_BINADM_DIR/pmlogger_check
+-[\f3\-CNsTV?\f1]
++[\f3\-CNpqsTV?\f1]
+ [\f3\-c\f1 \f2control\f1]
+ [\f3\-l\f1 \f2logfile\f1]
+ .br
+@@ -269,6 +269,20 @@
+ .TP
+ \fB\-p\fR
+ If this option is specified for
++.B pmlogger_check
++then any line from the control files for the
++.I primary
++.B pmlogger
++will be ignored.
++This option is intended for environments where some system daemon,
++like
++.BR systemd (1),
++is responsible for controlling (starting, stopping, restarting, etc.) the
++.I primary
++.BR pmlogger .
++.TP
++\fB\-p\fR
++If this option is specified for
+ .B pmlogger_daily
+ then the status of the daily processing is polled and if the daily
+ .BR pmlogger (1)
+@@ -296,6 +310,12 @@
+ .B pmlogger_daily
+ are mutually exclusive.
+ .TP
++\fB\-q\fR
++If this option is specified for
++.B pmlogger_check
++then the script will ``quickstart'', avoiding any optional processing
++like file compression.
++.TP
+ \fB\-r\fR, \fB\-\-norewrite\fR
+ This command line option acts as an override and prevents all archive
+ rewriting with
+diff -auNr pcp-5.1.1-004/qa/1837 pcp-5.1.1-005/qa/1837
+--- pcp-5.1.1-004/qa/1837	2020-06-22 20:00:17.636331169 +1000
++++ pcp-5.1.1-005/qa/1837	2020-06-22 20:08:18.457403819 +1000
+@@ -1,6 +1,6 @@
+ #!/bin/sh
+ # PCP QA Test No. 1837
+-# Exercise PMWEBAPI handling server OPTIONS.
++# Exercise PMWEBAPI handling server OPTIONS and TRACE.
+ #
+ # Copyright (c) 2020 Red Hat.  All Rights Reserved.
+ #
+@@ -43,7 +43,12 @@
+ # real QA test starts here
+ _service pmproxy restart >/dev/null 2>&1
+ 
+-curl -isS --request-target "*" -X OPTIONS http://localhost:44322 \
++echo; echo "=== OPTIONS"
++curl -isS -X OPTIONS --request-target "*" http://localhost:44322 \
++	2>&1 | tee -a $here/$seq.full | _webapi_header_filter
++
++echo; echo "=== TRACE"
++curl -isS -X TRACE http://localhost:44322 \
+ 	2>&1 | tee -a $here/$seq.full | _webapi_header_filter
+ 
+ echo >>$here/$seq.full
+diff -auNr pcp-5.1.1-004/qa/1837.out pcp-5.1.1-005/qa/1837.out
+--- pcp-5.1.1-004/qa/1837.out	2020-06-22 20:00:17.637331179 +1000
++++ pcp-5.1.1-005/qa/1837.out	2020-06-22 20:08:18.457403819 +1000
+@@ -1,6 +1,17 @@
+ QA output created by 1837
+ 
++=== OPTIONS
++
+ Access-Control-Allow-Methods: GET, PUT, HEAD, POST, TRACE, OPTIONS
+ Content-Length: 0
+ Date: DATE
+ HTTP/1.1 200 OK
++
++=== TRACE
++
++Accept: */*
++Content-Length: 0
++Date: DATE
++HTTP/1.1 200 OK
++Host: localhost:44322
++User-Agent: curl VERSION
+diff -auNr pcp-5.1.1-004/qa/archives/GNUmakefile pcp-5.1.1-005/qa/archives/GNUmakefile
+--- pcp-5.1.1-004/qa/archives/GNUmakefile	2020-03-19 15:15:42.000000000 +1100
++++ pcp-5.1.1-005/qa/archives/GNUmakefile	2020-06-22 20:08:18.461403861 +1000
+@@ -35,6 +35,7 @@
+ 	  pcp-atop.0.xz pcp-atop.meta pcp-atop.index \
+ 	  pcp-atop-boot.0.xz pcp-atop-boot.meta pcp-atop-boot.index \
+ 	  pcp-dstat.0.xz pcp-dstat.meta pcp-dstat.index \
++	  pcp-free-tera.0.xz pcp-free-tera.meta.xz pcp-free-tera.index \
+ 	  pcp-hotatop.0.xz pcp-hotatop.meta pcp-hotatop.index \
+ 	  pcp-zeroconf.0.xz pcp-zeroconf.meta pcp-zeroconf.index \
+ 	  value-test.0.xz value-test.meta value-test.index \
+diff -auNr pcp-5.1.1-004/qa/common.check pcp-5.1.1-005/qa/common.check
+--- pcp-5.1.1-004/qa/common.check	2020-06-22 20:00:17.637331179 +1000
++++ pcp-5.1.1-005/qa/common.check	2020-06-22 20:08:18.459403840 +1000
+@@ -2697,6 +2697,7 @@
+     | col -b \
+     | sed \
+ 	-e 's/^\(Content-Length:\) [1-9][0-9]*/\1 SIZE/g' \
++	-e 's/^\(User-Agent: curl\).*/\1 VERSION/g' \
+ 	-e 's/^\(Date:\).*/\1 DATE/g' \
+ 	-e 's/\(\"context\":\) [0-9][0-9]*/\1 CTXID/g' \
+ 	-e '/^Connection: Keep-Alive/d' \
+diff -auNr pcp-5.1.1-004/src/pmlogger/pmlogger_check.service.in pcp-5.1.1-005/src/pmlogger/pmlogger_check.service.in
+--- pcp-5.1.1-004/src/pmlogger/pmlogger_check.service.in	2020-05-22 16:40:09.000000000 +1000
++++ pcp-5.1.1-005/src/pmlogger/pmlogger_check.service.in	2020-06-22 20:08:18.452403767 +1000
+@@ -6,7 +6,7 @@
+ [Service]
+ Type=oneshot
+ TimeoutStartSec=25m
+-Environment="PMLOGGER_CHECK_PARAMS=-C"
++Environment="PMLOGGER_CHECK_PARAMS=-C --skip-primary"
+ EnvironmentFile=-@PCP_SYSCONFIG_DIR@/pmlogger_timers
+ ExecStart=@PCP_BINADM_DIR@/pmlogger_check $PMLOGGER_CHECK_PARAMS
+ WorkingDirectory=@PCP_VAR_DIR@
+diff -auNr pcp-5.1.1-004/src/pmlogger/pmlogger_check.sh pcp-5.1.1-005/src/pmlogger/pmlogger_check.sh
+--- pcp-5.1.1-004/src/pmlogger/pmlogger_check.sh	2020-05-04 09:52:04.000000000 +1000
++++ pcp-5.1.1-005/src/pmlogger/pmlogger_check.sh	2020-06-22 20:13:04.029416598 +1000
+@@ -36,16 +36,24 @@
+ echo >$tmp/lock
+ prog=`basename $0`
+ PROGLOG=$PCP_LOG_DIR/pmlogger/$prog.log
++MYPROGLOG=$PROGLOG.$$
+ USE_SYSLOG=true
+ 
+ _cleanup()
+ {
++    if [ -s "$MYPROGLOG" ]
++    then
++	rm -f "$PROGLOG"
++	mv "$MYPROGLOG" "$PROGLOG"
++    else
++	rm -f "$MYPROGLOG"
++    fi
+     $USE_SYSLOG && [ $status -ne 0 ] && \
+     $PCP_SYSLOG_PROG -p daemon.error "$prog failed - see $PROGLOG"
+-    [ -s "$PROGLOG" ] || rm -f "$PROGLOG"
+     lockfile=`cat $tmp/lock 2>/dev/null`
+     rm -f "$lockfile"
+     rm -rf $tmp
++    $VERY_VERBOSE && echo "End: `date '+%F %T.%N'`"
+ }
+ trap "_cleanup; exit \$status" 0 1 2 3 15
+ 
+@@ -86,6 +94,8 @@
+ CHECK_RUNLEVEL=false
+ START_PMLOGGER=true
+ STOP_PMLOGGER=false
++QUICKSTART=false
++SKIP_PRIMARY=false
+ 
+ echo > $tmp/usage
+ cat >> $tmp/usage << EOF
+@@ -94,6 +104,8 @@
+   -l=FILE,--logfile=FILE  send important diagnostic messages to FILE
+   -C                      query system service runlevel information
+   -N,--showme             perform a dry run, showing what would be done
++  -p,--skip-primary       do not start or stop the primary pmlogger instance
++  -q,--quick              quick start, no compression
+   -s,--stop               stop pmlogger processes instead of starting them
+   -T,--terse              produce a terser form of output
+   -V,--verbose            increase diagnostic verbosity
+@@ -117,6 +129,7 @@
+ 	-C)	CHECK_RUNLEVEL=true
+ 		;;
+ 	-l)	PROGLOG="$2"
++		MYPROGLOG="$PROGLOG".$$
+ 		USE_SYSLOG=false
+ 		daily_args="${daily_args} -l $2.from.check"
+ 		shift
+@@ -129,6 +142,10 @@
+ 		KILL="echo + kill"
+ 		daily_args="${daily_args} -N"
+ 		;;
++	-p)	SKIP_PRIMARY=true
++		;;
++	-q)	QUICKSTART=true
++		;;
+ 	-s)	START_PMLOGGER=false
+ 		STOP_PMLOGGER=true
+ 		;;
+@@ -162,9 +179,15 @@
+ 
+ _compress_now()
+ {
+-    # If $PCP_COMPRESSAFTER=0 in the control file(s), compress archives now.
+-    # Invoked just before exit when this script has finished successfully.
+-    $PCP_BINADM_DIR/pmlogger_daily -K $daily_args
++    if $QUICKSTART
++    then
++	$VERY_VERBOSE && echo "Skip compression, -q/--quick on command line"
++    else
++	# If $PCP_COMPRESSAFTER=0 in the control file(s), compress archives now.
++	# Invoked just before exit when this script has finished successfully.
++	$VERY_VERBOSE && echo "Doing compression ..."
++	$PCP_BINADM_DIR/pmlogger_daily -K $daily_args
++    fi
+ }
+ 
+ # after argument checking, everything must be logged to ensure no mail is
+@@ -187,26 +210,37 @@
+     #
+     # Exception ($SHOWME, above) is for -N where we want to see the output.
+     #
+-    touch "$PROGLOG"
+-    chown $PCP_USER:$PCP_GROUP "$PROGLOG" >/dev/null 2>&1
+-    exec 1>"$PROGLOG" 2>&1
++    touch "$MYPROGLOG"
++    chown $PCP_USER:$PCP_GROUP "$MYPROGLOG" >/dev/null 2>&1
++    exec 1>"$MYPROGLOG" 2>&1
++fi
++
++if $VERY_VERBOSE
++then
++    echo "Start: `date '+%F %T.%N'`"
++    if `which pstree >/dev/null 2>&1`
++    then
++	echo "Called from:"
++	pstree -spa $$
++	echo "--- end of pstree output ---"
++    fi
+ fi
+ 
+ # if SaveLogs exists in the $PCP_LOG_DIR/pmlogger directory then save
+-# $PROGLOG there as well with a unique name that contains the date and time
++# $MYPROGLOG there as well with a unique name that contains the date and time
+ # when we're run
+ #
+ if [ -d $PCP_LOG_DIR/pmlogger/SaveLogs ]
+ then
+-    now="`date '+%Y%m%d.%H.%M'`"
+-    link=`echo $PROGLOG | sed -e "s/$prog/SaveLogs\/$prog.$now/"`
++    now="`date '+%Y%m%d.%H.%M.%S'`"
++    link=`echo $MYPROGLOG | sed -e "s/$prog/SaveLogs\/$prog.$now/"`
+     if [ ! -f "$link" ]
+     then
+ 	if $SHOWME
+ 	then
+-	    echo "+ ln $PROGLOG $link"
++	    echo "+ ln $MYPROGLOG $link"
+ 	else
+-	    ln $PROGLOG $link
++	    ln $MYPROGLOG $link
+ 	fi
+     fi
+ fi
+@@ -273,7 +307,7 @@
+ 
+ _unlock()
+ {
+-    rm -f lock
++    rm -f "$1/lock"
+     echo >$tmp/lock
+ }
+ 
+@@ -395,6 +429,41 @@
+     echo "$pid"
+ }
+ 
++# wait for the local pmcd to get going for a primary pmlogger
++# (borrowed from qa/common.check)
++#
++# wait_for_pmcd [maxdelay]
++#
++_wait_for_pmcd()
++{
++    # 5 seconds default seems like a reasonable max time to get going
++    _can_wait=${1-5}
++    _limit=`expr $_can_wait \* 10`
++    _i=0
++    _dead=true
++    while [ $_i -lt $_limit ]
++    do
++	_sts=`pmprobe pmcd.numclients 2>/dev/null | $PCP_AWK_PROG '{print $2}'`
++	if [ "${_sts:-0}" -gt 0 ]
++	then
++	    # numval really > 0, we're done
++	    #
++	    _dead=false
++	    break
++	fi
++	pmsleep 0.1
++	_i=`expr $_i + 1`
++    done
++    if $_dead
++    then
++	date
++	echo "Arrgghhh ... pmcd at localhost failed to start after $_can_wait seconds"
++	echo "=== failing pmprobes ==="
++	pmprobe pmcd.numclients
++	status=1
++    fi
++}
++
+ _check_archive()
+ {
+     if [ ! -e "$logfile" ]
+@@ -531,7 +600,17 @@
+ 	cd "$here"
+ 	line=`expr $line + 1`
+ 
+-	$VERY_VERBOSE && echo "[$controlfile:$line] host=\"$host\" primary=\"$primary\" socks=\"$socks\" dir=\"$dir\" args=\"$args\""
++
++	if $VERY_VERBOSE 
++	then
++	    case "$host"
++	    in
++	    \#*|'')	# comment or empty
++			;;
++	    *)		echo "[$controlfile:$line] host=\"$host\" primary=\"$primary\" socks=\"$socks\" dir=\"$dir\" args=\"$args\""
++	    		;;
++	    esac
++	fi
+ 
+ 	case "$host"
+ 	in
+@@ -599,6 +678,15 @@
+ 	    continue
+ 	fi
+ 
++	# if -p/--skip-primary on the command line, do not process
++	# a control file line for the primary pmlogger
++	#
++	if $SKIP_PRIMARY && [ $primary = y ]
++	then
++	    $VERY_VERBOSE && echo "Skip, -p/--skip-primary on command line"
++	    continue
++	fi
++
+ 	# substitute LOCALHOSTNAME marker in this config line
+ 	# (differently for directory and pcp -h HOST arguments)
+ 	#
+@@ -610,7 +698,7 @@
+ 	then
+ 	    pflag=''
+ 	    [ $primary = y ] && pflag=' -P'
+-	    echo "Check pmlogger$pflag -h $host ... in $dir ..."
++	    echo "Checking for: pmlogger$pflag -h $host ... in $dir ..."
+ 	fi
+ 
+ 	# check for directory duplicate entries
+@@ -664,19 +752,25 @@
+ 	    delay=200	# tenths of a second
+ 	    while [ $delay -gt 0 ]
+ 	    do
+-		if pmlock -v lock >$tmp/out 2>&1
++		if pmlock -v "$dir/lock" >$tmp/out 2>&1
+ 		then
+-		    echo $dir/lock >$tmp/lock
++		    echo "$dir/lock" >$tmp/lock
++		    if $VERY_VERBOSE
++		    then
++			echo "Acquired lock:"
++			ls -l $dir/lock
++		    fi
+ 		    break
+ 		else
+ 		    [ -f $tmp/stamp ] || touch -t `pmdate -30M %Y%m%d%H%M` $tmp/stamp
+-		    if [ -z "`find lock -newer $tmp/stamp -print 2>/dev/null`" ]
++		    find $tmp/stamp -newer "$dir/lock" -print 2>/dev/null >$tmp/tmp
++		    if [ -s $tmp/tmp ]
+ 		    then
+-			if [ -f lock ]
++			if [ -f "$dir/lock" ]
+ 			then
+ 			    echo "$prog: Warning: removing lock file older than 30 minutes"
+ 			    LC_TIME=POSIX ls -l $dir/lock
+-			    rm -f lock
++			    rm -f "$dir/lock"
+ 			else
+ 			    # there is a small timing window here where pmlock
+ 			    # might fail, but the lock file has been removed by
+@@ -714,7 +808,7 @@
+ 			continue
+ 		    fi
+ 		fi
+-		if [ -f lock ]
++		if [ -f "$dir/lock" ]
+ 		then
+ 		    echo "$prog: Warning: is another PCP cron job running concurrently?"
+ 		    LC_TIME=POSIX ls -l $dir/lock
+@@ -753,6 +847,14 @@
+ 		    $VERY_VERBOSE && echo "primary pmlogger process $pid not running"
+ 		    pid=''
+ 		fi
++	    else
++		if $VERY_VERBOSE
++		then
++		    echo "$PCP_TMP_DIR/pmlogger/primary: missing?"
++		    echo "Contents of $PCP_TMP_DIR/pmlogger"
++		    ls -l $PCP_TMP_DIR/pmlogger
++		    echo "--- end of ls output ---"
++		fi
+ 	    fi
+ 	else
+ 	    for log in $PCP_TMP_DIR/pmlogger/[0-9]*
+@@ -798,6 +900,17 @@
+ 		#
+ 		PM_LOG_PORT_DIR="$PCP_TMP_DIR/pmlogger"
+ 		rm -f "$PM_LOG_PORT_DIR/primary"
++		# We really need the primary pmlogger to start and work, especially
++		# in the systemd world, so make sure pmcd is ready to accept
++		# connections.
++		#
++		_wait_for_pmcd
++		if [ "$status" = 1 ]
++		then
++		    $VERY_VERBOSE && echo "pmcd not running, skip primary pmlogger"
++		    _unlock "$dir"
++		    continue
++		fi
+ 	    else
+ 		args="-h $host $args"
+ 		envs=""
+@@ -870,7 +983,7 @@
+ 	    then
+ 		echo
+ 		echo "+ ${sock_me}$PMLOGGER $args $LOGNAME"
+-		_unlock
++		_unlock "$dir"
+ 		continue
+ 	    else
+ 		$PCP_BINADM_DIR/pmpost "start pmlogger from $prog for host $host"
+@@ -903,7 +1016,7 @@
+ 	    $PCP_ECHO_PROG $PCP_ECHO_N "$pid ""$PCP_ECHO_C" >> $tmp/pmloggers
+ 	fi
+ 
+-	_unlock
++	_unlock "$dir"
+     done
+ }
+ 
+diff -auNr pcp-5.1.1-004/src/pmlogger/pmlogger_daily.sh pcp-5.1.1-005/src/pmlogger/pmlogger_daily.sh
+--- pcp-5.1.1-004/src/pmlogger/pmlogger_daily.sh	2020-04-07 13:31:03.000000000 +1000
++++ pcp-5.1.1-005/src/pmlogger/pmlogger_daily.sh	2020-06-22 20:08:18.451403756 +1000
+@@ -31,16 +31,24 @@
+ echo >$tmp/lock
+ prog=`basename $0`
+ PROGLOG=$PCP_LOG_DIR/pmlogger/$prog.log
++MYPROGLOG=$PROGLOG.$$
+ USE_SYSLOG=true
+ 
+ _cleanup()
+ {
++    if [ -s "$MYPROGLOG" ]
++    then
++	rm -f "$PROGLOG"
++	mv "$MYPROGLOG" "$PROGLOG"
++    else
++	rm -f "$MYPROGLOG"
++    fi
+     $USE_SYSLOG && [ $status -ne 0 ] && \
+     $PCP_SYSLOG_PROG -p daemon.error "$prog failed - see $PROGLOG"
+-    [ -s "$PROGLOG" ] || rm -f "$PROGLOG"
+     lockfile=`cat $tmp/lock 2>/dev/null`
+     rm -f "$lockfile" "$PCP_RUN_DIR/pmlogger_daily.pid"
+     rm -rf $tmp
++    $VERY_VERBOSE && echo "End: `date '+%F %T.%N'`"
+ }
+ trap "_cleanup; exit \$status" 0 1 2 3 15
+ 
+@@ -215,8 +223,10 @@
+ 		fi
+ 		COMPRESSONLY=true
+ 		PROGLOG=$PCP_LOG_DIR/pmlogger/$prog-K.log
++		MYPROGLOG=$PROGLOG.$$
+ 		;;
+ 	-l)	PROGLOG="$2"
++		MYPROGLOG=$PROGLOG.$$
+ 		USE_SYSLOG=false
+ 		shift
+ 		;;
+@@ -278,6 +288,7 @@
+ 		# $PCP_LOG_DIR/pmlogger/daily.<date>.trace
+ 		#
+ 		PROGLOG=$PCP_LOG_DIR/pmlogger/daily.`date "+%Y%m%d.%H.%M"`.trace
++		MYPROGLOG=$PROGLOG.$$
+ 		VERBOSE=true
+ 		VERY_VERBOSE=true
+ 		MYARGS="$MYARGS -V -V"
+@@ -418,13 +429,23 @@
+     #
+     # Exception ($SHOWME, above) is for -N where we want to see the output.
+     #
+-    touch "$PROGLOG"
+-    chown $PCP_USER:$PCP_GROUP "$PROGLOG" >/dev/null 2>&1
+-    exec 1>"$PROGLOG" 2>&1
++    touch "$MYPROGLOG"
++    chown $PCP_USER:$PCP_GROUP "$MYPROGLOG" >/dev/null 2>&1
++    exec 1>"$MYPROGLOG" 2>&1
++fi
++
++if $VERY_VERBOSE
++then
++    echo "Start: `date '+%F %T.%N'`"
++    if `which pstree >/dev/null 2>&1`
++    then
++	echo "Called from:"
++	pstree -spa $$
++    fi
+ fi
+ 
+ # if SaveLogs exists in the $PCP_LOG_DIR/pmlogger directory then save
+-# $PROGLOG there as well with a unique name that contains the date and time
++# $MYPROGLOG there as well with a unique name that contains the date and time
+ # when we're run ... skip if -N (showme)
+ #
+ if $SHOWME
+@@ -433,15 +454,15 @@
+ else
+     if [ -d $PCP_LOG_DIR/pmlogger/SaveLogs ]
+     then
+-	now="`date '+%Y%m%d.%H.%M'`"
+-	link=`echo $PROGLOG | sed -e "s/$prog/SaveLogs\/$prog.$now/"`
++	now="`date '+%Y%m%d.%H.%M.%S'`"
++	link=`echo $MYPROGLOG | sed -e "s/$prog/SaveLogs\/$prog.$now/"`
+ 	if [ ! -f "$link" ]
+ 	then
+ 	    if $SHOWME
+ 	    then
+-		echo "+ ln $PROGLOG $link"
++		echo "+ ln $MYPROGLOG $link"
+ 	    else
+-		ln $PROGLOG $link
++		ln $MYPROGLOG $link
+ 	    fi
+ 	fi
+     fi
+@@ -487,19 +508,20 @@
+ 	delay=200	# tenths of a second
+ 	while [ $delay -gt 0 ]
+ 	do
+-	    if pmlock -v lock >>$tmp/out 2>&1
++	    if pmlock -v "$1/lock" >>$tmp/out 2>&1
+ 	    then
+-		echo $1/lock >$tmp/lock
++		echo "$1/lock" >$tmp/lock
+ 		break
+ 	    else
+ 		[ -f $tmp/stamp ] || touch -t `pmdate -30M %Y%m%d%H%M` $tmp/stamp
+-		if [ ! -z "`find lock -newer $tmp/stamp -print 2>/dev/null`" ]
++		find $tmp/stamp -newer "$1/lock" -print 2>/dev/null >$tmp/tmp
++		if [ -s $tmp/tmp ]
+ 		then
+-		    if [ -f lock ]
++		    if [ -f "$1/lock" ]
+ 		    then
+ 			_warning "removing lock file older than 30 minutes"
+-			LC_TIME=POSIX ls -l $1/lock
+-			rm -f lock
++			LC_TIME=POSIX ls -l "$1/lock"
++			rm -f "$1/lock"
+ 		    else
+ 			# there is a small timing window here where pmlock
+ 			# might fail, but the lock file has been removed by
+@@ -517,10 +539,10 @@
+ 	then
+ 	    # failed to gain mutex lock
+ 	    #
+-	    if [ -f lock ]
++	    if [ -f "$1/lock" ]
+ 	    then
+ 		_warning "is another PCP cron job running concurrently?"
+-		LC_TIME=POSIX ls -l $1/lock
++		LC_TIME=POSIX ls -l "$1/lock"
+ 	    else
+ 		echo "$prog: `cat $tmp/out`"
+ 	    fi
+@@ -534,7 +556,7 @@
+ 
+ _unlock()
+ {
+-    rm -f lock
++    rm -f "$1/lock"
+     echo >$tmp/lock
+ }
+ 
+@@ -703,6 +725,9 @@
+ # if the directory containing the archive matches, then the name
+ # of the file is the pid.
+ #
++# The pid(s) (if any) appear on stdout, so be careful to send any
++# diagnostics to stderr.
++#
+ _get_non_primary_logger_pid()
+ {
+     pid=''
+@@ -713,7 +738,7 @@
+ 	then
+ 	    _host=`sed -n 2p <$log`
+ 	    _arch=`sed -n 3p <$log`
+-	    $PCP_ECHO_PROG $PCP_ECHO_N "... try $log host=$_host arch=$_arch: ""$PCP_ECHO_C"
++	    $PCP_ECHO_PROG >&2 $PCP_ECHO_N "... try $log host=$_host arch=$_arch: ""$PCP_ECHO_C"
+ 	fi
+ 	# throw away stderr in case $log has been removed by now
+ 	match=`sed -e '3s@/[^/]*$@@' $log 2>/dev/null | \
+@@ -721,19 +746,19 @@
+ BEGIN				{ m = 0 }
+ NR == 3 && $0 == "'$dir'"	{ m = 2; next }
+ END				{ print m }'`
+-	$VERY_VERBOSE && $PCP_ECHO_PROG $PCP_ECHO_N "match=$match ""$PCP_ECHO_C"
++	$VERY_VERBOSE && $PCP_ECHO_PROG >&2 $PCP_ECHO_N "match=$match ""$PCP_ECHO_C"
+ 	if [ "$match" = 2 ]
+ 	then
+ 	    pid=`echo $log | sed -e 's,.*/,,'`
+ 	    if _get_pids_by_name pmlogger | grep "^$pid\$" >/dev/null
+ 	    then
+-		$VERY_VERBOSE && echo "pmlogger process $pid identified, OK"
++		$VERY_VERBOSE && echo >&2 "pmlogger process $pid identified, OK"
+ 		break
+ 	    fi
+-	    $VERY_VERBOSE && echo "pmlogger process $pid not running, skip"
++	    $VERY_VERBOSE && echo >&2 "pmlogger process $pid not running, skip"
+ 	    pid=''
+ 	else
+-	    $VERY_VERBOSE && echo "different directory, skip"
++	    $VERY_VERBOSE && echo >&2 "different directory, skip"
+ 	fi
+     done
+     echo "$pid"
+@@ -1028,6 +1053,8 @@
+ 		pid=''
+ 	    fi
+ 	else
++	    # pid(s) on stdout, diagnostics on stderr
++	    #
+ 	    pid=`_get_non_primary_logger_pid`
+ 	    if $VERY_VERBOSE
+ 	    then
+@@ -1458,7 +1485,7 @@
+ 	    fi
+ 	fi
+ 
+-	_unlock
++	_unlock "$dir"
+     done
+ }
+ 
+diff -auNr pcp-5.1.1-004/src/pmlogger/pmlogger.service.in pcp-5.1.1-005/src/pmlogger/pmlogger.service.in
+--- pcp-5.1.1-004/src/pmlogger/pmlogger.service.in	2020-06-22 20:00:17.634331148 +1000
++++ pcp-5.1.1-005/src/pmlogger/pmlogger.service.in	2020-06-22 20:08:18.452403767 +1000
+@@ -2,7 +2,7 @@
+ Description=Performance Metrics Archive Logger
+ Documentation=man:pmlogger(1)
+ After=network-online.target pmcd.service
+-After=pmlogger_check.timer pmlogger_check.path pmlogger_daily.timer pmlogger_daily-poll.timer
++Before=pmlogger_check.timer pmlogger_check.path pmlogger_daily.timer pmlogger_daily-poll.timer
+ BindsTo=pmlogger_check.timer pmlogger_check.path pmlogger_daily.timer pmlogger_daily-poll.timer
+ Wants=pmcd.service
+ 
+diff -auNr pcp-5.1.1-004/src/pmlogger/rc_pmlogger pcp-5.1.1-005/src/pmlogger/rc_pmlogger
+--- pcp-5.1.1-004/src/pmlogger/rc_pmlogger	2020-04-21 10:42:02.000000000 +1000
++++ pcp-5.1.1-005/src/pmlogger/rc_pmlogger	2020-06-22 20:08:18.453403777 +1000
+@@ -96,7 +96,7 @@
+     bgtmp=`mktemp -d $PCP_DIR/var/tmp/pcp.XXXXXXXXX` || exit 1
+     trap "rm -rf $bgtmp; exit \$bgstatus" 0 1 2 3 15
+ 
+-    pmlogger_check $VFLAG >$bgtmp/pmcheck.out 2>$bgtmp/pmcheck
++    pmlogger_check --quick $VFLAG >$bgtmp/pmcheck.out 2>$bgtmp/pmcheck
+     bgstatus=$?
+     if [ -s $bgtmp/pmcheck ]
+     then
+@@ -125,8 +125,6 @@
+ 	    false
+ 	else
+ 	    # Really start the pmlogger instances based on the control file.
+-	    # Done in the background to avoid delaying the init script,
+-	    # failure notification is external (syslog, log files).
+ 	    #
+ 	    $ECHO $PCP_ECHO_N "Starting pmlogger ..." "$PCP_ECHO_C"
+ 
+@@ -234,11 +232,9 @@
+ if [ $VERBOSE_CTL = on ]
+ then				# For a verbose startup and shutdown
+     ECHO=$PCP_ECHO_PROG
+-    REBUILDOPT=''
+     VFLAG='-V'
+ else				# For a quiet startup and shutdown
+     ECHO=:
+-    REBUILDOPT=-s
+     VFLAG=
+ fi
+ 
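
For context (not itself part of the patch), the two new pmlogger_check options above are
wired up elsewhere in this same patch: the systemd service passes --skip-primary so the
timer-driven check never touches the primary logger that pmlogger.service owns, and the
rc script passes --quick so boot-time startup is not delayed by compression:

    pmlogger_check -C --skip-primary    # as invoked from pmlogger_check.service
    pmlogger_check --quick              # as invoked from rc_pmlogger
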
+diff -auNr pcp-5.1.1-004/src/pmproxy/src/http.c pcp-5.1.1-005/src/pmproxy/src/http.c
+--- pcp-5.1.1-004/src/pmproxy/src/http.c	2020-06-22 20:00:17.635331158 +1000
++++ pcp-5.1.1-005/src/pmproxy/src/http.c	2020-06-22 20:08:18.460403851 +1000
+@@ -324,17 +324,36 @@
+ }
+ 
+ static sds
+-http_response_trace(struct client *client)
++http_response_trace(struct client *client, int sts)
+ {
++    struct http_parser	*parser = &client->u.http.parser;
+     dictIterator	*iterator;
+     dictEntry		*entry;
+-    sds			result = sdsempty();
++    char		buffer[64];
++    sds			header;
++
++    parser->http_major = parser->http_minor = 1;
++
++    header = sdscatfmt(sdsempty(),
++		"HTTP/%u.%u %u %s\r\n"
++		"%S: Keep-Alive\r\n",
++		parser->http_major, parser->http_minor,
++		sts, http_status_mapping(sts), HEADER_CONNECTION);
++    header = sdscatfmt(header, "%S: %u\r\n", HEADER_CONTENT_LENGTH, 0);
+ 
+     iterator = dictGetSafeIterator(client->u.http.headers);
+     while ((entry = dictNext(iterator)) != NULL)
+-	result = sdscatfmt("%S: %S\r\n", dictGetKey(entry), dictGetVal(entry));
++	header = sdscatfmt(header, "%S: %S\r\n", dictGetKey(entry), dictGetVal(entry));
+     dictReleaseIterator(iterator);
+-    return result;
++
++    header = sdscatfmt(header, "Date: %s\r\n\r\n",
++		http_date_string(time(NULL), buffer, sizeof(buffer)));
++
++    if (pmDebugOptions.http && pmDebugOptions.desperate) {
++	fprintf(stderr, "trace response to client %p\n", client);
++	fputs(header, stderr);
++    }
++    return header;
+ }
+ 
+ static sds
+@@ -418,7 +437,7 @@
+ 	if (client->u.http.parser.method == HTTP_OPTIONS)
+ 	    buffer = http_response_access(client, sts, options);
+ 	else if (client->u.http.parser.method == HTTP_TRACE)
+-	    buffer = http_response_trace(client);
++	    buffer = http_response_trace(client, sts);
+ 	else	/* HTTP_HEAD */
+ 	    buffer = http_response_header(client, 0, sts, type);
+ 	suffix = NULL;
+@@ -533,6 +552,8 @@
+     if (servlet && servlet->on_release)
+ 	servlet->on_release(client);
+     client->u.http.privdata = NULL;
++    client->u.http.servlet = NULL;
++    client->u.http.flags = 0;
+ 
+     if (client->u.http.headers) {
+ 	dictRelease(client->u.http.headers);
+@@ -696,29 +717,39 @@
+ {
+     struct client	*client = (struct client *)request->data;
+     struct servlet	*servlet;
+-    sds			buffer;
+     int			sts;
+ 
+     http_client_release(client);	/* new URL, clean slate */
+-    /* server options - https://tools.ietf.org/html/rfc7231#section-4.3.7 */
+-    if (length == 1 && *offset == '*' &&
+-	client->u.http.parser.method == HTTP_OPTIONS) {
+-	buffer = http_response_access(client, HTTP_STATUS_OK, HTTP_SERVER_OPTIONS);
+-	client_write(client, buffer, NULL);
+-    } else if ((servlet = servlet_lookup(client, offset, length)) != NULL) {
++    /* pass to servlets handling each of our internal request endpoints */
++    if ((servlet = servlet_lookup(client, offset, length)) != NULL) {
+ 	client->u.http.servlet = servlet;
+-	if ((sts = client->u.http.parser.status_code) == 0) {
++	if ((sts = client->u.http.parser.status_code) != 0)
++	    http_error(client, sts, "failed to process URL");
++	else {
+ 	    if (client->u.http.parser.method == HTTP_OPTIONS ||
+ 		client->u.http.parser.method == HTTP_TRACE ||
+ 		client->u.http.parser.method == HTTP_HEAD)
+ 		client->u.http.flags |= HTTP_FLAG_NO_BODY;
+-	    else
+-		client->u.http.flags &= ~HTTP_FLAG_NO_BODY;
+ 	    client->u.http.headers = dictCreate(&sdsOwnDictCallBacks, NULL);
+-	    return 0;
+ 	}
+-	http_error(client, sts, "failed to process URL");
+-    } else {
++    }
++    /* server options - https://tools.ietf.org/html/rfc7231#section-4.3.7 */
++    else if (client->u.http.parser.method == HTTP_OPTIONS) {
++	if (length == 1 && *offset == '*') {
++	    client->u.http.flags |= HTTP_FLAG_NO_BODY;
++	    client->u.http.headers = dictCreate(&sdsOwnDictCallBacks, NULL);
++	} else {
++	    sts = client->u.http.parser.status_code = HTTP_STATUS_BAD_REQUEST;
++	    http_error(client, sts, "no handler for OPTIONS");
++	}
++    }
++    /* server trace - https://tools.ietf.org/html/rfc7231#section-4.3.8 */
++    else if (client->u.http.parser.method == HTTP_TRACE) {
++	client->u.http.flags |= HTTP_FLAG_NO_BODY;
++	client->u.http.headers = dictCreate(&sdsOwnDictCallBacks, NULL);
++    }
++    /* nothing available to respond to this request - inform the client */
++    else {
+ 	sts = client->u.http.parser.status_code = HTTP_STATUS_BAD_REQUEST;
+ 	http_error(client, sts, "no handler for URL");
+     }
+@@ -734,7 +765,7 @@
+     if (pmDebugOptions.http && pmDebugOptions.desperate)
+ 	printf("Body: %.*s\n(client=%p)\n", (int)length, offset, client);
+ 
+-    if (servlet->on_body)
++    if (servlet && servlet->on_body)
+ 	return servlet->on_body(client, offset, length);
+     return 0;
+ }
+@@ -828,7 +859,7 @@
+     }
+ 
+     client->u.http.privdata = NULL;
+-    if (servlet->on_headers)
++    if (servlet && servlet->on_headers)
+ 	sts = servlet->on_headers(client, client->u.http.headers);
+ 
+     /* HTTP Basic Auth for all servlets */
+@@ -857,13 +888,31 @@
+ {
+     struct client	*client = (struct client *)request->data;
+     struct servlet	*servlet = client->u.http.servlet;
++    sds			buffer;
++    int			sts;
+ 
+     if (pmDebugOptions.http)
+ 	fprintf(stderr, "HTTP message complete (client=%p)\n", client);
+ 
+-    if (servlet && servlet->on_done)
+-	return servlet->on_done(client);
+-    return 0;
++    if (servlet) {
++	if (servlet->on_done)
++	    return servlet->on_done(client);
++	return 0;
++    }
++
++    sts = HTTP_STATUS_OK;
++    if (client->u.http.parser.method == HTTP_OPTIONS) {
++	buffer = http_response_access(client, sts, HTTP_SERVER_OPTIONS);
++	client_write(client, buffer, NULL);
++	return 0;
++    }
++    if (client->u.http.parser.method == HTTP_TRACE) {
++	buffer = http_response_trace(client, sts);
++	client_write(client, buffer, NULL);
++	return 0;
++    }
++
++    return 1;
+ }
+ 
+ void
+diff -auNr pcp-5.1.1.orig/qa/1608 pcp-5.1.1/qa/1608
+--- pcp-5.1.1.orig/qa/1608	1970-01-01 10:00:00.000000000 +1000
++++ pcp-5.1.1/qa/1608	2020-06-23 12:16:04.005557293 +1000
+@@ -0,0 +1,58 @@
++#!/bin/sh
++# PCP QA Test No. 1608
++# Exercise a long URL handling in pmproxy.
++#
++# Copyright (c) 2020 Red Hat.  All Rights Reserved.
++#
++
++seq=`basename $0`
++echo "QA output created by $seq"
++
++# get standard environment, filters and checks
++. ./common.product
++. ./common.filter
++. ./common.check
++
++_check_series
++which curl >/dev/null 2>&1 || _notrun "No curl binary installed"
++
++status=1	# failure is the default!
++$sudo rm -rf $tmp $tmp.* $seq.full
++trap "_cleanup; exit \$status" 0 1 2 3 15
++
++pmproxy_was_running=false
++[ -f $PCP_RUN_DIR/pmproxy.pid ] && pmproxy_was_running=true
++echo "pmproxy_was_running=$pmproxy_was_running" >>$here/$seq.full
++
++_cleanup()
++{
++    if $pmproxy_was_running
++    then
++        echo "Restart pmproxy ..." >>$here/$seq.full
++        _service pmproxy restart >>$here/$seq.full 2>&1
++        _wait_for_pmproxy
++    else
++        echo "Stopping pmproxy ..." >>$here/$seq.full
++        _service pmproxy stop >>$here/$seq.full 2>&1
++    fi
++    $sudo rm -f $tmp.*
++}
++
++_webapi_failure_filter()
++{
++    _webapi_header_filter | \
++    sed \
++	-e 's/pmproxy.[0-9][0-9]*.[0-9][0-9]*.[0-9][0-9]*/PMPROXY\/VERSION/g' \
++    #end
++}
++
++# real QA test starts here
++_service pmproxy restart >/dev/null 2>&1
++
++url="http://localhost:44322/pmapi/context"
++aaa=`head -c 10000 < /dev/zero | tr '\0' '\141'`
++curl -isS -X OPTIONS "${url}?${aaa}" | _webapi_failure_filter
++
++# success, all done
++status=0
++exit
+diff -auNr pcp-5.1.1.orig/qa/1608.out pcp-5.1.1/qa/1608.out
+--- pcp-5.1.1.orig/qa/1608.out	1970-01-01 10:00:00.000000000 +1000
++++ pcp-5.1.1/qa/1608.out	2020-06-23 12:16:04.005557293 +1000
+@@ -0,0 +1,16 @@
++QA output created by 1608
++
++</body>
++</html>
++<body>
++<h1>414 URI Too Long</h1>
++<head><title>414 URI Too Long</title></head>
++<html>
++<p><b>unknown servlet</b>: request URL too long</p><hr>
++<p><small><i>PMPROXY/VERSION</i></small></p>
++Access-Control-Allow-Headers: Accept, Accept-Language, Content-Language, Content-Type
++Access-Control-Allow-Origin: *
++Content-Length: SIZE
++Content-Type: text/html
++Date: DATE
++HTTP/1.1 414 URI Too Long
+diff -auNr pcp-5.1.1.orig/qa/group pcp-5.1.1/qa/group
+--- pcp-5.1.1.orig/qa/group	2020-06-23 12:15:21.335094106 +1000
++++ pcp-5.1.1/qa/group	2020-06-23 12:16:54.256102754 +1000
+@@ -1717,6 +1717,7 @@
+ 1600 pmseries pmcd pmproxy pmlogger local
+ 1601 pmseries pmproxy local
+ 1602 pmproxy local
++1608 pmproxy local
+ 1622 selinux local
+ 1623 libpcp_import collectl local
+ 1644 pmda.perfevent local
+diff -auNr pcp-5.1.1.orig/src/pmproxy/src/http.c pcp-5.1.1/src/pmproxy/src/http.c
+--- pcp-5.1.1.orig/src/pmproxy/src/http.c	2020-06-23 12:15:21.364094421 +1000
++++ pcp-5.1.1/src/pmproxy/src/http.c	2020-06-23 12:16:04.008557325 +1000
+@@ -21,7 +21,9 @@
+ static int chunked_transfer_size; /* pmproxy.chunksize, pagesize by default */
+ static int smallest_buffer_size = 128;
+ 
+-#define MAX_PARAMS_SIZE 4096
++/* https://tools.ietf.org/html/rfc7230#section-3.1.1 */
++#define MAX_URL_SIZE	8192
++#define MAX_PARAMS_SIZE 8000
+ #define MAX_HEADERS_SIZE 128
+ 
+ static sds HEADER_ACCESS_CONTROL_REQUEST_HEADERS,
+@@ -720,8 +722,13 @@
+     int			sts;
+ 
+     http_client_release(client);	/* new URL, clean slate */
++
++    if (length >= MAX_URL_SIZE) {
++	sts = client->u.http.parser.status_code = HTTP_STATUS_URI_TOO_LONG;
++	http_error(client, sts, "request URL too long");
++    }
+     /* pass to servlets handling each of our internal request endpoints */
+-    if ((servlet = servlet_lookup(client, offset, length)) != NULL) {
++    else if ((servlet = servlet_lookup(client, offset, length)) != NULL) {
+ 	client->u.http.servlet = servlet;
+ 	if ((sts = client->u.http.parser.status_code) != 0)
+ 	    http_error(client, sts, "failed to process URL");
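
Taken together, the pmproxy changes in this patch can be sanity-checked by hand much as
qa/1837 and qa/1608 do (a sketch only; 44322 is the default pmproxy port the tests use):

    # TRACE now echoes the request headers back with a 200
    curl -isS -X TRACE http://localhost:44322

    # a query string beyond the new 8k limit draws 414 URI Too Long
    long=`head -c 10000 </dev/zero | tr '\0' 'a'`
    curl -isS "http://localhost:44322/pmapi/context?${long}"
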
diff --git a/SOURCES/s390x-interrupts.patch b/SOURCES/s390x-interrupts.patch
deleted file mode 100644
index 6a1bb59..0000000
--- a/SOURCES/s390x-interrupts.patch
+++ /dev/null
@@ -1,108 +0,0 @@
-commit 04be64dc8a08203decc6fe206700dcb1f06c8d79
-Author: Nathan Scott <nathans@redhat.com>
-Date:   Mon Feb 24 17:28:48 2020 +1100
-
-    pmdalinux: fix interrupts file parser for s390x /proc/interrupts layout
-    
-    The s390x interrupts file produced by the kernel was causing a failure
-    in pmdalinux because the code expected first numeric interrupt lines &
-    then named (text) lines, whereas on this platform they're intermixed.
-    
-    Add a sample interrupts file from these kernels for qa/886 to test.
-    
-    Resolves Red Hat BZ #1798058
-
-diff --git a/qa/886.out.bz2 b/qa/886.out.bz2
-index 59bfae0e2..8db30e566 100644
-Binary files a/qa/886.out.bz2 and b/qa/886.out.bz2 differ
-diff --git a/qa/linux/interrupts-16cpu-s390x b/qa/linux/interrupts-16cpu-s390x
-new file mode 100644
-index 000000000..574dec6b0
---- /dev/null
-+++ b/qa/linux/interrupts-16cpu-s390x
-@@ -0,0 +1,59 @@
-+           CPU0       CPU1       CPU2       CPU3       CPU4       CPU5       CPU6       CPU7       CPU8       CPU9       CPU10      CPU11      CPU12      CPU13      CPU14      CPU15      
-+EXT:      30368       5872      22695      18176      19463       5347      21306      15838      21533       6333      32165       7468      23182       5010      28665       6909 
-+I/O:        675        559        764        682        764        631        646        645        822        909        464        463        645        653        574        377 
-+AIO:         39         32        101        122         58         67         87         65        156        145         33         50         64         51         48         34 
-+  3:          6          4         43         44         13         22         37         26         64         55         11         29         20         15         10          7   PCI-MSI  mlx5_async@pci:0002:00:00.0
-+  4:          0          0          0          1          0          0          0          0          0          0          0          0          0          0          0          0   PCI-MSI  mlx5_comp0@pci:0002:00:00.0
-+  5:          0          0          0          0          0          0          0          0          0          0          0          1          0          0          0          0   PCI-MSI  mlx5_comp1@pci:0002:00:00.0
-+  6:          0          0          0          0          0          0          0          0          0          0          0          0          1          0          0          0   PCI-MSI  mlx5_comp2@pci:0002:00:00.0
-+  7:          0          0          0          0          0          1          0          0          0          0          0          0          0          0          0          0   PCI-MSI  mlx5_comp3@pci:0002:00:00.0
-+  8:          0          0          0          0          0          0          0          0          0          1          0          0          0          0          0          0   PCI-MSI  mlx5_comp4@pci:0002:00:00.0
-+  9:          0          0          0          0          0          0          0          0          0          0          0          0          0          1          0          0   PCI-MSI  mlx5_comp5@pci:0002:00:00.0
-+ 10:          0          0          0          0          0          0          0          0          0          0          0          0          0          1          0          0   PCI-MSI  mlx5_comp6@pci:0002:00:00.0
-+ 11:          0          0          1          0          0          0          0          0          0          0          0          0          0          0          0          0   PCI-MSI  mlx5_comp7@pci:0002:00:00.0
-+ 12:          0          0          0          0          0          0          0          0          0          0          1          0          0          0          0          0   PCI-MSI  mlx5_comp8@pci:0002:00:00.0
-+ 13:         15         11         39         49         24         25         18         13         64         64          3         12         27         13         22          7   PCI-MSI  mlx5_async@pci:0003:00:00.0
-+ 14:          0          0          0          0          0          0          0          0          0          0          0          0          0          0          0          1   PCI-MSI  mlx5_comp0@pci:0003:00:00.0
-+ 15:          0          0          0          1          0          0          0          0          0          0          0          0          0          0          0          0   PCI-MSI  mlx5_comp1@pci:0003:00:00.0
-+ 16:          0          0          0          0          0          0          0          0          0          0          0          0          0          0          0          1   PCI-MSI  mlx5_comp2@pci:0003:00:00.0
-+ 17:          0          0          0          0          0          0          0          0          0          0          0          0          0          0          1          0   PCI-MSI  mlx5_comp3@pci:0003:00:00.0
-+ 18:          0          0          0          0          0          0          1          0          0          0          0          0          0          0          0          0   PCI-MSI  mlx5_comp4@pci:0003:00:00.0
-+ 19:          0          0          0          0          0          0          0          0          0          0          0          0          1          0          0          0   PCI-MSI  mlx5_comp5@pci:0003:00:00.0
-+ 20:          0          0          0          0          1          0          0          0          0          0          0          0          0          0          0          0   PCI-MSI  mlx5_comp6@pci:0003:00:00.0
-+ 21:          0          0          0          0          0          0          0          0          0          0          0          1          0          0          0          0   PCI-MSI  mlx5_comp7@pci:0003:00:00.0
-+ 22:          0          0          0          0          0          0          0          0          0          0          0          0          0          1          0          0   PCI-MSI  mlx5_comp8@pci:0003:00:00.0
-+ 23:          0          0          0          0          0          0          0          0          0          0          0          0          0          0          0          0   PCI-MSI  0000:00:00.0
-+ 24:          0          0          0          0          0          0          0          0          0          0          0          0          0          0          0          0   PCI-MSI  0001:00:00.0
-+CLK:       2876       2246       9129      14527       2478       1653       2830       3374       8696       1867      12976       3002       2341       1935       3066       3063   [EXT] Clock Comparator
-+EXC:      27474       3626      13527       3649      16970       3694      18487      12464      12834       4466      19188       4466      20848       3077      25599       3846   [EXT] External Call
-+EMS:          0          0          0          1          0          0          0          0          0          0          0          0          0          0          0          0   [EXT] Emergency Signal
-+TMR:          0          0          0          0          0          0          0          0          0          0          0          0          0          0          0          0   [EXT] CPU Timer
-+TAL:          0          0          0          0          0          0          0          0          0          0          0          0          0          0          0          0   [EXT] Timing Alert
-+PFL:          0          0          0          0          0          0          0          0          0          0          0          0          0          0          0          0   [EXT] Pseudo Page Fault
-+DSD:          0          0          0          0          0          0          0          0          0          0          0          0          0          0          0          0   [EXT] DASD Diag
-+VRT:          0          0          0          0          0          0          0          0          0          0          0          0          0          0          0          0   [EXT] Virtio
-+SCP:         18          0         39          0         15          0          5          0          3          0          1          0          0          0          0          0   [EXT] Service Call
-+IUC:          0          0          0          0          0          0          0          0          0          0          0          0          0          0          0          0   [EXT] IUCV
-+CMS:          0          0          0          0          0          0          0          0          0          0          0          0          0          0          0          0   [EXT] CPU-Measurement: Sampling
-+CMC:          0          0          0          0          0          0          0          0          0          0          0          0          0          0          0          0   [EXT] CPU-Measurement: Counter
-+FTP:          0          0          0          0          0          0          0          0          0          0          0          0          0          0          0          0   [EXT] HMC FTP Service
-+CIO:         62         57         80         90        101         81         91         81         93        112         59         46         70         97         46         37   [I/O] Common I/O Layer Interrupt
-+DAS:        613        502        684        592        663        550        555        564        729        797        405        417        575        556        528        340   [I/O] DASD
-+C15:          0          0          0          0          0          0          0          0          0          0          0          0          0          0          0          0   [I/O] 3215
-+C70:          0          0          0          0          0          0          0          0          0          0          0          0          0          0          0          0   [I/O] 3270
-+TAP:          0          0          0          0          0          0          0          0          0          0          0          0          0          0          0          0   [I/O] Tape
-+VMR:          0          0          0          0          0          0          0          0          0          0          0          0          0          0          0          0   [I/O] Unit Record Devices
-+LCS:          0          0          0          0          0          0          0          0          0          0          0          0          0          0          0          0   [I/O] LCS
-+CTC:          0          0          0          0          0          0          0          0          0          0          0          0          0          0          0          0   [I/O] CTC
-+ADM:          0          0          0          0          0          0          0          0          0          0          0          0          0          0          0          0   [I/O] EADM Subchannel
-+CSC:          0          0          0          0          0          0          0          0          0          0          0          0          0          0          0          0   [I/O] CHSC Subchannel
-+VIR:          0          0          0          0          0          0          0          0          0          0          0          0          0          0          0          0   [I/O] Virtual I/O Devices
-+QAI:         18         17         18         27         20         20         31         26         28         25         18          7         15         20         15         18   [AIO] QDIO Adapter Interrupt
-+APB:          0          0          0          0          0          0          0          0          0          0          0          0          0          0          0          0   [AIO] AP Bus
-+PCF:         21         15         83         95         38         48         56         39        128        120         15         43         49         31         33         16   [AIO] PCI Floating Interrupt
-+PCD:          0          0          0          0          0          0          0          0          0          0          0          0          0          0          0          0   [AIO] PCI Directed Interrupt
-+MSI:         21         15         83         95         38         48         56         39        128        120         15         43         49         31         33         16   [AIO] MSI Interrupt
-+VAI:          0          0          0          0          0          0          0          0          0          0          0          0          0          0          0          0   [AIO] Virtual I/O Devices AI
-+GAL:          0          0          0          0          0          0          0          0          0          0          0          0          0          0          0          0   [AIO] GIB Alert
-+NMI:          0          0          0          0          0          0          0          0          0          0          0          0          0          0          0          0   [NMI] Machine Check
-+RST:          0          1          1          1          1          1          1          1          1          1          1          1          1          1          1          1   [CPU] CPU Restart
-diff --git a/src/pmdas/linux/interrupts.c b/src/pmdas/linux/interrupts.c
-index f8a4d9b1b..f57af9e43 100644
---- a/src/pmdas/linux/interrupts.c
-+++ b/src/pmdas/linux/interrupts.c
-@@ -456,7 +456,9 @@ refresh_interrupt_values(void)
-     while (fgets(iobuf, iobufsz, fp) != NULL) {
- 	iobuf[iobufsz - 1] = '\0';
- 	/* next we parse each interrupt line row (starting with a digit) */
--	sts = extract_interrupt_lines(iobuf, ncolumns, i++);
-+	sts = extract_interrupt_lines(iobuf, ncolumns, i);
-+	if (sts > 0)
-+	    i++;
- 	if (sts > 1)
- 	    resized++;
- 	if (sts)
-@@ -466,7 +468,9 @@ refresh_interrupt_values(void)
- 	if (extract_interrupt_misses(iobuf))
- 	    continue;
- 	/* parse other per-CPU interrupt counter rows (starts non-digit) */
--	sts = extract_interrupt_other(iobuf, ncolumns, j++);
-+	sts = extract_interrupt_other(iobuf, ncolumns, j);
-+	if (sts > 0)
-+	    j++;
- 	if (sts > 1)
- 	    resized++;
- 	if (!sts)
diff --git a/SPECS/pcp.spec b/SPECS/pcp.spec
index e2c345d..297467b 100644
--- a/SPECS/pcp.spec
+++ b/SPECS/pcp.spec
@@ -1,6 +1,6 @@
 Name:    pcp
-Version: 5.0.2
-Release: 5%{?dist}
+Version: 5.1.1
+Release: 3%{?dist}
 Summary: System-level performance monitoring and performance management
 License: GPLv2+ and LGPLv2+ and CC-BY
 URL:     https://pcp.io
@@ -8,14 +8,14 @@ URL:     https://pcp.io
 %global  bintray https://bintray.com/artifact/download
 Source0: %{bintray}/pcp/source/pcp-%{version}.src.tar.gz
 
-# RHBZ 1788119
-Patch1:  multilib-pcp-devel.patch
-# RHBZ 1785560
-Patch2:  archive-discovery.patch
-# RHBZ 1788881
-Patch3:  activemq-modules.patch
-# RHBZ 1798058
-Patch4:  s390x-interrupts.patch
+Patch000: redhat-bugzilla-1792971.patch
+Patch001: redhat-bugzilla-1541406.patch
+Patch002: redhat-bugzilla-1846711.patch
+Patch003: redhat-bugzilla-1848995.patch
+Patch004: redhat-bugzilla-1790452.patch
+Patch005: redhat-bugzilla-1846705.patch
+Patch006: redhat-bugzilla-1849511.patch
+Patch007: redhat-bugzilla-1790433.patch
 
 %if 0%{?fedora} >= 26 || 0%{?rhel} > 7
 %global __python2 python2
@@ -23,6 +23,12 @@ Patch4:  s390x-interrupts.patch
 %global __python2 python
 %endif
 
+%if 0%{?rhel} >= 7 || 0%{?fedora} >= 17
+%global _hostname_executable /usr/bin/hostname
+%else
+%global _hostname_executable /bin/hostname
+%endif
+
 %if 0%{?fedora} || 0%{?rhel} > 5
 %global disable_selinux 0
 %else
@@ -50,7 +56,7 @@ Patch4:  s390x-interrupts.patch
 %endif
 
 # libchan, libhdr_histogram and pmdastatsd
-%if 0%{?fedora} >= 29 || 0%{?rhel} > 8
+%if 0%{?fedora} >= 29 || 0%{?rhel} > 7
 %global disable_statsd 0
 %else
 %global disable_statsd 1
@@ -229,7 +235,11 @@ BuildRequires: cyrus-sasl-devel
 BuildRequires: libvarlink-devel
 %endif
 %if !%{disable_statsd}
-BuildRequires: ragel chan-devel HdrHistogram_c-devel
+# ragel unavailable on RHEL8
+%if 0%{?rhel} == 0
+BuildRequires: ragel
+%endif
+BuildRequires: chan-devel HdrHistogram_c-devel
 %endif
 %if !%{disable_perfevent}
 BuildRequires: libpfm-devel >= 4
@@ -249,7 +259,7 @@ BuildRequires: perl-generators
 BuildRequires: perl-devel perl(strict)
 BuildRequires: perl(ExtUtils::MakeMaker) perl(LWP::UserAgent) perl(JSON)
 BuildRequires: perl(LWP::UserAgent) perl(Time::HiRes) perl(Digest::MD5)
-BuildRequires: man hostname
+BuildRequires: man %{_hostname_executable}
 %if !%{disable_systemd}
 BuildRequires: systemd-devel
 %endif
@@ -263,7 +273,7 @@ BuildRequires: qt5-qtsvg-devel
 %endif
 %endif
 
-Requires: bash xz gawk sed grep findutils which hostname
+Requires: bash xz gawk sed grep findutils which %{_hostname_executable}
 Requires: pcp-libs = %{version}-%{release}
 %if !%{disable_selinux}
 Requires: pcp-selinux = %{version}-%{release}
@@ -344,9 +354,9 @@ Requires: pcp-libs = %{version}-%{release}
 %endif
 
 %if %{disable_statsd}
-%global _with_statsd --with-statsd=no
+%global _with_statsd --with-pmdastatsd=no
 %else
-%global _with_statsd --with-statsd=yes
+%global _with_statsd --with-pmdastatsd=yes
 %endif
 
 %if %{disable_bcc}
@@ -391,6 +401,24 @@ then
 fi
 }
 
+%global install_file() %{expand:
+if [ -w "%1" ]
+then
+    (cd "%1" && touch "%2" && chmod 644 "%2")
+else
+    echo "WARNING: Cannot write to %1, skipping %2 creation." >&2
+fi
+}
+
+%global rebuild_pmns() %{expand:
+if [ -w "%1" ]
+then
+    (cd "%1" && ./Rebuild -s && rm -f "%2")
+else
+    echo "WARNING: Cannot write to %1, skipping namespace rebuild." >&2
+fi
+}
+
 %global selinux_handle_policy() %{expand:
 if [ %1 -ge 1 ]
 then
@@ -502,10 +530,11 @@ Requires: pcp-pmda-bpftrace
 %if !%{disable_python2} || !%{disable_python3}
 Requires: pcp-pmda-gluster pcp-pmda-zswap pcp-pmda-unbound pcp-pmda-mic
 Requires: pcp-pmda-libvirt pcp-pmda-lio pcp-pmda-openmetrics pcp-pmda-haproxy
-Requires: pcp-pmda-lmsensors pcp-pmda-netcheck
+Requires: pcp-pmda-lmsensors pcp-pmda-netcheck pcp-pmda-rabbitmq
+Requires: pcp-pmda-openvswitch
 %endif
 %if !%{disable_mssql}
 Requires: pcp-pmda-mssql
 %endif
 %if !%{disable_snmp}
 Requires: pcp-pmda-snmp
@@ -525,6 +554,15 @@ Requires: pcp-gui
 %endif
 Requires: bc gcc gzip bzip2
 Requires: redhat-rpm-config
+%if !%{disable_selinux}
+Requires: selinux-policy-devel
+Requires: selinux-policy-targeted
+%if 0%{?rhel} == 5
+Requires: setools
+%else
+Requires: setools-console
+%endif
+%endif
 
 %description testsuite
 Quality assurance test suite for Performance Co-Pilot (PCP).
@@ -933,6 +971,7 @@ but can also be configured to monitor remote GUIDs such as IB switches.
 License: GPLv2+
 Summary: Performance Co-Pilot (PCP) metrics for ActiveMQ
 URL: https://pcp.io
+Requires: pcp = %{version}-%{release} pcp-libs = %{version}-%{release}
 Requires: perl-PCP-PMDA = %{version}-%{release}
 Requires: perl(LWP::UserAgent)
 
@@ -948,10 +987,13 @@ collecting metrics about the ActiveMQ message broker.
 License: GPLv2+
 Summary: Performance Co-Pilot (PCP) metrics for BIND servers
 URL: https://pcp.io
+Requires: pcp = %{version}-%{release} pcp-libs = %{version}-%{release}
 Requires: perl-PCP-PMDA = %{version}-%{release}
 Requires: perl(LWP::UserAgent)
 Requires: perl(XML::LibXML)
 Requires: perl(File::Slurp)
+Requires: perl-autodie
+Requires: perl-Time-HiRes
 
 %description pmda-bind2
 This package contains the PCP Performance Metrics Domain Agent (PMDA) for
@@ -965,7 +1007,11 @@ collecting metrics from BIND (Berkeley Internet Name Domain).
 License: GPLv2+
 Summary: Performance Co-Pilot (PCP) metrics for Redis
 URL: https://pcp.io
+Requires: pcp = %{version}-%{release} pcp-libs = %{version}-%{release}
 Requires: perl-PCP-PMDA = %{version}-%{release}
+Requires: perl-autodie
+Requires: perl-Time-HiRes
+Requires: perl-Data-Dumper
 
 %description pmda-redis
 This package contains the PCP Performance Metrics Domain Agent (PMDA) for
@@ -980,6 +1026,7 @@ collecting metrics from Redis servers (redis.io).
 License: GPLv2+
 Summary: Performance Co-Pilot (PCP) metrics for NutCracker (TwemCache)
 URL: https://pcp.io
+Requires: pcp = %{version}-%{release} pcp-libs = %{version}-%{release}
 Requires: perl-PCP-PMDA = %{version}-%{release}
 Requires: perl(YAML::XS::LibYAML)
 Requires: perl(JSON)
@@ -997,6 +1044,7 @@ collecting metrics from NutCracker (TwemCache).
 License: GPLv2+
 Summary: Performance Co-Pilot (PCP) metrics for Bonded network interfaces
 URL: https://pcp.io
+Requires: pcp = %{version}-%{release} pcp-libs = %{version}-%{release}
 Requires: perl-PCP-PMDA = %{version}-%{release}
 
 %description pmda-bonding
@@ -1011,7 +1059,9 @@ collecting metrics about bonded network interfaces.
 License: GPLv2+
 Summary: Performance Co-Pilot (PCP) metrics for Database response times and Availability
 URL: https://pcp.io
+Requires: pcp = %{version}-%{release} pcp-libs = %{version}-%{release}
 Requires: perl-PCP-PMDA = %{version}-%{release}
+Requires: perl-DBI
 
 %description pmda-dbping
 This package contains the PCP Performance Metrics Domain Agent (PMDA) for
 collecting metrics about the Database response times and Availability.
 License: GPLv2+
 Summary: Performance Co-Pilot (PCP) metrics for 389 Directory Servers
 URL: https://pcp.io
+Requires: pcp = %{version}-%{release} pcp-libs = %{version}-%{release}
 Requires: perl-PCP-PMDA = %{version}-%{release}
 %if 0%{?rhel} <= 7
 Requires: perl-LDAP
@@ -1042,6 +1093,7 @@ collecting metrics about a 389 Directory Server.
 License: GPLv2+
 Summary: Performance Co-Pilot (PCP) metrics for 389 Directory Server Loggers
 URL: https://pcp.io
+Requires: pcp = %{version}-%{release} pcp-libs = %{version}-%{release}
 Requires: perl-PCP-PMDA = %{version}-%{release}
 Requires: perl-Date-Manip
 
@@ -1058,6 +1110,7 @@ collecting metrics from a 389 Directory Server log.
 License: GPLv2+
 Summary: Performance Co-Pilot (PCP) metrics for GPFS Filesystem
 URL: https://pcp.io
+Requires: pcp = %{version}-%{release} pcp-libs = %{version}-%{release}
 Requires: perl-PCP-PMDA = %{version}-%{release}
 
 %description pmda-gpfs
@@ -1072,7 +1125,10 @@ collecting metrics about the GPFS filesystem.
 License: GPLv2+
 Summary: Performance Co-Pilot (PCP) metrics for a GPS Daemon
 URL: https://pcp.io
+Requires: pcp = %{version}-%{release} pcp-libs = %{version}-%{release}
 Requires: perl-PCP-PMDA = %{version}-%{release}
+Requires: perl-Time-HiRes
+Requires: perl-JSON
 
 %description pmda-gpsd
 This package contains the PCP Performance Metrics Domain Agent (PMDA) for
@@ -1100,6 +1156,7 @@ collecting metrics using the Docker daemon REST API.
 License: GPLv2+
 Summary: Performance Co-Pilot (PCP) metrics for the Lustre Filesystem
 URL: https://pcp.io
+Requires: pcp = %{version}-%{release} pcp-libs = %{version}-%{release}
 Requires: perl-PCP-PMDA = %{version}-%{release}
 
 %description pmda-lustre
@@ -1114,8 +1171,7 @@ collecting metrics about the Lustre Filesystem.
 License: GPLv2+
 Summary: Performance Co-Pilot (PCP) metrics for the Lustre Filesystem Comms
 URL: https://pcp.io
-Requires: pcp = %{version}-%{release}
-Requires: pcp-libs = %{version}-%{release}
+Requires: pcp = %{version}-%{release} pcp-libs = %{version}-%{release}
 
 %description pmda-lustrecomm
 This package contains the PCP Performance Metrics Domain Agent (PMDA) for
@@ -1129,6 +1185,7 @@ collecting metrics about the Lustre Filesystem Comms.
 License: GPLv2+
 Summary: Performance Co-Pilot (PCP) metrics for Memcached
 URL: https://pcp.io
+Requires: pcp = %{version}-%{release} pcp-libs = %{version}-%{release}
 Requires: perl-PCP-PMDA = %{version}-%{release}
 
 %description pmda-memcache
@@ -1143,6 +1200,7 @@ collecting metrics about Memcached.
 License: GPLv2+
 Summary: Performance Co-Pilot (PCP) metrics for MySQL
 URL: https://pcp.io
+Requires: pcp = %{version}-%{release} pcp-libs = %{version}-%{release}
 Requires: perl-PCP-PMDA = %{version}-%{release}
 Requires: perl(DBI) perl(DBD::mysql)
 BuildRequires: perl(DBI) perl(DBD::mysql)
@@ -1159,6 +1217,7 @@ collecting metrics about the MySQL database.
 License: GPLv2+
 Summary: Performance Co-Pilot (PCP) metrics for Named
 URL: https://pcp.io
+Requires: pcp = %{version}-%{release} pcp-libs = %{version}-%{release}
 Requires: perl-PCP-PMDA = %{version}-%{release}
 
 %description pmda-named
@@ -1172,6 +1231,7 @@ collecting metrics about the Named nameserver.
 License: GPLv2+
 Summary: Performance Co-Pilot (PCP) metrics for Netfilter framework
 URL: https://pcp.io
+Requires: pcp = %{version}-%{release} pcp-libs = %{version}-%{release}
 Requires: perl-PCP-PMDA = %{version}-%{release}
 
 %description pmda-netfilter
@@ -1186,6 +1246,7 @@ collecting metrics about the Netfilter packet filtering framework.
 License: GPLv2+
 Summary: Performance Co-Pilot (PCP) metrics for Usenet News
 URL: https://pcp.io
+Requires: pcp = %{version}-%{release} pcp-libs = %{version}-%{release}
 Requires: perl-PCP-PMDA = %{version}-%{release}
 
 %description pmda-news
@@ -1200,6 +1261,7 @@ collecting metrics about Usenet News.
 License: GPLv2+
 Summary: Performance Co-Pilot (PCP) metrics for the Nginx Webserver
 URL: https://pcp.io
+Requires: pcp = %{version}-%{release} pcp-libs = %{version}-%{release}
 Requires: perl-PCP-PMDA = %{version}-%{release}
 Requires: perl(LWP::UserAgent)
 BuildRequires: perl(LWP::UserAgent)
@@ -1216,6 +1278,7 @@ collecting metrics about the Nginx Webserver.
 License: GPLv2+
 Summary: Performance Co-Pilot (PCP) metrics for the Oracle database
 URL: https://pcp.io
+Requires: pcp = %{version}-%{release} pcp-libs = %{version}-%{release}
 Requires: perl-PCP-PMDA = %{version}-%{release}
 Requires: perl(DBI)
 BuildRequires: perl(DBI)
@@ -1232,7 +1295,9 @@ collecting metrics about the Oracle database.
 License: GPLv2+
 Summary: Performance Co-Pilot (PCP) metrics for PowerDNS
 URL: https://pcp.io
+Requires: pcp = %{version}-%{release} pcp-libs = %{version}-%{release}
 Requires: perl-PCP-PMDA = %{version}-%{release}
+Requires: perl-Time-HiRes
 
 %description pmda-pdns
 This package contains the PCP Performance Metrics Domain Agent (PMDA) for
@@ -1246,6 +1311,7 @@ collecting metrics about the PowerDNS.
 License: GPLv2+
 Summary: Performance Co-Pilot (PCP) metrics for the Postfix (MTA)
 URL: https://pcp.io
+Requires: pcp = %{version}-%{release} pcp-libs = %{version}-%{release}
 Requires: perl-PCP-PMDA = %{version}-%{release}
 Requires: perl-Time-HiRes
 %if 0%{?fedora} > 16 || 0%{?rhel} > 5
@@ -1273,6 +1339,7 @@ collecting metrics about the Postfix (MTA).
 License: GPLv2+
 Summary: Performance Co-Pilot (PCP) metrics for Rsyslog
 URL: https://pcp.io
+Requires: pcp = %{version}-%{release} pcp-libs = %{version}-%{release}
 Requires: perl-PCP-PMDA = %{version}-%{release}
 
 %description pmda-rsyslog
@@ -1287,6 +1354,7 @@ collecting metrics about Rsyslog.
 License: GPLv2+
 Summary: Performance Co-Pilot (PCP) metrics for Samba
 URL: https://pcp.io
+Requires: pcp = %{version}-%{release} pcp-libs = %{version}-%{release}
 Requires: perl-PCP-PMDA = %{version}-%{release}
 
 %description pmda-samba
@@ -1301,6 +1369,7 @@ collecting metrics about Samba.
 License: GPLv2+
 Summary: Performance Co-Pilot (PCP) metrics for the SLURM Workload Manager
 URL: https://pcp.io
+Requires: pcp = %{version}-%{release} pcp-libs = %{version}-%{release}
 Requires: perl-PCP-PMDA = %{version}-%{release}
 
 %description pmda-slurm
@@ -1316,6 +1385,7 @@ collecting metrics from the SLURM Workload Manager.
 License: GPLv2+
 Summary: Performance Co-Pilot (PCP) metrics for Simple Network Management Protocol
 URL: https://pcp.io
+Requires: pcp = %{version}-%{release} pcp-libs = %{version}-%{release}
 Requires: perl-PCP-PMDA = %{version}-%{release}
 # There are no perl-Net-SNMP packages in rhel, disable unless non-rhel or epel5
 %if 0%{?rhel} == 0 || 0%{?rhel} < 6
@@ -1335,6 +1405,7 @@ collecting metrics about SNMP.
 License: GPLv2+
 Summary: Performance Co-Pilot (PCP) metrics for VMware
 URL: https://pcp.io
+Requires: pcp = %{version}-%{release} pcp-libs = %{version}-%{release}
 Requires: perl-PCP-PMDA = %{version}-%{release}
 
 %description pmda-vmware
@@ -1349,6 +1420,7 @@ collecting metrics for VMware.
 License: GPLv2+
 Summary: Performance Co-Pilot (PCP) metrics for Zimbra
 URL: https://pcp.io
+Requires: pcp = %{version}-%{release} pcp-libs = %{version}-%{release}
 Requires: perl-PCP-PMDA = %{version}-%{release}
 
 %description pmda-zimbra
@@ -1363,7 +1435,7 @@ collecting metrics about Zimbra.
 License: GPLv2+
 Summary: Performance Co-Pilot (PCP) metrics for the Device Mapper Cache and Thin Client
 URL: https://pcp.io
-Requires: pcp-libs = %{version}-%{release}
+Requires: pcp = %{version}-%{release} pcp-libs = %{version}-%{release}
 BuildRequires: device-mapper-devel
 %description pmda-dm
 This package contains the PCP Performance Metrics Domain Agent (PMDA) for
@@ -1379,6 +1451,7 @@ collecting metrics about the Device Mapper Cache and Thin Client.
 License: ASL 2.0 and GPLv2+
 Summary: Performance Co-Pilot (PCP) metrics from eBPF/BCC modules
 URL: https://pcp.io
+Requires: pcp = %{version}-%{release} pcp-libs = %{version}-%{release}
 Requires: python3-bcc
 Requires: python3-pcp
 %description pmda-bcc
@@ -1395,6 +1468,7 @@ extracting performance metrics from eBPF/BCC Python modules.
 License: ASL 2.0 and GPLv2+
 Summary: Performance Co-Pilot (PCP) metrics from bpftrace scripts
 URL: https://pcp.io
+Requires: pcp = %{version}-%{release} pcp-libs = %{version}-%{release}
 Requires: bpftrace >= 0.9.2
 Requires: python3-pcp
 Requires: python3 >= 3.6
@@ -1412,6 +1486,7 @@ extracting performance metrics from bpftrace scripts.
 License: GPLv2+
 Summary: Performance Co-Pilot (PCP) metrics for the Gluster filesystem
 URL: https://pcp.io
+Requires: pcp = %{version}-%{release} pcp-libs = %{version}-%{release}
 %if !%{disable_python3}
 Requires: python3-pcp
 %else
@@ -1429,6 +1504,7 @@ collecting metrics about the gluster filesystem.
 License: GPLv2+
 Summary: Performance Co-Pilot (PCP) metrics for NFS Clients
 URL: https://pcp.io
+Requires: pcp = %{version}-%{release} pcp-libs = %{version}-%{release}
 %if !%{disable_python3}
 Requires: python3-pcp
 %else
@@ -1446,6 +1522,7 @@ collecting metrics for NFS Clients.
 License: GPLv2+
 Summary: Performance Co-Pilot (PCP) metrics for PostgreSQL
 URL: https://pcp.io
+Requires: pcp = %{version}-%{release} pcp-libs = %{version}-%{release}
 %if !%{disable_python3}
 Requires: python3-pcp
 Requires: python3-psycopg2
@@ -1467,6 +1544,7 @@ collecting metrics about the PostgreSQL database.
 License: GPLv2+
 Summary: Performance Co-Pilot (PCP) metrics for compressed swap
 URL: https://pcp.io
+Requires: pcp = %{version}-%{release} pcp-libs = %{version}-%{release}
 %if !%{disable_python3}
 Requires: python3-pcp
 %else
@@ -1484,6 +1562,7 @@ collecting metrics about compressed swap.
 License: GPLv2+
 Summary: Performance Co-Pilot (PCP) metrics for the Unbound DNS Resolver
 URL: https://pcp.io
+Requires: pcp = %{version}-%{release} pcp-libs = %{version}-%{release}
 %if !%{disable_python3}
 Requires: python3-pcp
 %else
@@ -1501,6 +1580,7 @@ collecting metrics about the Unbound DNS Resolver.
 License: GPLv2+
 Summary: Performance Co-Pilot (PCP) metrics for Intel MIC cards
 URL: https://pcp.io
+Requires: pcp = %{version}-%{release} pcp-libs = %{version}-%{release}
 %if !%{disable_python3}
 Requires: python3-pcp
 %else
@@ -1518,6 +1598,7 @@ collecting metrics about Intel MIC cards.
 License: GPLv2+
 Summary: Performance Co-Pilot (PCP) metrics for HAProxy
 URL: https://pcp.io
+Requires: pcp = %{version}-%{release} pcp-libs = %{version}-%{release}
 %if !%{disable_python3}
 Requires: python3-pcp
 %else
@@ -1535,6 +1616,7 @@ extracting performance metrics from HAProxy over the HAProxy stats socket.
 License: GPLv2+
 Summary: Performance Co-Pilot (PCP) metrics for virtual machines
 URL: https://pcp.io
+Requires: pcp = %{version}-%{release} pcp-libs = %{version}-%{release}
 %if !%{disable_python3}
 Requires: python3-pcp
 Requires: libvirt-python3 python3-lxml
@@ -1564,6 +1646,7 @@ and hypervisor machines.
 License: GPLv2+
 Summary: Performance Co-Pilot (PCP) metrics for Elasticsearch
 URL: https://pcp.io
+Requires: pcp = %{version}-%{release} pcp-libs = %{version}-%{release}
 %if !%{disable_python3}
 Requires: python3-pcp
 %else
@@ -1575,12 +1658,49 @@ collecting metrics about Elasticsearch.
 #end pcp-pmda-elasticsearch
 
 #
+# pcp-pmda-openvswitch
+#
+%package pmda-openvswitch
+License: GPLv2+
+Summary: Performance Co-Pilot (PCP) metrics for Open vSwitch
+URL: https://pcp.io
+Requires: pcp = %{version}-%{release} pcp-libs = %{version}-%{release}
+%if !%{disable_python3}
+Requires: python3-pcp
+%else
+Requires: %{__python2}-pcp
+%endif
+%description pmda-openvswitch
+This package contains the PCP Performance Metrics Domain Agent (PMDA) for
+collecting metrics from Open vSwitch.
+#end pcp-pmda-openvswitch
+
+#
+# pcp-pmda-rabbitmq
+#
+%package pmda-rabbitmq
+License: GPLv2+
+Summary: Performance Co-Pilot (PCP) metrics for RabbitMQ queues
+URL: https://pcp.io
+Requires: pcp = %{version}-%{release} pcp-libs = %{version}-%{release}
+%if !%{disable_python3}
+Requires: python3-pcp
+%else
+Requires: %{__python2}-pcp
+%endif
+%description pmda-rabbitmq
+This package contains the PCP Performance Metrics Domain Agent (PMDA) for
+collecting metrics about RabbitMQ message queues.
+#end pcp-pmda-rabbitmq
+
+#
 # pcp-pmda-lio
 #
 %package pmda-lio
 License: GPLv2+
 Summary: Performance Co-Pilot (PCP) metrics for the LIO subsystem
 URL: https://pcp.io
+Requires: pcp = %{version}-%{release} pcp-libs = %{version}-%{release}
 %if !%{disable_python3}
 Requires: python3-pcp
 Requires: python3-rtslib
@@ -1605,7 +1725,7 @@ target.
 License: GPLv2+
 Summary: Performance Co-Pilot (PCP) metrics from OpenMetrics endpoints
 URL: https://pcp.io
-Requires: pcp-libs = %{version}-%{release}
+Requires: pcp = %{version}-%{release} pcp-libs = %{version}-%{release}
 %if !%{disable_python3}
 Requires: python3-pcp
 Requires: python3-requests
@@ -1630,7 +1750,8 @@ extracting metrics from OpenMetrics (https://openmetrics.io/) endpoints.
 License: GPLv2+
 Summary: Performance Co-Pilot (PCP) metrics for hardware sensors
 URL: https://pcp.io
-Requires: pcp-libs = %{version}-%{release}
+Requires: pcp = %{version}-%{release} pcp-libs = %{version}-%{release}
+Requires: lm_sensors
 %if !%{disable_python3}
 Requires: python3-pcp
 %else
@@ -1650,7 +1771,7 @@ collecting metrics about the Linux hardware monitoring sensors.
 License: GPLv2+
 Summary: Performance Co-Pilot (PCP) metrics for simple network checks
 URL: https://pcp.io
-Requires: pcp-libs = %{version}-%{release}
+Requires: pcp = %{version}-%{release} pcp-libs = %{version}-%{release}
 %if !%{disable_python3}
 Requires: python3-pcp
 %else
@@ -1671,7 +1792,7 @@ collecting metrics from simple network checks.
 License: GPLv2+
 Summary: Performance Co-Pilot (PCP) metrics for Microsoft SQL Server
 URL: https://pcp.io
-Requires: pcp-libs = %{version}-%{release}
+Requires: pcp = %{version}-%{release} pcp-libs = %{version}-%{release}
 %if !%{disable_python3}
 Requires: python3-pcp
 %else
@@ -1691,6 +1812,7 @@ collecting metrics from Microsoft SQL Server.
 License: GPLv2+
 Summary: Performance Co-Pilot (PCP) metrics for JSON data
 URL: https://pcp.io
+Requires: pcp = %{version}-%{release} pcp-libs = %{version}-%{release}
 %if !%{disable_python3}
 Requires: python3-pcp
 Requires: python3-jsonpointer python3-six
@@ -1714,7 +1836,7 @@ collecting metrics output in JSON.
 License: GPLv2+
 Summary: Performance Co-Pilot (PCP) metrics for the Apache webserver
 URL: https://pcp.io
-Requires: pcp-libs = %{version}-%{release}
+Requires: pcp = %{version}-%{release} pcp-libs = %{version}-%{release}
 %description pmda-apache
 This package contains the PCP Performance Metrics Domain Agent (PMDA) for
 collecting metrics about the Apache webserver.
@@ -1727,7 +1849,7 @@ collecting metrics about the Apache webserver.
 License: GPLv2+
 Summary: Performance Co-Pilot (PCP) metrics for the Bash shell
 URL: https://pcp.io
-Requires: pcp-libs = %{version}-%{release}
+Requires: pcp = %{version}-%{release} pcp-libs = %{version}-%{release}
 %description pmda-bash
 This package contains the PCP Performance Metrics Domain Agent (PMDA) for
 collecting metrics about the Bash shell.
@@ -1740,7 +1862,7 @@ collecting metrics about the Bash shell.
 License: GPLv2+
 Summary: Performance Co-Pilot (PCP) metrics for the CIFS protocol
 URL: https://pcp.io
-Requires: pcp-libs = %{version}-%{release}
+Requires: pcp = %{version}-%{release} pcp-libs = %{version}-%{release}
 %description pmda-cifs
 This package contains the PCP Performance Metrics Domain Agent (PMDA) for
 collecting metrics about the Common Internet Filesystem.
@@ -1753,7 +1875,7 @@ collecting metrics about the Common Internet Filesytem.
 License: GPLv2+
 Summary: Performance Co-Pilot (PCP) metrics for Cisco routers
 URL: https://pcp.io
-Requires: pcp-libs = %{version}-%{release}
+Requires: pcp = %{version}-%{release} pcp-libs = %{version}-%{release}
 %description pmda-cisco
 This package contains the PCP Performance Metrics Domain Agent (PMDA) for
 collecting metrics about Cisco routers.
@@ -1766,7 +1888,7 @@ collecting metrics about Cisco routers.
 License: GPLv2+
 Summary: Performance Co-Pilot (PCP) metrics for the GFS2 filesystem
 URL: https://pcp.io
-Requires: pcp-libs = %{version}-%{release}
+Requires: pcp = %{version}-%{release} pcp-libs = %{version}-%{release}
 %description pmda-gfs2
 This package contains the PCP Performance Metrics Domain Agent (PMDA) for
 collecting metrics about the Global Filesystem v2.
@@ -1779,7 +1901,7 @@ collecting metrics about the Global Filesystem v2.
 License: GPLv2+
 Summary: Performance Co-Pilot (PCP) metrics from arbitrary log files
 URL: https://pcp.io
-Requires: pcp-libs = %{version}-%{release}
+Requires: pcp = %{version}-%{release} pcp-libs = %{version}-%{release}
 %description pmda-logger
 This package contains the PCP Performance Metrics Domain Agent (PMDA) for
 collecting metrics from a specified set of log files (or pipes).  The PMDA
@@ -1793,7 +1915,7 @@ supports both sampled and event-style metrics.
 License: GPLv2+
 Summary: Performance Co-Pilot (PCP) metrics for the sendmail queue
 URL: https://pcp.io
-Requires: pcp-libs = %{version}-%{release}
+Requires: pcp = %{version}-%{release} pcp-libs = %{version}-%{release}
 %description pmda-mailq
 This package contains the PCP Performance Metrics Domain Agent (PMDA) for
 collecting metrics about email queues managed by sendmail.
@@ -1806,7 +1928,7 @@ collecting metrics about email queues managed by sendmail.
 License: GPLv2+
 Summary: Performance Co-Pilot (PCP) metrics for filesystem mounts
 URL: https://pcp.io
-Requires: pcp-libs = %{version}-%{release}
+Requires: pcp = %{version}-%{release} pcp-libs = %{version}-%{release}
 %description pmda-mounts
 This package contains the PCP Performance Metrics Domain Agent (PMDA) for
 collecting metrics about filesystem mounts.
@@ -1819,7 +1941,7 @@ collecting metrics about filesystem mounts.
 License: GPLv2+
 Summary: Performance Co-Pilot (PCP) metrics for the Nvidia GPU
 URL: https://pcp.io
-Requires: pcp-libs = %{version}-%{release}
+Requires: pcp = %{version}-%{release} pcp-libs = %{version}-%{release}
 %description pmda-nvidia-gpu
 This package contains the PCP Performance Metrics Domain Agent (PMDA) for
 collecting metrics about Nvidia GPUs.
@@ -1832,8 +1954,7 @@ collecting metrics about Nvidia GPUs.
 License: GPLv2+
 Summary: Performance Co-Pilot (PCP) metrics for the room temperature
 URL: https://pcp.io
-Requires: pcp = %{version}-%{release}
-Requires: pcp-libs = %{version}-%{release}
+Requires: pcp = %{version}-%{release} pcp-libs = %{version}-%{release}
 %description pmda-roomtemp
 This package contains the PCP Performance Metrics Domain Agent (PMDA) for
 collecting metrics about the room temperature.
@@ -1847,15 +1968,13 @@ collecting metrics about the room temperature.
 License: GPLv2+
 Summary: Performance Co-Pilot (PCP) metrics for the RPM package manager
 URL: https://pcp.io
-Requires: pcp = %{version}-%{release}
-Requires: pcp-libs = %{version}-%{release}
+Requires: pcp = %{version}-%{release} pcp-libs = %{version}-%{release}
 %description pmda-rpm
 This package contains the PCP Performance Metrics Domain Agent (PMDA) for
 collecting metrics about the installed RPM packages.
 %endif
 # end pcp-pmda-rpm
 
-
 #
 # pcp-pmda-sendmail
 #
@@ -1863,8 +1982,7 @@ collecting metrics about the installed RPM packages.
 License: GPLv2+
 Summary: Performance Co-Pilot (PCP) metrics for Sendmail
 URL: https://pcp.io
-Requires: pcp = %{version}-%{release}
-Requires: pcp-libs = %{version}-%{release}
+Requires: pcp = %{version}-%{release} pcp-libs = %{version}-%{release}
 %description pmda-sendmail
 This package contains the PCP Performance Metrics Domain Agent (PMDA) for
 collecting metrics about Sendmail traffic.
@@ -1877,7 +1995,7 @@ collecting metrics about Sendmail traffic.
 License: GPLv2+
 Summary: Performance Co-Pilot (PCP) metrics for shell command responses
 URL: https://pcp.io
-Requires: pcp-libs = %{version}-%{release}
+Requires: pcp = %{version}-%{release} pcp-libs = %{version}-%{release}
 %description pmda-shping
 This package contains the PCP Performance Metrics Domain Agent (PMDA) for
 collecting metrics about quality of service and response time measurements of
@@ -1891,7 +2009,8 @@ arbitrary shell commands.
 License: GPLv2+
 Summary: Performance Co-Pilot (PCP) metrics for S.M.A.R.T values
 URL: https://pcp.io
-Requires: pcp-libs = %{version}-%{release}
+Requires: pcp = %{version}-%{release} pcp-libs = %{version}-%{release}
+Requires: smartmontools
 %description pmda-smart
 This package contains the PCP Performance Metric Domain Agent (PMDA) for
 collecting metrics of disk S.M.A.R.T values making use of data from the
@@ -1905,8 +2024,7 @@ smartmontools package.
 License: GPLv2+
 Summary: Performance Co-Pilot (PCP) summary metrics from pmie
 URL: https://pcp.io
-Requires: pcp = %{version}-%{release}
-Requires: pcp-libs = %{version}-%{release}
+Requires: pcp = %{version}-%{release} pcp-libs = %{version}-%{release}
 %description pmda-summary
 This package contains the PCP Performance Metrics Domain Agent (PMDA) for
 collecting metrics about other installed PMDAs.
@@ -1920,7 +2038,7 @@ collecting metrics about other installed PMDAs.
 License: GPLv2+
 Summary: Performance Co-Pilot (PCP) metrics from the Systemd journal
 URL: https://pcp.io
-Requires: pcp-libs = %{version}-%{release}
+Requires: pcp = %{version}-%{release} pcp-libs = %{version}-%{release}
 %description pmda-systemd
 This package contains the PCP Performance Metrics Domain Agent (PMDA) for
 collecting metrics from the Systemd journal.
@@ -1934,7 +2052,7 @@ collecting metrics from the Systemd journal.
 License: GPLv2+
 Summary: Performance Co-Pilot (PCP) metrics for application tracing
 URL: https://pcp.io
-Requires: pcp-libs = %{version}-%{release}
+Requires: pcp = %{version}-%{release} pcp-libs = %{version}-%{release}
 %description pmda-trace
 This package contains the PCP Performance Metrics Domain Agent (PMDA) for
 collecting metrics about trace performance data in applications.
@@ -1947,8 +2065,7 @@ collecting metrics about trace performance data in applications.
 License: GPLv2+
 Summary: Performance Co-Pilot (PCP) metrics from web server logs
 URL: https://pcp.io
-Requires: pcp = %{version}-%{release}
-Requires: pcp-libs = %{version}-%{release}
+Requires: pcp = %{version}-%{release} pcp-libs = %{version}-%{release}
 %description pmda-weblog
 This package contains the PCP Performance Metrics Domain Agent (PMDA) for
 collecting metrics about web server logs.
@@ -1960,13 +2077,14 @@ License: GPLv2+
 Summary: Performance Co-Pilot (PCP) Zeroconf Package
 URL: https://pcp.io
 Requires: pcp pcp-doc pcp-system-tools
-Requires: pcp-pmda-dm pcp-pmda-nfsclient
+Requires: pcp-pmda-dm
+%if !%{disable_python2} || !%{disable_python3}
+Requires: pcp-pmda-nfsclient pcp-pmda-openmetrics
+%endif
 %description zeroconf
 This package contains configuration tweaks and files to increase metrics
 gathering frequency, several extended pmlogger configurations, as well as
 automated pmie diagnosis, alerting and self-healing for the localhost.
-A cron script also writes daily performance summary reports similar to
-those written by sysstat.
 
 %if !%{disable_python2}
 #
@@ -2023,7 +2141,7 @@ Requires: python3-pcp = %{version}-%{release}
 %else
 Requires: %{__python2}-pcp = %{version}-%{release}
 %endif
-Requires: pcp-libs = %{version}-%{release}
+Requires: pcp = %{version}-%{release} pcp-libs = %{version}-%{release}
 %if !%{disable_dstat}
 # https://fedoraproject.org/wiki/Packaging:Guidelines "Renaming/Replacing Existing Packages"
 Provides: dstat = %{version}-%{release}
@@ -2105,12 +2223,20 @@ updated policy package.
 
 %prep
 %setup -q
-%patch1 -p1
-%patch2 -p1
-%patch3 -p1
-%patch4 -p1
+%patch000 -p1
+%patch001 -p1
+%patch002 -p1
+%patch003 -p1
+%patch004 -p1
+%patch005 -p1
+%patch006 -p1
+%patch007 -p1
 
 %build
+# fix up build version
+_build=`echo %{release} | sed -e 's/\..*$//'`
+sed -i "/PACKAGE_BUILD/s/=[0-9]*/=$_build/" VERSION.pcp
+
 %if !%{disable_python2} && 0%{?default_python} != 3
 export PYTHON=python%{?default_python}
 %endif
@@ -2232,6 +2358,8 @@ ls -1 $RPM_BUILD_ROOT/%{_pmdasdir} |\
   grep -E -v '^mssql' |\
   grep -E -v '^netcheck' |\
   grep -E -v '^nvidia' |\
+  grep -E -v '^openvswitch' |\
+  grep -E -v '^rabbitmq' |\
   grep -E -v '^roomtemp' |\
   grep -E -v '^sendmail' |\
   grep -E -v '^shping' |\
@@ -2252,7 +2380,7 @@ ls -1 $RPM_BUILD_ROOT/%{_pmdasdir} |\
 
 # all base pcp package files except those split out into sub-packages
 ls -1 $RPM_BUILD_ROOT/%{_bindir} |\
-  grep -E -v 'pmiostat|zabbix|zbxpcp|dstat|pmrep' |\
+  grep -E -v 'pmiostat|zabbix|zbxpcp|dstat|pmrep|pcp2csv' |\
   grep -E -v 'pcp2spark|pcp2graphite|pcp2influxdb|pcp2zabbix' |\
   grep -E -v 'pcp2elasticsearch|pcp2json|pcp2xlsx|pcp2xml' |\
   grep -E -v 'pmdbg|pmclient|pmerr|genpmda' |\
@@ -2260,7 +2388,7 @@ sed -e 's#^#'%{_bindir}'\/#' >base_bin.list
 ls -1 $RPM_BUILD_ROOT/%{_bashcompdir} |\
   grep -E -v 'pcp2spark|pcp2graphite|pcp2influxdb|pcp2zabbix' |\
   grep -E -v 'pcp2elasticsearch|pcp2json|pcp2xlsx|pcp2xml' |\
-  grep -E -v 'pmrep|pmdumptext' |\
+  grep -E -v 'pcp2csv|pmrep|pmdumptext' |\
 sed -e 's#^#'%{_bashcompdir}'\/#' >base_bashcomp.list
 
 # Separate the pcp-system-tools package files.
@@ -2268,10 +2396,10 @@ sed -e 's#^#'%{_bashcompdir}'\/#' >base_bashcomp.list
 # so its also in pcp-system-tools.
 %if !%{disable_python2} || !%{disable_python3}
 ls -1 $RPM_BUILD_ROOT/%{_bindir} |\
-  egrep -e 'pmiostat|pmrep|dstat' |\
+  egrep -e 'pmiostat|pmrep|dstat|pcp2csv' |\
   sed -e 's#^#'%{_bindir}'\/#' >pcp-system-tools.list
 ls -1 $RPM_BUILD_ROOT/%{_libexecdir}/pcp/bin |\
-  egrep -e 'atop|collectl|dmcache|dstat|free|iostat|ipcs|lvmcache|mpstat' \
+  egrep -e 'atop|dmcache|dstat|free|iostat|ipcs|lvmcache|mpstat' \
         -e 'numastat|pidstat|shping|tapestat|uptime|verify' |\
   sed -e 's#^#'%{_libexecdir}/pcp/bin'\/#' >>pcp-system-tools.list
 %endif
@@ -2286,7 +2414,7 @@ ls -1 $RPM_BUILD_ROOT/%{_libexecdir}/pcp/bin |\
 
 ls -1 $RPM_BUILD_ROOT/%{_libexecdir}/pcp/bin |\
 %if !%{disable_python2} || !%{disable_python3}
-  grep -E -v 'atop|collectl|dmcache|dstat|free|iostat|ipcs|lvmcache|mpstat' |\
+  grep -E -v 'atop|dmcache|dstat|free|iostat|ipcs|lvmcache|mpstat' |\
   grep -E -v 'numastat|shping|tapestat|uptime|verify|selinux-setup' |\
 %endif
   grep -E -v 'pmlogger_daily_report' |\
@@ -2421,6 +2549,12 @@ fi
 %preun pmda-elasticsearch
 %{pmda_remove "$1" "elasticsearch"}
 
+%preun pmda-openvswitch
+%{pmda_remove "$1" "openvswitch"}
+
+%preun pmda-rabbitmq
+%{pmda_remove "$1" "rabbitmq"}
+
 %if !%{disable_snmp}
 %preun pmda-snmp
 %{pmda_remove "$1" "snmp"}
@@ -2654,10 +2788,10 @@ PCP_PMDAS_DIR=%{_pmdasdir}
 PCP_SYSCONFIG_DIR=%{_sysconfdir}/sysconfig
 PCP_PMCDCONF_PATH=%{_confdir}/pmcd/pmcd.conf
 # auto-install important PMDAs for RH Support (if not present already)
-for PMDA in dm nfsclient ; do
+for PMDA in dm nfsclient openmetrics ; do
     if ! grep -q "$PMDA/pmda$PMDA" "$PCP_PMCDCONF_PATH"
     then
-	touch "$PCP_PMDAS_DIR/$PMDA/.NeedInstall"
+	%{install_file "$PCP_PMDAS_DIR/$PMDA" .NeedInstall}
     fi
 done
 # increase default pmlogger recording frequency
@@ -2695,13 +2829,14 @@ pmieconf -c enable dmthin
 
 %post
 PCP_PMNS_DIR=%{_pmnsdir}
+PCP_LOG_DIR=%{_logsdir}
 chown -R pcp:pcp %{_logsdir}/pmcd 2>/dev/null
 chown -R pcp:pcp %{_logsdir}/pmlogger 2>/dev/null
 chown -R pcp:pcp %{_logsdir}/sa 2>/dev/null
 chown -R pcp:pcp %{_logsdir}/pmie 2>/dev/null
 chown -R pcp:pcp %{_logsdir}/pmproxy 2>/dev/null
-touch "$PCP_PMNS_DIR/.NeedRebuild"
-chmod 644 "$PCP_PMNS_DIR/.NeedRebuild"
+%{install_file "$PCP_PMNS_DIR" .NeedRebuild}
+%{install_file "$PCP_LOG_DIR/pmlogger" .NeedRewrite}
 %if !%{disable_systemd}
     %systemd_postun_with_restart pmcd.service
     %systemd_post pmcd.service
@@ -2720,9 +2855,7 @@ chmod 644 "$PCP_PMNS_DIR/.NeedRebuild"
     /sbin/chkconfig --add pmproxy >/dev/null 2>&1
     /sbin/service pmproxy condrestart
 %endif
-
-cd "$PCP_PMNS_DIR" && ./Rebuild -s && rm -f .NeedRebuild
-cd
+%{rebuild_pmns "$PCP_PMNS_DIR" .NeedRebuild}
 
 %if 0%{?fedora} >= 26 || 0%{?rhel} > 7
 %ldconfig_scriptlets libs
@@ -2789,34 +2922,42 @@ cd
 %{_initddir}/pmproxy
 %if !%{disable_systemd}
 %{_unitdir}/pmcd.service
+%{_unitdir}/pmproxy.service
 %{_unitdir}/pmlogger.service
+%{_unitdir}/pmfind.service
 %{_unitdir}/pmie.service
-%{_unitdir}/pmproxy.service
 # services and timers replacing the old cron scripts
 %{_unitdir}/pmlogger_check.service
 %{_unitdir}/pmlogger_check.timer
+%{_unitdir}/pmlogger_check.path
 %{_unitdir}/pmlogger_daily.service
 %{_unitdir}/pmlogger_daily.timer
 %{_unitdir}/pmlogger_daily-poll.service
 %{_unitdir}/pmlogger_daily-poll.timer
 %{_unitdir}/pmie_check.service
 %{_unitdir}/pmie_check.timer
+%{_unitdir}/pmie_check.path
 %{_unitdir}/pmie_daily.service
 %{_unitdir}/pmie_daily.timer
+%{_unitdir}/pmfind.timer
+%{_unitdir}/pmfind.path
 %config(noreplace) %{_sysconfdir}/sysconfig/pmie_timers
 %config(noreplace) %{_sysconfdir}/sysconfig/pmlogger_timers
 %else
 # cron scripts
 %config(noreplace) %{_sysconfdir}/cron.d/pcp-pmlogger
+%config(noreplace) %{_sysconfdir}/cron.d/pcp-pmfind
 %config(noreplace) %{_sysconfdir}/cron.d/pcp-pmie
 %endif
 %config(noreplace) %{_sysconfdir}/sasl2/pmcd.conf
 %config(noreplace) %{_sysconfdir}/sysconfig/pmlogger
 %config(noreplace) %{_sysconfdir}/sysconfig/pmproxy
+%config(noreplace) %{_sysconfdir}/sysconfig/pmfind
 %config(noreplace) %{_sysconfdir}/sysconfig/pmcd
 %config %{_sysconfdir}/pcp.env
-%dir %{_confdir}/pipe.conf.d
 %dir %{_confdir}/labels
+%dir %{_confdir}/labels/optional
+%dir %{_confdir}/pipe.conf.d
 %dir %{_confdir}/pmcd
 %config(noreplace) %{_confdir}/pmcd/pmcd.conf
 %config(noreplace) %{_confdir}/pmcd/pmcd.options
@@ -2984,6 +3127,12 @@ cd
 %files pmda-elasticsearch
 %{_pmdasdir}/elasticsearch
 
+%files pmda-openvswitch
+%{_pmdasdir}/openvswitch
+
+%files pmda-rabbitmq
+%{_pmdasdir}/rabbitmq
+
 %files pmda-gpfs
 %{_pmdasdir}/gpfs
 
@@ -3259,48 +3408,315 @@ cd
 %endif
 
 %changelog
-* Tue Feb 25 2020 Nathan Scott <nathans@redhat.com> - 5.0.2-5
-- Fix /proc/interrupts parsing on s390x platforms (BZ 1798058)
+* Tue Jun 23 2020 Mark Goodwin <mgoodwin@redhat.com> - 5.1.1-3
+- fix for missing runtime deps on perl Net::SNMP (BZ 1790433)
+- resolve covscan and other issues from upstream QA (BZ 1849511)
+- Possible memory leak detected in pcp-atop (BZ 1846705)
+- Installation of pcp-pmda-samba causes SELinux issues (BZ 1790452)
+- fix Intermittent pminfo crashes (BZ 1848995)
+- Silence openmetrics PMDA warnings, add status metrics (BZ 1846711)
+- set PACKAGE_BUILD in VERSION.pcp so pmcd.build metric is correct
+
+* Thu Jun 11 2020 Mark Goodwin <mgoodwin@redhat.com> - 5.1.1-2
+- activate pmlogger_rewrite on upgrades (BZ 1541406)
+- fix Coverity issues in pmdastatsd and pmlogconf (BZ 1792971)
+- libpcp_web: ensure context is freed only after timer is fully closed
+- services: pmlogger and pmie services Want pmcd on boot
+- fix intermittent pmlogconf core dumps (BZ 1845241)
+- pcp-atop: resolve potential null task pointer dereference
+- pmproxy: improve diagnostics, particularly relating to http requests
+- pmproxy: cleanup, remove unused flags and dead code in http encoding
+- pmproxy: support the OPTIONS protocol in HTTP 1.1
+- libpcp_web: add resilience to descriptor lookup paths (BZ 1837153)
+
+* Fri May 29 2020 Mark Goodwin <mgoodwin@redhat.com> - 5.1.1-1
+- Rebuild to pick up changed HdrHistogram_c version (BZ 1831502)
+- pmdakvm: handle kernel lockdown in integrity mode (BZ 1824297)
+- PCP_NSSDB_DIR should not be mentioned in /etc/pcp.conf (BZ 1826020)
+- pmie_daily.service runs as pcp but tries to do root things (BZ 1832262)
+- pcp-testsuite-5.1.0-2.el8 package is missing pcpqa.pp file (BZ 1835422)
+- gfs2 kernel trace points turning on by themselves (BZ 1825386)
+- pcp-atop various fixes (BZ 1818710)
+- SELinux prevents pmlogger from secure connection to remote pmcd (BZ 1826047)
+- pmda-lustre fails to start since lustre 2.12 (BZ 1788937)
+- added labels support for pmrep and various pcp2xxx tools
+- Update to latest pcp-5.1.1 PCP sources.
+
+* Fri May 08 2020 Mark Goodwin <mgoodwin@redhat.com> - 5.1.0-2
+- replace pmdads389log dep on 389-ds-base with a softdep
+- fix install deps for pcp-testsuite on pcp-pmda-mssql
+- Improve pmlogger and pmie system log messages (BZ 1806428)
+- Missing dep of pcp-pmda-dbping on perl-DBI (BZ 1790421)
+- Missing dep of pcp-pmda-ds389log on 389-ds-base (BZ 1790422)
+- Missing dep of pcp-pmda-gpsd on perl-JSON and perl-Time-HiRes (BZ 1790426)
+- Missing dep of pcp-pmda-lmsensors on lm_sensors (BZ 1790427)
+- Missing dep of pcp-pmda-redis on perl-autodie (BZ 1790431)
+- Missing dep of pcp-pmda-smart on smartmontools (BZ 1790432)
+- Missing dep of pcp-pmda-snmp on net-snmp-perl (BZ 1790433)
+- SELinux issues with pcp-pmda-zimbra (BZ 1790437)
+- Missing dep of pcp-pmda-pdns on perl-Time-HiRes (BZ 1790441)
+- Installation of pcp-pmda-netcheck causes SELinux issue (BZ 1790450)
+- Installation of pcp-pmda-samba causes SELinux issues (BZ 1790452)
+- Some PMDAs are missing dependency on PCP (BZ 1790526)
+- pmieconf randomly fails (BZ 1800545)
+- collectl2pcp does not handle large collectl archives well (BZ 1785101)
+- Missing dep of pcp-pmda-redis on perl-autodie and perl-Time-HiRes (BZ 1788519)
+- Can not install pcp-pmda-activemq, wrong location of RESTClient (BZ 1788878)
+- Missing dep of pcp-pmda-bind2 on various perl packages (BZ 1790415)
+- pmlogger_daily_report causing PCP upstream testsuite to fail (BZ 1805146)
+- Missing selinux rules preventing pcp-pmda-named running rndc (BZ 1825663)
+- SELinux is preventing PostgreSQL PMDA to collect metrics (BZ 1825957)
+- pmnewlog generates inaccessible config file (BZ 1810110)
+- pmnewlog is causing PCP testsuite to hang (BZ 1810118)
+- pmdakvm: debugfs access is restricted (BZ 1824297)
+- error starting pmlogger; pid file not owned by root (BZ 1761962)
+- Update to latest PCP 5.1.0-1 upstream sources.
+- Update to latest PCP sources.
 
-* Mon Feb 03 2020 Nathan Scott <nathans@redhat.com> - 5.0.2-4
-- Restrict pcp-pmda-mssql to ODBC architectures (BZ 1795804)
-- Fix pcp-pmda-activemq perl module installation (BZ 1788881)
-- Update archive discovery code with latest fixes (BZ 1785560)
-- Update bpftrace and BCC PMDAs architecture lists (BZ 1795798)
+* Wed Mar 11 2020 Mark Goodwin <mgoodwin@redhat.com> - 5.0.3-3
+- Resolve pcp-selinux issues causing service failures (BZ 1810458)
 
-* Tue Jan 21 2020 Nathan Scott <nathans@redhat.com> - 5.0.2-3
-- Fix issue with multilib pcp-devel installation (BZ 1788119)
-- Archive discovery fixes in pmproxy and libpcp_web (BZ 1785560)
+* Mon Mar 02 2020 Mark Goodwin <mgoodwin@redhat.com> - 5.0.3-2
+- fix typo in Requires: perl-Time-HiRes affecting pcp-pmda-bind2
 
-* Thu Dec 12 2019 Nathan Scott <nathans@redhat.com> - 5.0.2-2
-- Reenable infiniband PMDA in the rpm spec, deps fixed.
+* Thu Feb 27 2020 Mark Goodwin <mgoodwin@redhat.com> - 5.0.3-1
+- Avoid python ctypes bitfield struct on-stack (BZ 1800685)
+- Add dstat support for DM/MD/part devices (BZ 1794273)
+- Fix compilation with gcc version 10 (BZ 1793495)
+- Fix dstat sub-sample averaging (BZ 1780039)
+- Update to latest PCP sources.
 
 * Wed Dec 11 2019 Nathan Scott <nathans@redhat.com> - 5.0.2-1
-- Update to latest PCP v5 sources via rebase (BZ 1723598)
-- Resolve selinux policy issue with bcc PMDA (BZ 1709237)
-- Resolve selinux policy issue with unbound PMDA (BZ 1778813)
-- Updates to perfevent PMDA for hv_24x7 events (BZ 1765434, 1779507)
-- Fix perl packaging dependency for postfix PMDA (BZ 1773459)
-- Fix pcp-dstat handling of large numbers of disks (BZ 1779419)
-- Fix pmie exit status on receipt of TERM signal (BZ 1780003)
-- Fix pmlogger timeout handling on fresh install (BZ 1780073)
-
-* Mon Nov 11 2019 Nathan Scott <nathans@redhat.com> - 5.0.1-1
-- Update to latest PCP v5 sources via rebase (BZ 1723598)
-- Resolve selinux policy installation issues (BZ 1730206)
-
-* Mon May 06 2019 Nathan Scott <nathans@redhat.com> - 4.3.2-2
-- Update metrics for device mapper VDO driver (BZ 1670548)
-- Update to a more recent PCP bug fix release (BZ 1685302)
-
-* Thu Jan 10 2019 Mark Goodwin <mgoodwin@redhat.com> - 4.3.0-3
-- add missing build deps on libuv for pmseries and libpcp_web (BZ 1630540)
-
-* Wed Dec 26 2018 Mark Goodwin <mgoodwin@redhat.com> - 4.3.0-2
-- Revert pmlogger_daily daystart patch (BZ 1662034)
-
-* Thu Jul 07 2016 Nathan Scott <nathans@redhat.com> - 3.11.3-2
-- Export filesys metrics with persistent DM naming (BZ 1349932)
+- Resolve fresh install pmlogger timeout bug (BZ 1721223)
+- Fix dstat exception writing to a closed fd (BZ 1768619)
+- Fix chan lib dependency of pcp-pmda-statsd (BZ 1770815)
+- Update to latest PCP sources.
+
+* Mon Nov 04 2019 Nathan Scott <nathans@redhat.com> - 5.0.1-1
+- Resolve selinux policy issues in PCP tools (BZ 1743040)
+- Update to latest PCP sources.
+
+* Sun Oct 20 2019 Mark Goodwin <mgoodwin@redhat.com> - 5.0.0-2
+- various spec fixes for pmdastatsd
+- add patch1 to fix pmdastatsd build on rawhide
+
+* Fri Oct 11 2019 Mark Goodwin <mgoodwin@redhat.com> - 5.0.0-1
+- Update to latest PCP sources.
+
+* Fri Aug 16 2019 Nathan Scott <nathans@redhat.com> - 4.3.4-1
+- Resolve bootup issues with pmlogger service (BZ 1737091, BZ 1721223)
+- Resolve selinux policy issues in PCP tools (BZ 1721644, BZ 1711547)
+- Update to latest PCP sources.
+
+* Fri Jul 26 2019 Fedora Release Engineering <releng@fedoraproject.org> - 4.3.3-2
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_31_Mass_Rebuild
+
+* Fri Jun 28 2019 Mark Goodwin <mgoodwin@redhat.com> - 4.3.3-1
+- Resolve segv running pmchart with bogus timezone (BZ 1718948)
+- Resolve pmrep wait.formula for collectl-dm-sD and collectl-sD (BZ 1724288)
+- Update to latest PCP sources.
+
+* Mon Jun 10 22:13:21 CET 2019 Igor Gnatenko <ignatenkobrain@fedoraproject.org> - 4.3.2-4
+- Rebuild for RPM 4.15
+
+* Mon Jun 10 15:42:04 CET 2019 Igor Gnatenko <ignatenkobrain@fedoraproject.org> - 4.3.2-3
+- Rebuild for RPM 4.15
+
+* Fri May 31 2019 Jitka Plesnikova <jplesnik@redhat.com> - 4.3.2-2
+- Perl 5.30 rebuild
+
+* Fri Apr 26 2019 Mark Goodwin <mgoodwin@redhat.com> - 4.3.2-1
+- Resolve selinux policy issues for pmie daemon mode (BZ 1702589)
+- Resolve selinux policy issues for BPF permissions (BZ 1693332)
+- Further improvements to daily archive processing (BZ 1647390)
+- Update to latest PCP sources.
+
+* Wed Feb 27 2019 Mark Goodwin <mgoodwin@redhat.com> - 4.3.1-1
+- Fixes pcp-dstat in --full (all instances) mode (BZ 1661912)
+- Remove package dependencies on initscripts (BZ 1592380)
+- Set include directory for cppcheck use (BZ 1663372)
+- Update to latest PCP sources.
+
+* Fri Dec 21 2018 Nathan Scott <nathans@redhat.com> - 4.3.0-1
+- Add the dstat -f/--full option to expand instances (BZ 1651536)
+- Improve systemd interaction for local pmie (BZ 1650999)
+- SELinux is preventing ps from 'search' accesses on the directory
+  .config (BZ 1569697)
+- SELinux is preventing pmdalinux from 'search' accesses on
+  the directory /var/lib/libvirt/images (BZ 1579988)
+- SELinux is preventing pmdalinux from 'unix_read' accesses
+  on the semaphore Unknown (BZ 1607658)
+- SELinux is preventing pmdalinux from 'unix_read' accesses
+  on the shared memory Unknown (BZ 1618756, BZ 1619381, BZ 1601721)
+- Update to latest PCP sources.
+
+* Fri Nov 16 2018 Mark Goodwin <mgoodwin@redhat.com> - 4.2.0-1
+- Resolves dstat packaging issues (BZ 1640912)
+- Resolves dstat cursor positioning problem (BZ 1640913)
+- Resolve a signal handling issue in dstat shutdown (BZ 1648552)
+- Rename variable named await in python code (BZ 1633367)
+- New conditionally-built pcp-pmda-podman sub-package.
+- SELinux is preventing pmdalinux from 'unix_read' accesses on the shared memory labeled gpsd_t
+  (BZ 1626487)
+- SELinux is preventing ps from 'search' accesses on the directory .cache
+  (BZ 1634205, BZ 1635522)
+- SELinux is preventing ps from 'sys_ptrace' accesses on the cap_userns Unknown
+  (BZ 1635394)
+- PCP SELinux AVCs (BZ 1633211)
+- SELinux is preventing pmdalinux from 'search' accesses on the directory spider
+  (BZ 1647843)
+- Update to latest PCP sources.
+
+* Fri Sep 21 2018 Nathan Scott <nathans@redhat.com> - 4.1.3-1
+- Update to latest PCP sources.
+
+* Wed Aug 29 2018 Nathan Scott <nathans@redhat.com> - 4.1.1-3
+- Updated versions of Vector (1.3.1) and Blinkenlights (1.0.1) webapps
+
+* Fri Aug 03 2018 Dave Brolley <brolley@redhat.com> - 4.1.1-2
+- pcp.spec: Fix the _with_dstat reference in the %%configure command
+
+* Fri Aug 03 2018 Dave Brolley <brolley@redhat.com> - 4.1.1-1
+- SELinux is preventing pmdalinux from 'unix_read' accesses on the shared memory Unknown
+  (BZ 1592901)
+- SELinux is preventing pmdalinux from getattr, associate access on the shared memory Unknown
+  (BZ 1594991)
+- PCP BCC PMDA AVCs (BZ 1597978)
+- PCP BCC PMDA packaging issue (BZ 1597979)
+- pmdaproc only reads the first 1024 bytes of the /proc/*/status file resulting in lost metric
+  values (BZ 1600262)
+- Update to latest PCP sources.
+
+* Fri Jul 13 2018 Fedora Release Engineering <releng@fedoraproject.org> - 4.1.0-7
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_29_Mass_Rebuild
+
+* Tue Jul 03 2018 Petr Pisar <ppisar@redhat.com> - 4.1.0-6
+- Perl 5.28 rebuild
+
+* Fri Jun 29 2018 Miro Hrončok <mhroncok@redhat.com> - 4.1.0-5
+- Rebuilt for Python 3.7
+
+* Thu Jun 28 2018 Jitka Plesnikova <jplesnik@redhat.com> - 4.1.0-4
+- Perl 5.28 rebuild
+
+* Tue Jun 19 2018 Miro Hrončok <mhroncok@redhat.com> - 4.1.0-3
+- Rebuilt for Python 3.7
+
+* Fri Jun 15 2018 Nathan Scott <nathans@redhat.com> - 4.1.0-2
+- Rapid compression of PCP log data and metadata (BZ 1293471)
+- Added Perl package build dependencies.
+- Update to latest PCP sources.
+
+* Fri May 11 2018 Mark Goodwin <mgoodwin@redhat.com> - 4.0.2-1
+- Propagate build flags throughout PCP (BZ 1538187)
+- Further additions to selinux policy (BZ 1565158)
+- Update to Vector v1.2.2 in pcp-webapp-vector.
+- Update to latest PCP sources.
+
+* Thu Mar 29 2018 Mark Goodwin <mgoodwin@redhat.com> - 4.0.1-1
+- Fix selinux policy to allow pmdagluster to work (BZ 1558708)
+- pmcd binding only to localhost:44321 by default (BZ 1529915)
+- Update to latest PCP sources.
+
+* Thu Mar 01 2018 Iryna Shcherbina <ishcherb@redhat.com> - 4.0.0-3
+- Update Python 2 dependency declarations to new packaging standards
+  (See https://fedoraproject.org/wiki/FinalizingFedoraSwitchtoPython3)
+
+* Tue Feb 20 2018 Nathan Scott <nathans@redhat.com> - 4.0.0-2
+- Disable pmdabcc on architectures without BCC/eBPF support.
+
+* Fri Feb 16 2018 Nathan Scott <nathans@redhat.com> - 4.0.0-1
+- pcp-atopsar: robustness around missing data (BZ 1508028)
+- python pmcc method checking for missing metrics (BZ 1508026)
+- Fix generic -s and -T option handling in libpcp (BZ 1352461)
+- Resolve crash in local context mode in libpcp_pmda (BZ 1451475)
+- python api: fix timezone segv from incorrect free (BZ 1352465)
+- Remove section 1 and 5 man pages for pmview tool (BZ 1289126)
+- Update to latest PCP sources.
+
+* Thu Feb 08 2018 Nathan Scott <nathans@redhat.com> - 3.12.2-5
+- Update the Vector webapp to latest upstream (v1.2.1).
+
+* Wed Jan 10 2018 Lukas Berk <lberk@redhat.com> - 3.12.2-4
+- Remove Obsoletes line for pcp-gui-debuginfo
+- Update Python 2 dependency declarations to new packaging standards
+  (See https://fedoraproject.org/wiki/FinalizingFedoraSwitchtoPython3)
+
+* Tue Nov 07 2017 Igor Gnatenko <ignatenkobrain@fedoraproject.org> - 3.12.2-2
+- Remove old crufty coreutils requires
+
+* Wed Oct 18 2017 Lukas Berk <lberk@redhat.com> - 3.12.2-1
+- selinux: add pmlogger_exec_t rule from (BZ 1483320)
+- selinux: pmlc accessing tcp port 4330 (BZ 1447585)
+- selinux: pmnewlog.sh using ps to check pid's for pmloggers (BZ 1488116)
+- Update to latest PCP sources.
+
+* Mon Aug 28 2017 Nathan Scott <nathans@redhat.com> - 3.12.1-3
+- Disable infiniband and papi packages on armv7hl (BZ 1485692)
+
+* Fri Aug 25 2017 Lukas Berk <lberk@redhat.com> - 3.12.1-2
+- Rebuild for infiniband dep breakage.
+
+* Wed Aug 16 2017 Nathan Scott <nathans@redhat.com> - 3.12.1-1
+- Update to latest PCP sources.
+
+* Thu Jul 13 2017 Petr Pisar <ppisar@redhat.com> - 3.12.0-2
+- perl dependency renamed to perl-interpreter
+  <https://fedoraproject.org/wiki/Changes/perl_Package_to_Install_Core_Modules>
+
+* Fri Jun 30 2017 Lukas Berk <lberk@redhat.com> - 3.12.0-1
+- Fix pcp-atop failure in open-ended write mode (BZ 1431292)
+- Resolve additional selinux policy issues (BZ 1317515)
+- Improve poor pmlogconf performance (BZ 1376857)
+- Update to latest PCP sources.
+
+* Mon Jun 05 2017 Jitka Plesnikova <jplesnik@redhat.com> - 3.11.10-3
+- Perl 5.26 rebuild
+
+* Fri Jun 2 2017 Lukas Berk <lberk@redhat.com> - 3.11.10-2
+- Correct subrpm inclusion of zeroconf config files (BZ 1456262)
+
+* Wed May 17 2017 Dave Brolley <brolley@redhat.com> - 3.11.10-1
+- python api: handle non-POSIXLY_CORRECT getopt cases (BZ 1289912)
+- Fix pmchart reaction to timezone changes from pmtime (BZ 968823)
+- Require Qt5 for Fedora.
+- Update to latest PCP sources.
+
+* Fri Mar 31 2017 Nathan Scott <nathans@redhat.com> - 3.11.9-1
+- Fix pmchart chart legends toggling behaviour (BZ 1359961)
+- Improve multiple local context attr handling (BZ 1430248)
+- Fix error during installation of pcp-selinux (BZ 1433271)
+- Update to latest PCP sources.
+
+* Fri Feb 17 2017 Lukas Berk <lberk@redhat.com> - 3.11.8-1
+- Support newer kernels /proc/vmstat file contents (BZ 1396148)
+- Added pcp-selinux policy (BZs 1214090, 1381127, 1337968, 1398147)
+
+* Wed Dec 21 2016 Dave Brolley <brolley@redhat.com> - 3.11.7-1
+- pmchart run-away mem leak replaying multi-archive when rewinding (BZ 1359975)
+
+* Fri Nov 11 2016 Mark Goodwin <mgoodwin@redhat.com> - 3.11.6-1
+- Optimize DSO lookups for local context mode startup (BZ 1275293)
+- Correct return code for derive metric help text (BZ 1336208)
+- Improve pmrep metrics collection via extend_indom (BZ 1377464)
+- Fix network.interface.speed value extraction (BZ 1379431)
+
+* Mon Sep 26 2016 Mark Goodwin <mgoodwin@redhat.com> - 3.11.5-1
+- Allow systemd-based auto-restart of all daemons (BZ 1365658)
+- Ensure pmieconf and pmlogconf handle empty files (BZ 1249123)
+- Ignore rpmsave and rpmnew suffixed control files (BZ 1375415)
+- Add new pcp-pmda-libvirt package for virtual machine metrics
+- Update to latest PCP sources.
+
+* Fri Aug 05 2016 Nathan Scott <nathans@redhat.com> - 3.11.4-1
+- Support inside-container metric values in python (BZ 1333702)
+- Fix pmdaproc handling of commands with whitespace (BZ 1350816)
+- Use persistent DM names for the filesystem metrics (BZ 1349932)
+- Add to the ds389{,log} RPM package dependencies (BZ 1354055)
+- Use "dirsrv" as default pmdads389log user account (BZ 1357607)
+- Make pmie(1) honour SIGINT while parsing rules (BZ 1327226)
+- Add pmlogconf support for pcp-pidstat and pcp-mpstat (BZ 1361943)
+- Update to latest PCP sources.
 
 * Fri Jun 17 2016 Nathan Scott <nathans@redhat.com> - 3.11.3-1
 - Fix memory leak in derived metrics error handling (BZ 1331973)