diff --git a/.gitignore b/.gitignore
index a247b61..bcb6c81 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,2 +1,2 @@
-SOURCES/sos-4.2.tar.gz
+SOURCES/sos-4.3.tar.gz
 SOURCES/sos-audit-0.3.tgz
diff --git a/.sos.metadata b/.sos.metadata
index 054c91f..74f14fe 100644
--- a/.sos.metadata
+++ b/.sos.metadata
@@ -1,2 +1,2 @@
-fe82967b0577076aac104412a9fe35cdb444bde4 SOURCES/sos-4.2.tar.gz
+6d443271a3eb26af8fb400ed417a4b572730d316 SOURCES/sos-4.3.tar.gz
 9d478b9f0085da9178af103078bbf2fd77b0175a SOURCES/sos-audit-0.3.tgz
diff --git a/README.debrand b/README.debrand
deleted file mode 100644
index 01c46d2..0000000
--- a/README.debrand
+++ /dev/null
@@ -1,2 +0,0 @@
-Warning: This package was configured for automatic debranding, but the changes
-failed to apply.
diff --git a/SOURCES/sos-bz1869561-cpuX-individual-sizelimits.patch b/SOURCES/sos-bz1869561-cpuX-individual-sizelimits.patch
deleted file mode 100644
index 4d579d7..0000000
--- a/SOURCES/sos-bz1869561-cpuX-individual-sizelimits.patch
+++ /dev/null
@@ -1,48 +0,0 @@
-From b09ed75b09075d86c184b0a63cce9260f2cee4ca Mon Sep 17 00:00:00 2001
-From: Pavel Moravec <pmoravec@redhat.com>
-Date: Mon, 30 Aug 2021 11:27:48 +0200
-Subject: [PATCH] [processor] Apply sizelimit to /sys/devices/system/cpu/cpuX
-
-Copy /sys/devices/system/cpu/cpuX with separately applied sizelimit.
-
-This is required for systems with tens/hundreds of CPUs where the
-cumulative directory size exceeds 25MB or even 100MB.
-
-Resolves: #2639
-Closes: #2665
-
-Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
----
- sos/report/plugins/processor.py | 9 ++++++++-
- 1 file changed, 8 insertions(+), 1 deletion(-)
-
-diff --git a/sos/report/plugins/processor.py b/sos/report/plugins/processor.py
-index 0ddfd126..2df2dc9a 100644
---- a/sos/report/plugins/processor.py
-+++ b/sos/report/plugins/processor.py
-@@ -7,6 +7,7 @@
- # See the LICENSE file in the source distribution for further information.
- 
- from sos.report.plugins import Plugin, IndependentPlugin
-+import os
- 
- 
- class Processor(Plugin, IndependentPlugin):
-@@ -34,7 +35,13 @@ class Processor(Plugin, IndependentPlugin):
-         self.add_copy_spec([
-             "/proc/cpuinfo",
-             "/sys/class/cpuid",
--            "/sys/devices/system/cpu"
-+        ])
-+        # copy /sys/devices/system/cpu/cpuX with separately applied sizelimit
-+        # this is required for systems with tens/hundreds of CPUs where the
-+        # cumulative directory size exceeds 25MB or even 100MB.
-+        cdirs = self.listdir('/sys/devices/system/cpu')
-+        self.add_copy_spec([
-+            os.path.join('/sys/devices/system/cpu', cdir) for cdir in cdirs
-         ])
- 
-         self.add_cmd_output([
--- 
-2.31.1
-
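For reference, a minimal standalone sketch of the per-directory copy-spec technique the deleted patch introduced, using plain `os` calls rather than sos's `Plugin.listdir()`/`add_copy_spec()` API; the helper name `per_cpu_copy_specs` is hypothetical.

```python
# Sketch: list each cpuX entry separately so a per-spec size limit applies
# to each directory instead of to the cumulative /sys/devices/system/cpu tree.
import os

def per_cpu_copy_specs(base='/sys/devices/system/cpu'):
    if not os.path.isdir(base):
        return []
    return [os.path.join(base, entry) for entry in sorted(os.listdir(base))]

if __name__ == '__main__':
    for spec in per_cpu_copy_specs():
        print(spec)
```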
diff --git a/SOURCES/sos-bz2011507-foreman-puma-status.patch b/SOURCES/sos-bz2011507-foreman-puma-status.patch
deleted file mode 100644
index 2a80571..0000000
--- a/SOURCES/sos-bz2011507-foreman-puma-status.patch
+++ /dev/null
@@ -1,69 +0,0 @@
-From 5a9458d318302c1caef862a868745fc8bdf5c741 Mon Sep 17 00:00:00 2001
-From: Pavel Moravec <pmoravec@redhat.com>
-Date: Mon, 4 Oct 2021 15:52:36 +0200
-Subject: [PATCH] [foreman] Collect puma status and stats
-
-Collect foreman-puma-status and 'pumactl [gc-|]stats', optionally using
-SCL (if detected).
-
-Resolves: #2712
-
-Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
----
- sos/report/plugins/foreman.py | 21 ++++++++++++++++++++-
- 1 file changed, 20 insertions(+), 1 deletion(-)
-
-diff --git a/sos/report/plugins/foreman.py b/sos/report/plugins/foreman.py
-index 4539f12b..351794f4 100644
---- a/sos/report/plugins/foreman.py
-+++ b/sos/report/plugins/foreman.py
-@@ -13,6 +13,7 @@ from sos.report.plugins import (Plugin,
-                                 UbuntuPlugin)
- from pipes import quote
- from re import match
-+from sos.utilities import is_executable
- 
- 
- class Foreman(Plugin):
-@@ -26,7 +27,9 @@ class Foreman(Plugin):
-     option_list = [
-         ('months', 'number of months for dynflow output', 'fast', 1),
-         ('proxyfeatures', 'collect features of smart proxies', 'slow', False),
-+        ('puma-gc', 'collect Puma GC stats', 'fast', False),
-     ]
-+    pumactl = 'pumactl %s -S /usr/share/foreman/tmp/puma.state'
- 
-     def setup(self):
-         # for external DB, search in /etc/foreman/database.yml for:
-@@ -134,6 +138,17 @@ class Foreman(Plugin):
-                                 suggest_filename='dynflow_sidekiq_status')
-         self.add_journal(units="dynflow-sidekiq@*")
- 
-+        # Puma stats & status, i.e. foreman-puma-stats, then
-+        # pumactl stats -S /usr/share/foreman/tmp/puma.state
-+        # and optionally also gc-stats
-+        # if on RHEL with Software Collections, wrap the commands accordingly
-+        if self.get_option('puma-gc'):
-+            self.add_cmd_output(self.pumactl % 'gc-stats',
-+                                suggest_filename='pumactl_gc-stats')
-+        self.add_cmd_output(self.pumactl % 'stats',
-+                            suggest_filename='pumactl_stats')
-+        self.add_cmd_output('/usr/sbin/foreman-puma-status')
-+
-         # collect tables sizes, ordered
-         _cmd = self.build_query_cmd(
-             "SELECT table_name, pg_size_pretty(total_bytes) AS total, "
-@@ -297,6 +312,10 @@ class RedHatForeman(Foreman, RedHatPlugin):
-         self.add_file_tags({
-             '/usr/share/foreman/.ssh/ssh_config': 'ssh_foreman_config',
-         })
-+        # if we are on RHEL7 with scl, wrap some Puma commands by
-+        # scl enable tfm 'command'
-+        if self.policy.dist_version() == 7 and is_executable('scl'):
-+            self.pumactl = "scl enable tfm '%s'" % self.pumactl
- 
-         super(RedHatForeman, self).setup()
- 
--- 
-2.31.1
-
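A hedged sketch of the command-template pattern from the deleted foreman patch: the `%s` placeholder receives a `pumactl` subcommand, and on RHEL 7 with Software Collections the whole template is wrapped in `scl enable tfm '...'`. The helper `build_puma_cmds` is illustrative, not part of sos.

```python
# The template mirrors the plugin's class attribute; '%s' is the subcommand.
pumactl = "pumactl %s -S /usr/share/foreman/tmp/puma.state"

def build_puma_cmds(use_scl=False, with_gc=False):
    # wrap the whole command in scl when running on RHEL 7 with SCL
    tmpl = "scl enable tfm '%s'" % pumactl if use_scl else pumactl
    subcmds = ['stats'] + (['gc-stats'] if with_gc else [])
    return [tmpl % sub for sub in subcmds]

# e.g. ["scl enable tfm 'pumactl stats -S ...'", "scl enable tfm 'pumactl gc-stats -S ...'"]
print(build_puma_cmds(use_scl=True, with_gc=True))
```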
diff --git a/SOURCES/sos-bz2011533-unpackaged-recursive-symlink.patch b/SOURCES/sos-bz2011533-unpackaged-recursive-symlink.patch
deleted file mode 100644
index 35cc89d..0000000
--- a/SOURCES/sos-bz2011533-unpackaged-recursive-symlink.patch
+++ /dev/null
@@ -1,42 +0,0 @@
-From e2ca3d02f36c0db4efaacfb2c1b7d502f38e371c Mon Sep 17 00:00:00 2001
-From: Pavel Moravec <pmoravec@redhat.com>
-Date: Mon, 30 Aug 2021 10:18:29 +0200
-Subject: [PATCH] [unpackaged] deal with recursive loop of symlinks properly
-
-When the plugin processes a recursive loop of symlinks, it currently
-hangs in an infinite loop trying to follow the symlinks. Use the
-pathlib.Path.resolve() method to return the target directly.
-
-Resolves: #2664
-
-Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
----
- sos/report/plugins/unpackaged.py | 5 +++--
- 1 file changed, 3 insertions(+), 2 deletions(-)
-
-diff --git a/sos/report/plugins/unpackaged.py b/sos/report/plugins/unpackaged.py
-index e5cc6191..9d68077c 100644
---- a/sos/report/plugins/unpackaged.py
-+++ b/sos/report/plugins/unpackaged.py
-@@ -10,6 +10,7 @@ from sos.report.plugins import Plugin, RedHatPlugin
- 
- import os
- import stat
-+from pathlib import Path
- 
- 
- class Unpackaged(Plugin, RedHatPlugin):
-@@ -41,8 +42,8 @@ class Unpackaged(Plugin, RedHatPlugin):
-                 for name in files:
-                     path = os.path.join(root, name)
-                     try:
--                        while stat.S_ISLNK(os.lstat(path).st_mode):
--                            path = os.path.abspath(os.readlink(path))
-+                        if stat.S_ISLNK(os.lstat(path).st_mode):
-+                            path = Path(path).resolve()
-                     except Exception:
-                         continue
-                     file_list.append(os.path.realpath(path))
--- 
-2.31.1
-
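The fix above is easy to demonstrate outside sos. A small self-contained sketch; the `try/except` mirrors the plugin's behavior, since `Path.resolve()` may raise on symlink loops in some Python versions, whereas the old `os.readlink()` loop never terminated.

```python
import os
import stat
import tempfile
from pathlib import Path

with tempfile.TemporaryDirectory() as tmp:
    a = os.path.join(tmp, 'a')
    b = os.path.join(tmp, 'b')
    os.symlink(a, b)
    os.symlink(b, a)  # a -> b -> a: a recursive loop of symlinks
    path = a
    try:
        # the old code followed os.readlink() in a while loop and spun forever
        if stat.S_ISLNK(os.lstat(path).st_mode):
            path = Path(path).resolve()
    except Exception:
        pass  # the plugin skips entries it cannot resolve, as in the hunk
    print(path)
```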
diff --git a/SOURCES/sos-bz2011534-opacapture-under-allow-system-changes.patch b/SOURCES/sos-bz2011534-opacapture-under-allow-system-changes.patch
deleted file mode 100644
index 39f9c8a..0000000
--- a/SOURCES/sos-bz2011534-opacapture-under-allow-system-changes.patch
+++ /dev/null
@@ -1,49 +0,0 @@
-From 66ebb8256b1326573cbcb2d134545635dfead3bc Mon Sep 17 00:00:00 2001
-From: Jose Castillo <jcastillo@redhat.com>
-Date: Sun, 29 Aug 2021 15:35:09 +0200
-Subject: [PATCH] [omnipath_client] Ensure opacapture runs only with
- allow-system-changes
-
-While the omnipath_client plugin is collecting "opacapture",
-the `depmod -a` command is executed, which regenerates some files
-under /usr/lib/modules/$kernel.
-
-modules.dep
-modules.dep.bin
-modules.devname
-modules.softdep
-modules.symbols
-modules.symbols.bin
-
-This patch ensures that the command is only run when
-the option --allow-system-changes is used.
-
-Fixes: RHBZ#1998433
-
-Signed-off-by: Jose Castillo <jcastillo@redhat.com>
----
- sos/report/plugins/omnipath_client.py | 9 +++++++--
- 1 file changed, 7 insertions(+), 2 deletions(-)
-
-diff --git a/sos/report/plugins/omnipath_client.py b/sos/report/plugins/omnipath_client.py
-index 1ec01384..4e988c5c 100644
---- a/sos/report/plugins/omnipath_client.py
-+++ b/sos/report/plugins/omnipath_client.py
-@@ -45,7 +45,12 @@ class OmnipathClient(Plugin, RedHatPlugin):
-         # rather than storing it somewhere under /var/tmp and copying it via
-         # add_copy_spec, add it directly to sos_commands/<plugin> dir by
-         # building a path argument using self.get_cmd_output_path().
--        self.add_cmd_output("opacapture %s" % join(self.get_cmd_output_path(),
--                                                   "opacapture.tgz"))
-+        # This command calls 'depmod -a', so let's make sure we
-+        # specified the 'allow-system-changes' option before running it.
-+        if self.get_option('allow_system_changes'):
-+            self.add_cmd_output("opacapture %s" %
-+                                join(self.get_cmd_output_path(),
-+                                     "opacapture.tgz"),
-+                                changes=True)
- 
- # vim: set et ts=4 sw=4 :
--- 
-2.31.1
-
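A minimal sketch of the same guard pattern outside the sos Plugin API; `maybe_capture` and its `allow_changes` flag are hypothetical stand-ins for `add_cmd_output(..., changes=True)` and the `--allow-system-changes` option.

```python
import subprocess

def maybe_capture(output_path, allow_changes=False):
    # opacapture runs `depmod -a`, which rewrites the modules.* files,
    # so skip it unless the user explicitly allowed system changes
    if not allow_changes:
        print("skipped 'opacapture': system changes not allowed")
        return None
    return subprocess.run(['opacapture', output_path], check=False)

maybe_capture('/tmp/opacapture.tgz')  # prints the skip message by default
```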
diff --git a/SOURCES/sos-bz2011535-kernel-psi.patch b/SOURCES/sos-bz2011535-kernel-psi.patch
deleted file mode 100644
index 1a9d5e0..0000000
--- a/SOURCES/sos-bz2011535-kernel-psi.patch
+++ /dev/null
@@ -1,33 +0,0 @@
-From 23e523b6b9784390c7ce2c5af654ab497fb10aaf Mon Sep 17 00:00:00 2001
-From: Jose Castillo <jcastillo@redhat.com>
-Date: Wed, 8 Sep 2021 09:25:24 +0200
-Subject: [PATCH] [kernel] Capture Pressure Stall Information
-
-Kernel 4.20 includes PSI metrics for CPU, memory, and IO.
-The feature is enabled after adding "psi=1" as a
-kernel boot parameter.
-The information is captured in files
-in the directory /proc/pressure.
-
-Signed-off-by: Jose Castillo <jcastillo@redhat.com>
----
- sos/report/plugins/kernel.py | 3 ++-
- 1 file changed, 2 insertions(+), 1 deletion(-)
-
-diff --git a/sos/report/plugins/kernel.py b/sos/report/plugins/kernel.py
-index 8c5e5e11..803f5e30 100644
---- a/sos/report/plugins/kernel.py
-+++ b/sos/report/plugins/kernel.py
-@@ -112,7 +112,8 @@ class Kernel(Plugin, IndependentPlugin):
-             "/sys/kernel/debug/extfrag/unusable_index",
-             "/sys/kernel/debug/extfrag/extfrag_index",
-             clocksource_path + "available_clocksource",
--            clocksource_path + "current_clocksource"
-+            clocksource_path + "current_clocksource",
-+            "/proc/pressure/"
-         ])
- 
-         if self.get_option("with-timer"):
--- 
-2.31.1
-
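For context, a short sketch of what the new copy spec collects: the PSI files under /proc/pressure, present on kernels >= 4.20 booted with psi=1.

```python
import os

for resource in ('cpu', 'memory', 'io'):
    path = os.path.join('/proc/pressure', resource)
    if os.path.exists(path):  # absent on older kernels or without psi=1
        with open(path) as f:
            print(path, f.read().strip(), sep='\n')
```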
diff --git a/SOURCES/sos-bz2011536-iptables-based-on-ntf.patch b/SOURCES/sos-bz2011536-iptables-based-on-ntf.patch
deleted file mode 100644
index 5ccc61f..0000000
--- a/SOURCES/sos-bz2011536-iptables-based-on-ntf.patch
+++ /dev/null
@@ -1,303 +0,0 @@
-From 2ab8ba3ecbd52e452cc554d515e0782801dcb4b6 Mon Sep 17 00:00:00 2001
-From: Pavel Moravec <pmoravec@redhat.com>
-Date: Wed, 8 Sep 2021 15:31:48 +0200
-Subject: [PATCH] [firewalld] collect nft rules in firewall_tables only
-
-We collect 'nft list ruleset' in both plugins, while:
-- nft is not shipped by the firewalld package, so we should not collect
-it in the firewalld plugin
-- running the command requires both nf_tables and nfnetlink kmods, so
-we should use both kmods in the predicate
-
-Resolves: #2679
-
-Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
----
- sos/report/plugins/firewall_tables.py | 9 +++++----
- sos/report/plugins/firewalld.py       | 8 +-------
- 2 files changed, 6 insertions(+), 11 deletions(-)
-
-diff --git a/sos/report/plugins/firewall_tables.py b/sos/report/plugins/firewall_tables.py
-index 56058d3bf9..63a7dddeb5 100644
---- a/sos/report/plugins/firewall_tables.py
-+++ b/sos/report/plugins/firewall_tables.py
-@@ -40,10 +40,11 @@ def collect_nftables(self):
-         """ Collects nftables rulesets with 'nft' commands if the modules
-         are present """
- 
--        self.add_cmd_output(
--            "nft list ruleset",
--            pred=SoSPredicate(self, kmods=['nf_tables'])
--        )
-+        # collect nftables ruleset
-+        nft_pred = SoSPredicate(self,
-+                                kmods=['nf_tables', 'nfnetlink'],
-+                                required={'kmods': 'all'})
-+        self.add_cmd_output("nft list ruleset", pred=nft_pred, changes=True)
- 
-     def setup(self):
-         # collect iptables -t for any existing table, if we can't read the
-diff --git a/sos/report/plugins/firewalld.py b/sos/report/plugins/firewalld.py
-index ec83527ed7..9401bfd239 100644
---- a/sos/report/plugins/firewalld.py
-+++ b/sos/report/plugins/firewalld.py
-@@ -9,7 +9,7 @@
- #
- # See the LICENSE file in the source distribution for further information.
- 
--from sos.report.plugins import Plugin, RedHatPlugin, SoSPredicate
-+from sos.report.plugins import Plugin, RedHatPlugin
- 
- 
- class FirewallD(Plugin, RedHatPlugin):
-@@ -35,12 +35,6 @@ def setup(self):
-             "/var/log/firewalld",
-         ])
- 
--        # collect nftables ruleset
--        nft_pred = SoSPredicate(self,
--                                kmods=['nf_tables', 'nfnetlink'],
--                                required={'kmods': 'all'})
--        self.add_cmd_output("nft list ruleset", pred=nft_pred, changes=True)
--
-         # use a 10s timeout to workaround dbus problems in
-         # docker containers.
-         self.add_cmd_output([
--- 
-2.31.1
-
-
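A standalone sketch of the predicate change above: require that *all* listed kernel modules are loaded before running `nft list ruleset`. It reads /proc/modules directly (Linux only); sos's `SoSPredicate` performs this check through the policy layer instead.

```python
def kmods_loaded(required, modules_file='/proc/modules'):
    # /proc/modules lists one loaded module per line, name first
    with open(modules_file) as f:
        loaded = {line.split()[0] for line in f}
    return all(mod in loaded for mod in required)

if kmods_loaded(['nf_tables', 'nfnetlink']):
    print('would run: nft list ruleset')
```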
-From 2a7cf53b61943907dc823cf893530b620a87946c Mon Sep 17 00:00:00 2001
-From: Pavel Moravec <pmoravec@redhat.com>
-Date: Fri, 15 Oct 2021 22:31:36 +0200
-Subject: [PATCH 1/3] [report] Use log_skipped_cmd method inside
- collect_cmd_output
-
-Also, remove obsolete parameters of the log_skipped_cmd method.
-
-Related: #2724
-
-Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
----
- sos/report/plugins/__init__.py | 26 ++++++++------------------
- 1 file changed, 8 insertions(+), 18 deletions(-)
-
-diff --git a/sos/report/plugins/__init__.py b/sos/report/plugins/__init__.py
-index ec138f83..b60ab5f6 100644
---- a/sos/report/plugins/__init__.py
-+++ b/sos/report/plugins/__init__.py
-@@ -876,8 +876,7 @@ class Plugin():
-             return bool(pred)
-         return False
- 
--    def log_skipped_cmd(self, pred, cmd, kmods=False, services=False,
--                        changes=False):
-+    def log_skipped_cmd(self, cmd, pred, changes=False):
-         """Log that a command was skipped due to predicate evaluation.
- 
-         Emit a warning message indicating that a command was skipped due
-@@ -887,21 +886,17 @@ class Plugin():
-         message indicating that the missing data can be collected by using
-         the "--allow-system-changes" command line option will be included.
- 
--        :param pred:    The predicate that caused the command to be skipped
--        :type pred:     ``SoSPredicate``
--
-         :param cmd:     The command that was skipped
-         :type cmd:      ``str``
- 
--        :param kmods:   Did kernel modules cause the command to be skipped
--        :type kmods:    ``bool``
--
--        :param services: Did services cause the command to be skipped
--        :type services: ``bool``
-+        :param pred:    The predicate that caused the command to be skipped
-+        :type pred:     ``SoSPredicate``
- 
-         :param changes: Is the `--allow-system-changes` enabled
-         :type changes:  ``bool``
-         """
-+        if pred is None:
-+            pred = SoSPredicate(self)
-         msg = "skipped command '%s': %s" % (cmd, pred.report_failure())
- 
-         if changes:
-@@ -1700,9 +1693,7 @@ class Plugin():
-             self.collect_cmds.append(soscmd)
-             self._log_info("added cmd output '%s'" % soscmd.cmd)
-         else:
--            self.log_skipped_cmd(pred, soscmd.cmd, kmods=bool(pred.kmods),
--                                 services=bool(pred.services),
--                                 changes=soscmd.changes)
-+            self.log_skipped_cmd(soscmd.cmd, pred, changes=soscmd.changes)
- 
-     def add_cmd_output(self, cmds, suggest_filename=None,
-                        root_symlink=None, timeout=None, stderr=True,
-@@ -2112,7 +2103,7 @@ class Plugin():
-                            root_symlink=False, timeout=None,
-                            stderr=True, chroot=True, runat=None, env=None,
-                            binary=False, sizelimit=None, pred=None,
--                           subdir=None, tags=[]):
-+                           changes=False, subdir=None, tags=[]):
-         """Execute a command and save the output to a file for inclusion in the
-         report, then return the results for further use by the plugin
- 
-@@ -2163,8 +2154,7 @@ class Plugin():
-         :rtype: ``dict``
-         """
-         if not self.test_predicate(cmd=True, pred=pred):
--            self._log_info("skipped cmd output '%s' due to predicate (%s)" %
--                           (cmd, self.get_predicate(cmd=True, pred=pred)))
-+            self.log_skipped_cmd(cmd, pred, changes=changes)
-             return {
-                 'status': None,  # don't match on if result['status'] checks
-                 'output': '',
--- 
-2.31.1
-
-
-From 6b1bea0ffb1df7f8e5001b06cf25f0741b007ddd Mon Sep 17 00:00:00 2001
-From: Pavel Moravec <pmoravec@redhat.com>
-Date: Fri, 15 Oct 2021 22:34:01 +0200
-Subject: [PATCH 2/3] [firewall_tables] call iptables -t <table> based on nft
- list
-
-If iptables is not really in use, calling iptables -t <table>
-would load the corresponding nft table.
-
-Therefore, call iptables -t only for the tables from "nft list ruleset"
-output.
-
-Example: nft list ruleset contains
-
-table ip mangle {
-..
-}
-
-so we can collect iptables -t mangle -nvL.
-
-The same applies to ip6tables as well.
-
-Resolves: #2724
-
-Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
----
- sos/report/plugins/firewall_tables.py | 29 ++++++++++++++++++++-------
- 1 file changed, 22 insertions(+), 7 deletions(-)
-
-diff --git a/sos/report/plugins/firewall_tables.py b/sos/report/plugins/firewall_tables.py
-index 63a7ddde..ef04d939 100644
---- a/sos/report/plugins/firewall_tables.py
-+++ b/sos/report/plugins/firewall_tables.py
-@@ -44,26 +44,41 @@ class firewall_tables(Plugin, IndependentPlugin):
-         nft_pred = SoSPredicate(self,
-                                 kmods=['nf_tables', 'nfnetlink'],
-                                 required={'kmods': 'all'})
--        self.add_cmd_output("nft list ruleset", pred=nft_pred, changes=True)
-+        return self.collect_cmd_output("nft list ruleset", pred=nft_pred,
-+                                       changes=True)
- 
-     def setup(self):
-+        # first, collect "nft list ruleset" as collecting commands like
-+        # ip6tables -t mangle -nvL
-+        # depends on its output
-+        # store in nft_ip_tables lists of ip[|6] tables from nft list
-+        nft_list = self.collect_nftables()
-+        nft_ip_tables = {'ip': [], 'ip6': []}
-+        nft_lines = nft_list['output'] if nft_list['status'] == 0 else ''
-+        for line in nft_lines.splitlines():
-+            words = line.split()[0:3]
-+            if len(words) == 3 and words[0] == 'table' and \
-+                    words[1] in nft_ip_tables.keys():
-+                nft_ip_tables[words[1]].append(words[2])
-         # collect iptables -t for any existing table, if we can't read the
-         # tables, collect 2 default ones (mangle, filter)
-+        # do collect them only when relevant nft list ruleset exists
-+        default_ip_tables = "mangle\nfilter\n"
-         try:
-             ip_tables_names = open("/proc/net/ip_tables_names").read()
-         except IOError:
--            ip_tables_names = "mangle\nfilter\n"
-+            ip_tables_names = default_ip_tables
-         for table in ip_tables_names.splitlines():
--            self.collect_iptable(table)
-+            if nft_list['status'] == 0 and table in nft_ip_tables['ip']:
-+                self.collect_iptable(table)
-         # collect the same for ip6tables
-         try:
-             ip_tables_names = open("/proc/net/ip6_tables_names").read()
-         except IOError:
--            ip_tables_names = "mangle\nfilter\n"
-+            ip_tables_names = default_ip_tables
-         for table in ip_tables_names.splitlines():
--            self.collect_ip6table(table)
--
--        self.collect_nftables()
-+            if nft_list['status'] == 0 and table in nft_ip_tables['ip6']:
-+                self.collect_ip6table(table)
- 
-         # When iptables is called it will load the modules
-         # iptables_filter (for kernel <= 3) or
--- 
-2.31.1
-
-
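The table-parsing logic introduced above can be exercised on canned output; a self-contained sketch with an inline `nft list ruleset` sample.

```python
# Pick the address family and table name out of each 'table <family> <name> {'
# line, so iptables/ip6tables are later called only for tables that exist.
sample = """\
table ip mangle {
}
table ip6 filter {
}
"""

nft_ip_tables = {'ip': [], 'ip6': []}
for line in sample.splitlines():
    words = line.split()[0:3]
    if len(words) == 3 and words[0] == 'table' and words[1] in nft_ip_tables:
        nft_ip_tables[words[1]].append(words[2])
print(nft_ip_tables)  # {'ip': ['mangle'], 'ip6': ['filter']}
```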
-From 464bd2d2e83f203e369f2ba7671bbb7da53e06f6 Mon Sep 17 00:00:00 2001
-From: Pavel Moravec <pmoravec@redhat.com>
-Date: Sun, 24 Oct 2021 16:00:31 +0200
-Subject: [PATCH 3/3] [firewall_tables] Call iptables only when nft ip filter
- table exists
-
-iptables -vnxL creates the nft 'ip filter' table if it does not exist, hence
-we must guard iptables execution on the presence of the nft table.
-
-An equivalent logic applies to ip6tables.
-
-Resolves: #2724
-
-Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
----
- sos/report/plugins/firewall_tables.py | 26 ++++++++++++++------------
- 1 file changed, 14 insertions(+), 12 deletions(-)
-
-diff --git a/sos/report/plugins/firewall_tables.py b/sos/report/plugins/firewall_tables.py
-index ef04d939..7eafd60f 100644
---- a/sos/report/plugins/firewall_tables.py
-+++ b/sos/report/plugins/firewall_tables.py
-@@ -80,19 +80,21 @@ class firewall_tables(Plugin, IndependentPlugin):
-             if nft_list['status'] == 0 and table in nft_ip_tables['ip6']:
-                 self.collect_ip6table(table)
- 
--        # When iptables is called it will load the modules
--        # iptables_filter (for kernel <= 3) or
--        # nf_tables (for kernel >= 4) if they are not loaded.
-+        # When iptables is called it will load:
-+        # 1) the modules iptables_filter (for kernel <= 3) or
-+        #    nf_tables (for kernel >= 4) if they are not loaded.
-+        # 2) nft 'ip filter' table will be created
-         # The same goes for ipv6.
--        self.add_cmd_output(
--            "iptables -vnxL",
--            pred=SoSPredicate(self, kmods=['iptable_filter', 'nf_tables'])
--        )
--
--        self.add_cmd_output(
--            "ip6tables -vnxL",
--            pred=SoSPredicate(self, kmods=['ip6table_filter', 'nf_tables'])
--        )
-+        if nft_list['status'] != 0 or 'filter' in nft_ip_tables['ip']:
-+            self.add_cmd_output(
-+                "iptables -vnxL",
-+                pred=SoSPredicate(self, kmods=['iptable_filter', 'nf_tables'])
-+            )
-+        if nft_list['status'] != 0 or 'filter' in nft_ip_tables['ip6']:
-+            self.add_cmd_output(
-+                "ip6tables -vnxL",
-+                pred=SoSPredicate(self, kmods=['ip6table_filter', 'nf_tables'])
-+            )
- 
-         self.add_copy_spec([
-             "/etc/nftables",
--- 
-2.31.1
-
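And a minimal sketch of the final guard: `iptables -vnxL` is collected only when the nft 'ip filter' table already exists, or when `nft list ruleset` itself failed, so the collection command cannot create the table as a side effect. `should_run_iptables` is an illustrative name.

```python
def should_run_iptables(nft_status, nft_ip_tables, family='ip'):
    # a non-zero nft status means the ruleset could not be inspected at all,
    # in which case we fall back to the old unconditional behaviour
    return nft_status != 0 or 'filter' in nft_ip_tables[family]

print(should_run_iptables(0, {'ip': ['mangle'], 'ip6': []}))  # False
print(should_run_iptables(1, {'ip': [], 'ip6': []}))          # True
```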
diff --git a/SOURCES/sos-bz2011537-estimate-only-option.patch b/SOURCES/sos-bz2011537-estimate-only-option.patch
deleted file mode 100644
index a1a96c4..0000000
--- a/SOURCES/sos-bz2011537-estimate-only-option.patch
+++ /dev/null
@@ -1,1316 +0,0 @@
-From 5b245b1e449c6a05d09034bcb8290bffded79327 Mon Sep 17 00:00:00 2001
-From: Pavel Moravec <pmoravec@redhat.com>
-Date: Wed, 8 Sep 2021 17:04:58 +0200
-Subject: [PATCH] [report] Implement --estimate-only
-
-Add report option --estimate-only to estimate disk space requirements
-when running a sos report.
-
-Resolves: #2673
-
-Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
----
- man/en/sos-report.1    | 13 +++++++-
- sos/report/__init__.py | 74 ++++++++++++++++++++++++++++++++++++++++--
- 2 files changed, 84 insertions(+), 3 deletions(-)
-
-diff --git a/man/en/sos-report.1 b/man/en/sos-report.1
-index 36b337df..e8efc8f8 100644
---- a/man/en/sos-report.1
-+++ b/man/en/sos-report.1
-@@ -14,7 +14,7 @@ sos report \- Collect and package diagnostic and support data
-           [--preset preset] [--add-preset add_preset]\fR
-           [--del-preset del_preset] [--desc description]\fR
-           [--batch] [--build] [--debug] [--dry-run]\fR
--          [--label label] [--case-id id]\fR
-+          [--estimate-only] [--label label] [--case-id id]\fR
-           [--threads threads]\fR
-           [--plugin-timeout TIMEOUT]\fR
-           [--cmd-timeout TIMEOUT]\fR
-@@ -317,6 +317,17 @@ output, or string data from the system. The resulting logs may be used
- to understand the actions that sos would have taken without the dry run
- option.
- .TP
-+.B \--estimate-only
-+Estimate disk space requirements when running sos report. This can be valuable
-+to prevent the sos report working directory from consuming all free disk space.
-+No plugin data is available at the end.
-+
-+Plugins will be collected sequentially; the size of collected files and command
-+outputs will be calculated, and the plugin files will be immediately deleted prior
-+to execution of the next plugin. This can still consume all free disk space, though.
-+Please note that size estimations may not be accurate for highly utilized systems
-+due to changes between an estimate and a real execution.
-+.TP
- .B \--upload
- If specified, attempt to upload the resulting archive to a vendor defined location.
- 
-diff --git a/sos/report/__init__.py b/sos/report/__init__.py
-index 82484f1d..b033f621 100644
---- a/sos/report/__init__.py
-+++ b/sos/report/__init__.py
-@@ -86,6 +86,7 @@ class SoSReport(SoSComponent):
-         'desc': '',
-         'domains': [],
-         'dry_run': False,
-+        'estimate_only': False,
-         'experimental': False,
-         'enable_plugins': [],
-         'keywords': [],
-@@ -137,6 +138,7 @@ class SoSReport(SoSComponent):
-         self._args = args
-         self.sysroot = "/"
-         self.preset = None
-+        self.estimated_plugsizes = {}
- 
-         self.print_header()
-         self._set_debug()
-@@ -223,6 +225,11 @@ class SoSReport(SoSComponent):
-                                 help="Description for a new preset",)
-         report_grp.add_argument("--dry-run", action="store_true",
-                                 help="Run plugins but do not collect data")
-+        report_grp.add_argument("--estimate-only", action="store_true",
-+                                help="Approximate disk space requirements for "
-+                                     "a real sos run; disables --clean and "
-+                                     "--collect, sets --threads=1 and "
-+                                     "--no-postproc")
-         report_grp.add_argument("--experimental", action="store_true",
-                                 dest="experimental", default=False,
-                                 help="enable experimental plugins")
-@@ -700,6 +700,33 @@ class SoSReport(SoSComponent):
-                 self.all_options.append((plugin, plugin_name, optname,
-                                          optparm))
- 
-+    def _set_estimate_only(self):
-+        # set estimate-only mode by enforcing some options settings
-+        # set estimate-only mode by enforcing some option settings
-+        # and return a corresponding log message string
-+        ext_msg = []
-+        if self.opts.threads > 1:
-+            ext_msg += ["--threads=%s overridden to 1" % self.opts.threads, ]
-+            self.opts.threads = 1
-+        if not self.opts.build:
-+            ext_msg += ["--build enabled", ]
-+            self.opts.build = True
-+        if not self.opts.no_postproc:
-+            ext_msg += ["--no-postproc enabled", ]
-+            self.opts.no_postproc = True
-+        if self.opts.clean:
-+            ext_msg += ["--clean disabled", ]
-+            self.opts.clean = False
-+        if self.opts.upload:
-+            ext_msg += ["--upload* options disabled", ]
-+            self.opts.upload = False
-+        if ext_msg:
-+            msg += ", which overrides some options:\n  " + "\n  ".join(ext_msg)
-+        else:
-+            msg += "."
-+        msg += "\n\n"
-+        return msg
-+
-     def _report_profiles_and_plugins(self):
-         self.ui_log.info("")
-         if len(self.loaded_plugins):
-@@ -875,10 +909,12 @@ class SoSReport(SoSComponent):
-         return True
- 
-     def batch(self):
-+        msg = self.policy.get_msg()
-+        if self.opts.estimate_only:
-+            msg += self._set_estimate_only()
-         if self.opts.batch:
--            self.ui_log.info(self.policy.get_msg())
-+            self.ui_log.info(msg)
-         else:
--            msg = self.policy.get_msg()
-             msg += _("Press ENTER to continue, or CTRL-C to quit.\n")
-             try:
-                 input(msg)
-@@ -1011,6 +1047,22 @@ class SoSReport(SoSComponent):
-                 self.running_plugs.remove(plugin[1])
-                 self.loaded_plugins[plugin[0]-1][1].set_timeout_hit()
-                 pool._threads.clear()
-+        if self.opts.estimate_only:
-+            from pathlib import Path
-+            tmpdir_path = Path(self.archive.get_tmp_dir())
-+            self.estimated_plugsizes[plugin[1]] = sum(
-+                    [f.stat().st_size for f in tmpdir_path.glob('**/*')
-+                     if (os.path.isfile(f) and not os.path.islink(f))])
-+            # remove whole tmp_dir content - including "sos_commands" and
-+            # similar dirs that will be re-created on demand by next plugin
-+            # if needed; it is less error-prone approach than skipping
-+            # deletion of some dirs but deleting their content
-+            for f in os.listdir(self.archive.get_tmp_dir()):
-+                f = os.path.join(self.archive.get_tmp_dir(), f)
-+                if os.path.isdir(f):
-+                    rmtree(f)
-+                else:
-+                    os.unlink(f)
-         return True
- 
-     def collect_plugin(self, plugin):
-@@ -1330,6 +1382,24 @@ class SoSReport(SoSComponent):
-             self.policy.display_results(archive, directory, checksum,
-                                         map_file=map_file)
- 
-+        if self.opts.estimate_only:
-+            from sos.utilities import get_human_readable
-+            _sum = get_human_readable(sum(self.estimated_plugsizes.values()))
-+            self.ui_log.info("Estimated disk space requirement for whole "
-+                             "uncompressed sos report directory: %s" % _sum)
-+            bigplugins = sorted(self.estimated_plugsizes.items(),
-+                                key=lambda x: x[1], reverse=True)[:3]
-+            bp_out = ",  ".join("%s: %s" %
-+                                (p, get_human_readable(v, precision=0))
-+                                for p, v in bigplugins)
-+            self.ui_log.info("Three biggest plugins:  %s" % bp_out)
-+            self.ui_log.info("")
-+            self.ui_log.info("Please note the estimation is relevant to the "
-+                             "current options.")
-+            self.ui_log.info("Be aware that the real disk space requirements "
-+                             "might be different.")
-+            self.ui_log.info("")
-+
-         if self.opts.upload or self.opts.upload_url:
-             if not self.opts.build:
-                 try:
--- 
-2.31.1
-
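A standalone sketch of the per-plugin estimation loop above, assuming a plain temporary directory rather than sos's archive object; note it already folds in the symlink guard that a follow-up fix later in this patch series adds.

```python
import os
from pathlib import Path
from shutil import rmtree

def estimate_and_clear(tmp_dir):
    # sum the sizes of regular, non-symlink files the last plugin collected
    tmpdir_path = Path(tmp_dir)
    size = sum(f.stat().st_size for f in tmpdir_path.glob('**/*')
               if os.path.isfile(f) and not os.path.islink(f))
    # then empty the directory so the next plugin starts from scratch
    for entry in os.listdir(tmp_dir):
        entry = os.path.join(tmp_dir, entry)
        if os.path.isdir(entry) and not os.path.islink(entry):
            rmtree(entry)
        else:
            os.unlink(entry)
    return size
```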
-From 7ae47e6c0717c0b56c3368008dd99a87f7f436d5 Mon Sep 17 00:00:00 2001
-From: Pavel Moravec <pmoravec@redhat.com>
-Date: Wed, 13 Oct 2021 20:21:16 +0200
-Subject: [PATCH] [report] Count with sos_logs and sos_reports in
- --estimate-only
-
-Currently, we estimate just plugins' disk space and ignore sos_logs
-or sos_reports directories - although they can occupy nontrivial disk
-space as well.
-
-Resolves: #2723
-
-Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
----
- sos/report/__init__.py | 8 ++++++++
- 1 file changed, 8 insertions(+)
-
-diff --git a/sos/report/__init__.py b/sos/report/__init__.py
-index e35c7e8d..7feb31ee 100644
---- a/sos/report/__init__.py
-+++ b/sos/report/__init__.py
-@@ -1380,6 +1380,14 @@ class SoSReport(SoSComponent):
- 
-         if self.opts.estimate_only:
-             from sos.utilities import get_human_readable
-+            from pathlib import Path
-+            # add sos_logs, sos_reports dirs, etc., basically everything
-+            # that remained in self.tmpdir after plugins' contents removal
-+            # that still will be moved to the sos report final directory path
-+            tmpdir_path = Path(self.tmpdir)
-+            self.estimated_plugsizes['sos_logs_reports'] = sum(
-+                    [f.stat().st_size for f in tmpdir_path.glob('**/*')])
-+
-             _sum = get_human_readable(sum(self.estimated_plugsizes.values()))
-             self.ui_log.info("Estimated disk space requirement for whole "
-                              "uncompressed sos report directory: %s" % _sum)
--- 
-2.31.1
-
-From 4293f3317505661e8f32ba94ad87310996fa1626 Mon Sep 17 00:00:00 2001
-From: Eric Desrochers <eric.desrochers@canonical.com>
-Date: Tue, 19 Oct 2021 12:18:40 -0400
-Subject: [PATCH] [report] check for symlink before rmtree when opt
- estimate-only is used
-
-Check if the dir is also a symlink before calling the rmtree()
-method, so that the unlink() method can be used instead.
-
-Traceback (most recent call last):
-  File "./bin/sos", line 22, in <module>
-    sos.execute()
-  File "/tmp/sos/sos/__init__.py", line 186, in execute
-    self._component.execute()
-OSError: Cannot call rmtree on a symbolic link
-
-Closes: #2727
-
-Signed-off-by: Eric Desrochers <eric.desrochers@canonical.com>
----
- sos/report/__init__.py | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/sos/report/__init__.py b/sos/report/__init__.py
-index 7feb31ee..1b5bc97d 100644
---- a/sos/report/__init__.py
-+++ b/sos/report/__init__.py
-@@ -1059,7 +1059,7 @@ class SoSReport(SoSComponent):
-             # deletion of some dirs but deleting their content
-             for f in os.listdir(self.archive.get_tmp_dir()):
-                 f = os.path.join(self.archive.get_tmp_dir(), f)
--                if os.path.isdir(f):
-+                if os.path.isdir(f) and not os.path.islink(f):
-                     rmtree(f)
-                 else:
-                     os.unlink(f)
--- 
-2.31.1
-
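The failure mode fixed above is easy to reproduce; a short self-contained demonstration that `shutil.rmtree()` refuses symbolic links, which is why directory symlinks must be `unlink()`ed instead.

```python
import os
import tempfile
from shutil import rmtree

with tempfile.TemporaryDirectory() as tmp:
    real = os.path.join(tmp, 'real')
    link = os.path.join(tmp, 'link')
    os.mkdir(real)
    os.symlink(real, link)  # a symlink that points at a directory
    try:
        rmtree(link)
    except OSError as err:
        print(err)  # Cannot call rmtree on a symbolic link
        os.unlink(link)
```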
-From 589d47c93257b55bc796ef6ac25b88c974ee3d72 Mon Sep 17 00:00:00 2001
-From: Pavel Moravec <pmoravec@redhat.com>
-Date: Mon, 8 Nov 2021 16:38:24 +0100
-Subject: [PATCH] [report] Calculate sizes of dirs, symlinks and manifest in
- estimate mode
-
-Enhance --estimate-only mode to also calculate the sizes of:
-- symlinks
-- directories themselves
-- the manifest.json file
-
-Use the os.lstat() method instead of os.stat() to properly calculate
-the sizes (i.e. of symlinks themselves, not their destinations).
-
-Print the five biggest plugins instead of three, as sos logs and reports
-often stand as one "plugin" in the list.
-
-Resolves: #2752
-
-Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
----
- sos/report/__init__.py | 56 +++++++++++++++++++++---------------------
- 1 file changed, 28 insertions(+), 28 deletions(-)
-
-diff --git a/sos/report/__init__.py b/sos/report/__init__.py
-index 10952566..a4c92acc 100644
---- a/sos/report/__init__.py
-+++ b/sos/report/__init__.py
-@@ -1050,8 +1050,7 @@ class SoSReport(SoSComponent):
-             from pathlib import Path
-             tmpdir_path = Path(self.archive.get_tmp_dir())
-             self.estimated_plugsizes[plugin[1]] = sum(
--                    [f.stat().st_size for f in tmpdir_path.glob('**/*')
--                     if (os.path.isfile(f) and not os.path.islink(f))])
-+                    [f.lstat().st_size for f in tmpdir_path.glob('**/*')])
-             # remove whole tmp_dir content - including "sos_commands" and
-             # similar dirs that will be re-created on demand by next plugin
-             # if needed; it is less error-prone approach than skipping
-@@ -1273,6 +1272,33 @@ class SoSReport(SoSComponent):
-                 short_name='manifest.json'
-             )
- 
-+        # print results in estimate mode (to include also just added manifest)
-+        if self.opts.estimate_only:
-+            from sos.utilities import get_human_readable
-+            from pathlib import Path
-+            # add sos_logs, sos_reports dirs, etc., basically everything
-+            # that remained in self.tmpdir after plugins' contents removal
-+            # that still will be moved to the sos report final directory path
-+            tmpdir_path = Path(self.tmpdir)
-+            self.estimated_plugsizes['sos_logs_reports'] = sum(
-+                    [f.lstat().st_size for f in tmpdir_path.glob('**/*')])
-+
-+            _sum = get_human_readable(sum(self.estimated_plugsizes.values()))
-+            self.ui_log.info("Estimated disk space requirement for whole "
-+                             "uncompressed sos report directory: %s" % _sum)
-+            bigplugins = sorted(self.estimated_plugsizes.items(),
-+                                key=lambda x: x[1], reverse=True)[:5]
-+            bp_out = ",  ".join("%s: %s" %
-+                                (p, get_human_readable(v, precision=0))
-+                                for p, v in bigplugins)
-+            self.ui_log.info("Five biggest plugins:  %s" % bp_out)
-+            self.ui_log.info("")
-+            self.ui_log.info("Please note the estimation is relevant to the "
-+                             "current options.")
-+            self.ui_log.info("Be aware that the real disk space requirements "
-+                             "might be different.")
-+            self.ui_log.info("")
-+
-         # package up and compress the results
-         if not self.opts.build:
-             old_umask = os.umask(0o077)
-@@ -1377,32 +1403,6 @@ class SoSReport(SoSComponent):
-             self.policy.display_results(archive, directory, checksum,
-                                         map_file=map_file)
- 
--        if self.opts.estimate_only:
--            from sos.utilities import get_human_readable
--            from pathlib import Path
--            # add sos_logs, sos_reports dirs, etc., basically everything
--            # that remained in self.tmpdir after plugins' contents removal
--            # that still will be moved to the sos report final directory path
--            tmpdir_path = Path(self.tmpdir)
--            self.estimated_plugsizes['sos_logs_reports'] = sum(
--                    [f.stat().st_size for f in tmpdir_path.glob('**/*')])
--
--            _sum = get_human_readable(sum(self.estimated_plugsizes.values()))
--            self.ui_log.info("Estimated disk space requirement for whole "
--                             "uncompressed sos report directory: %s" % _sum)
--            bigplugins = sorted(self.estimated_plugsizes.items(),
--                                key=lambda x: x[1], reverse=True)[:3]
--            bp_out = ",  ".join("%s: %s" %
--                                (p, get_human_readable(v, precision=0))
--                                for p, v in bigplugins)
--            self.ui_log.info("Three biggest plugins:  %s" % bp_out)
--            self.ui_log.info("")
--            self.ui_log.info("Please note the estimation is relevant to the "
--                             "current options.")
--            self.ui_log.info("Be aware that the real disk space requirements "
--                             "might be different.")
--            self.ui_log.info("")
--
-         if self.opts.upload or self.opts.upload_url:
-             if not self.opts.build:
-                 try:
--- 
-2.31.1
-
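A minimal illustration of the `os.stat()` to `os.lstat()` change: `stat()` follows a symlink and reports the target's size, while `lstat()` reports the link itself.

```python
import os
import tempfile

with tempfile.TemporaryDirectory() as tmp:
    target = os.path.join(tmp, 'target')
    link = os.path.join(tmp, 'link')
    with open(target, 'w') as f:
        f.write('x' * 4096)
    os.symlink(target, link)
    print(os.stat(link).st_size)   # 4096: follows the link to the target
    print(os.lstat(link).st_size)  # the size of the symlink itself
```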
-From c6a5bbb8d75aadd5c7f76d3f469929aba2cf8060 Mon Sep 17 00:00:00 2001
-From: Pavel Moravec <pmoravec@redhat.com>
-Date: Wed, 5 Jan 2022 10:33:58 +0100
-Subject: [PATCH] [report] Provide better warning about estimate-mode
-
-As --estimate-only calculates disk usage based on `stat` data that
-differs from the outputs of other commands like `du`, enhance the warning
-about the reliability of the calculated estimation.
-
-Also add a rule-of-thumb recommendation of real disk space requirements.
-
-Resolves: #2815
-
-Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
----
- man/en/sos-report.1    | 10 +++++++---
- sos/report/__init__.py |  3 ++-
- 2 files changed, 9 insertions(+), 4 deletions(-)
-
-diff --git a/man/en/sos-report.1 b/man/en/sos-report.1
-index 464a77e54..e34773986 100644
---- a/man/en/sos-report.1
-+++ b/man/en/sos-report.1
-@@ -343,9 +343,13 @@ is available at the end.
- 
- Plugins will be collected sequentially; the size of collected files and command
- outputs will be calculated, and the plugin files will be immediately deleted prior
--to execution of the next plugin. This can still consume all free disk space, though.
--Please note that size estimations may not be accurate for highly utilized systems
--due to changes between an estimate and a real execution.
-+to execution of the next plugin. This can still consume all free disk space, though.
-+
-+Please note that size estimations may not be accurate for highly utilized systems due
-+to changes between an estimate and a real execution. Also expect some difference
-+between the estimation (using the `stat` command) and other commands used (e.g. `du`).
-+
-+A rule of thumb is to reserve at least double the estimation.
- .TP
- .B \--upload
- If specified, attempt to upload the resulting archive to a vendor defined location.
-diff --git a/sos/report/__init__.py b/sos/report/__init__.py
-index ef61fb344..e0617b45e 100644
---- a/sos/report/__init__.py
-+++ b/sos/report/__init__.py
-@@ -1330,7 +1330,8 @@ def final_work(self):
-             self.ui_log.info("Please note the estimation is relevant to the "
-                              "current options.")
-             self.ui_log.info("Be aware that the real disk space requirements "
--                             "might be different.")
-+                             "might be different. A rule of thumb is to "
-+                             "reserve at least double the estimation.")
-             self.ui_log.info("")
- 
-         # package up and compress the results
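A hedged sketch of the documented rule of thumb; `get_human_readable` here is a simplified stand-in for `sos.utilities.get_human_readable`, not its actual implementation.

```python
def get_human_readable(size, precision=2):
    # walk up the binary units until the value fits
    for unit in ('B', 'KiB', 'MiB', 'GiB', 'TiB'):
        if size < 1024 or unit == 'TiB':
            return '%.*f%s' % (precision, size, unit)
        size /= 1024.0

estimate = 734003200  # hypothetical sum of estimated plugin sizes
print('Estimated:', get_human_readable(estimate))
print('Reserve at least:', get_human_readable(2 * estimate))
```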
-From f22efe044f1f0565b57d6aeca2081a5227e0312c Mon Sep 17 00:00:00 2001
-From: Jake Hunsaker <jhunsake@redhat.com>
-Date: Mon, 14 Feb 2022 09:37:30 -0500
-Subject: [PATCH] [utilities] Don't try to chroot to /
-
-With the recent fix changing sysroot from `None` to always (correctly)
-being `/`, we should guard against situations where `sos_get_command_output()`
-would now try to chroot to `/` before running any command. Incidentally,
-this would also cause our unittests to fail if they were run by a
-non-root user.
-
-Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
----
- sos/utilities.py | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/sos/utilities.py b/sos/utilities.py
-index 6b13415b..d782123a 100644
---- a/sos/utilities.py
-+++ b/sos/utilities.py
-@@ -120,7 +120,7 @@ def sos_get_command_output(command, timeout=TIMEOUT_DEFAULT, stderr=False,
-     # closure are caught in the parent (chroot and chdir are bound from
-     # the enclosing scope).
-     def _child_prep_fn():
--        if (chroot):
-+        if chroot and chroot != '/':
-             os.chroot(chroot)
-         if (chdir):
-             os.chdir(chdir)
--- 
-2.34.1
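A standalone sketch of the guard above: only chroot when a real alternate sysroot is configured, since chrooting to `/` is a no-op that still requires root privileges.

```python
import os

def child_prep(chroot=None, chdir=None):
    # mirrors the fixed _child_prep_fn(): skip the pointless chroot to '/'
    if chroot and chroot != '/':
        os.chroot(chroot)  # needs CAP_SYS_CHROOT; never reached for '/'
    if chdir:
        os.chdir(chdir)

child_prep(chroot='/', chdir='/tmp')  # safe for unprivileged users
print(os.getcwd())
```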
-From 3d064102f8ca6662fd9602512e1cb05cf8746dfd Mon Sep 17 00:00:00 2001
-From: Jake Hunsaker <jhunsake@redhat.com>
-Date: Mon, 27 Sep 2021 19:01:16 -0400
-Subject: [PATCH] [Systemd, Policy] Correct InitSystem chrooting when chroot is
- needed
-
-This commit resolves a situation in which `sos` is being run in a
-container but the `SystemdInit` InitSystem would not properly load
-information from the host, thus causing the `Plugin.is_service*()`
-methods to erroneously fail or return `False`.
-
-Fix this scenario by pulling the `_container_init()` and related logic
-to check for a containerized host sysroot out of the Red Hat specific
-policy and into the base `LinuxPolicy` class so that the init system can
-be initialized with the correct sysroot, which is now used to chroot the
-calls to the relevant `systemctl` commands.
-
-For now, this does impose the use of looking for the `container` env var
-(automatically set by docker, podman, and crio regardless of
-distribution) and the use of the `HOST` env var to read where the host's
-`/` filesystem is mounted within the container. If desired in the
-future, this can be changed to allow policy-specific overrides. For now
-however, this extends host collection via an sos container for all
-distributions currently shipping sos.
-
-Note that this issue only affected the `InitSystem` abstraction for
-loading information about local services, and did not affect init system
-related commands called by plugins as part of those collections.
-
-Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
----
- sos/policies/distros/__init__.py      | 28 ++++++++++++++++++++++++++-
- sos/policies/distros/redhat.py        | 27 +-------------------------
- sos/policies/init_systems/__init__.py | 13 +++++++++++--
- sos/policies/init_systems/systemd.py  |  7 ++++---
- 4 files changed, 43 insertions(+), 32 deletions(-)
-
-diff --git a/sos/policies/distros/__init__.py b/sos/policies/distros/__init__.py
-index f5b9fd5b01..c33a356a75 100644
---- a/sos/policies/distros/__init__.py
-+++ b/sos/policies/distros/__init__.py
-@@ -29,6 +29,10 @@
- except ImportError:
-     REQUESTS_LOADED = False
- 
-+# Container environment variables for detecting if we're in a container
-+ENV_CONTAINER = 'container'
-+ENV_HOST_SYSROOT = 'HOST'
-+
- 
- class LinuxPolicy(Policy):
-     """This policy is meant to be an abc class that provides common
-@@ -69,10 +73,17 @@ def __init__(self, sysroot=None, init=None, probe_runtime=True):
-                                           probe_runtime=probe_runtime)
-         self.init_kernel_modules()
- 
-+        # need to set _host_sysroot before PackageManager()
-+        if sysroot:
-+            self._container_init()
-+            self._host_sysroot = sysroot
-+        else:
-+            sysroot = self._container_init()
-+
-         if init is not None:
-             self.init_system = init
-         elif os.path.isdir("/run/systemd/system/"):
--            self.init_system = SystemdInit()
-+            self.init_system = SystemdInit(chroot=sysroot)
-         else:
-             self.init_system = InitSystem()
- 
-@@ -130,6 +141,21 @@ def get_local_name(self):
-     def sanitize_filename(self, name):
-         return re.sub(r"[^-a-z,A-Z.0-9]", "", name)
- 
-+    def _container_init(self):
-+        """Check if sos is running in a container and perform container
-+        specific initialisation based on ENV_HOST_SYSROOT.
-+        """
-+        if ENV_CONTAINER in os.environ:
-+            if os.environ[ENV_CONTAINER] in ['docker', 'oci', 'podman']:
-+                self._in_container = True
-+        if ENV_HOST_SYSROOT in os.environ:
-+            self._host_sysroot = os.environ[ENV_HOST_SYSROOT]
-+        use_sysroot = self._in_container and self._host_sysroot is not None
-+        if use_sysroot:
-+            host_tmp_dir = os.path.abspath(self._host_sysroot + self._tmp_dir)
-+            self._tmp_dir = host_tmp_dir
-+        return self._host_sysroot if use_sysroot else None
-+
-     def init_kernel_modules(self):
-         """Obtain a list of loaded kernel modules to reference later for plugin
-         enablement and SoSPredicate checks
-diff --git a/sos/policies/distros/redhat.py b/sos/policies/distros/redhat.py
-index b3a84336be..3476e21fb2 100644
---- a/sos/policies/distros/redhat.py
-+++ b/sos/policies/distros/redhat.py
-@@ -17,7 +17,7 @@
- from sos.presets.redhat import (RHEL_PRESETS, ATOMIC_PRESETS, RHV, RHEL,
-                                 CB, RHOSP, RHOCP, RH_CFME, RH_SATELLITE,
-                                 ATOMIC)
--from sos.policies.distros import LinuxPolicy
-+from sos.policies.distros import LinuxPolicy, ENV_HOST_SYSROOT
- from sos.policies.package_managers.rpm import RpmPackageManager
- from sos import _sos as _
- 
-@@ -56,12 +56,6 @@ def __init__(self, sysroot=None, init=None, probe_runtime=True,
-         super(RedHatPolicy, self).__init__(sysroot=sysroot, init=init,
-                                            probe_runtime=probe_runtime)
-         self.usrmove = False
--        # need to set _host_sysroot before PackageManager()
--        if sysroot:
--            self._container_init()
--            self._host_sysroot = sysroot
--        else:
--            sysroot = self._container_init()
- 
-         self.package_manager = RpmPackageManager(chroot=sysroot,
-                                                  remote_exec=remote_exec)
-@@ -140,21 +134,6 @@ def transform_path(path):
-         else:
-             return files
- 
--    def _container_init(self):
--        """Check if sos is running in a container and perform container
--        specific initialisation based on ENV_HOST_SYSROOT.
--        """
--        if ENV_CONTAINER in os.environ:
--            if os.environ[ENV_CONTAINER] in ['docker', 'oci', 'podman']:
--                self._in_container = True
--        if ENV_HOST_SYSROOT in os.environ:
--            self._host_sysroot = os.environ[ENV_HOST_SYSROOT]
--        use_sysroot = self._in_container and self._host_sysroot is not None
--        if use_sysroot:
--            host_tmp_dir = os.path.abspath(self._host_sysroot + self._tmp_dir)
--            self._tmp_dir = host_tmp_dir
--        return self._host_sysroot if use_sysroot else None
--
-     def runlevel_by_service(self, name):
-         from subprocess import Popen, PIPE
-         ret = []
-@@ -183,10 +162,6 @@ def get_tmp_dir(self, opt_tmp_dir):
-         return opt_tmp_dir
- 
- 
--# Container environment variables on Red Hat systems.
--ENV_CONTAINER = 'container'
--ENV_HOST_SYSROOT = 'HOST'
--
- # Legal disclaimer text for Red Hat products
- disclaimer_text = """
- Any information provided to %(vendor)s will be treated in \
-diff --git a/sos/policies/init_systems/__init__.py b/sos/policies/init_systems/__init__.py
-index dd663e6522..beac44cee3 100644
---- a/sos/policies/init_systems/__init__.py
-+++ b/sos/policies/init_systems/__init__.py
-@@ -29,9 +29,14 @@ class InitSystem():
-                       status of services
-     :type query_cmd: ``str``
- 
-+    :param chroot:  Location to chroot to for any command execution, i.e. the
-+                    sysroot if we're running in a container
-+    :type chroot:   ``str`` or ``None``
-+
-     """
- 
--    def __init__(self, init_cmd=None, list_cmd=None, query_cmd=None):
-+    def __init__(self, init_cmd=None, list_cmd=None, query_cmd=None,
-+                 chroot=None):
-         """Initialize a new InitSystem()"""
- 
-         self.services = {}
-@@ -39,6 +44,7 @@ def __init__(self, init_cmd=None, list_cmd=None, query_cmd=None):
-         self.init_cmd = init_cmd
-         self.list_cmd = "%s %s" % (self.init_cmd, list_cmd) or None
-         self.query_cmd = "%s %s" % (self.init_cmd, query_cmd) or None
-+        self.chroot = chroot
- 
-     def is_enabled(self, name):
-         """Check if given service name is enabled
-@@ -108,7 +114,10 @@ def _query_service(self, name):
-         """Query an individual service"""
-         if self.query_cmd:
-             try:
--                return sos_get_command_output("%s %s" % (self.query_cmd, name))
-+                return sos_get_command_output(
-+                    "%s %s" % (self.query_cmd, name),
-+                    chroot=self.chroot
-+                )
-             except Exception:
-                 return None
-         return None
-diff --git a/sos/policies/init_systems/systemd.py b/sos/policies/init_systems/systemd.py
-index 1b138f97b3..76dc57e27f 100644
---- a/sos/policies/init_systems/systemd.py
-+++ b/sos/policies/init_systems/systemd.py
-@@ -15,11 +15,12 @@
- class SystemdInit(InitSystem):
-     """InitSystem abstraction for SystemD systems"""
- 
--    def __init__(self):
-+    def __init__(self, chroot=None):
-         super(SystemdInit, self).__init__(
-             init_cmd='systemctl',
-             list_cmd='list-unit-files --type=service',
--            query_cmd='status'
-+            query_cmd='status',
-+            chroot=chroot
-         )
-         self.load_all_services()
- 
-@@ -30,7 +31,7 @@ def parse_query(self, output):
-         return 'unknown'
- 
-     def load_all_services(self):
--        svcs = shell_out(self.list_cmd).splitlines()[1:]
-+        svcs = shell_out(self.list_cmd, chroot=self.chroot).splitlines()[1:]
-         for line in svcs:
-             try:
-                 name = line.split('.service')[0]
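For context, a sketch of the container detection this patch moves into `LinuxPolicy`: the `container` environment variable is set by docker/podman/crio, and `HOST` points at where the host's `/` is mounted inside an sos container.

```python
import os

ENV_CONTAINER = 'container'
ENV_HOST_SYSROOT = 'HOST'

def detect_host_sysroot():
    in_container = os.environ.get(ENV_CONTAINER) in ('docker', 'oci', 'podman')
    host_sysroot = os.environ.get(ENV_HOST_SYSROOT)
    return host_sysroot if (in_container and host_sysroot) else None

print(detect_host_sysroot())  # e.g. '/host' inside an sos container, else None
```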
-From e869bc84c714bfc2249bbcb84e14908049ee42c4 Mon Sep 17 00:00:00 2001
-From: Jake Hunsaker <jhunsake@redhat.com>
-Date: Mon, 27 Sep 2021 12:07:08 -0400
-Subject: [PATCH] [Plugin,utilities] Add sysroot wrapper for os.path.join
-
-Adds a wrapper for `os.path.join()` which accounts for non-/ sysroots,
-like we have done previously for other `os.path` methods. Further
-updates `Plugin()` to use this wrapper where appropriate.
-
-Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
----
- sos/report/plugins/__init__.py | 43 +++++++++++++++++-----------------
- sos/utilities.py               |  6 +++++
- 2 files changed, 28 insertions(+), 21 deletions(-)
-
-diff --git a/sos/report/plugins/__init__.py b/sos/report/plugins/__init__.py
-index c635b8de9..1f84bca49 100644
---- a/sos/report/plugins/__init__.py
-+++ b/sos/report/plugins/__init__.py
-@@ -13,7 +13,7 @@
- from sos.utilities import (sos_get_command_output, import_module, grep,
-                            fileobj, tail, is_executable, TIMEOUT_DEFAULT,
-                            path_exists, path_isdir, path_isfile, path_islink,
--                           listdir)
-+                           listdir, path_join)
- 
- import os
- import glob
-@@ -708,19 +708,6 @@ def _log_info(self, msg):
-     def _log_debug(self, msg):
-         self.soslog.debug(self._format_msg(msg))
- 
--    def join_sysroot(self, path):
--        """Join a given path with the configured sysroot
--
--        :param path:    The filesystem path that needs to be joined
--        :type path: ``str``
--
--        :returns: The joined filesystem path
--        :rtype: ``str``
--        """
--        if path[0] == os.sep:
--            path = path[1:]
--        return os.path.join(self.sysroot, path)
--
-     def strip_sysroot(self, path):
-         """Remove the configured sysroot from a filesystem path
- 
-@@ -1176,7 +1163,7 @@ def _copy_dir(self, srcpath):
- 
-     def _get_dest_for_srcpath(self, srcpath):
-         if self.use_sysroot():
--            srcpath = self.join_sysroot(srcpath)
-+            srcpath = self.path_join(srcpath)
-         for copied in self.copied_files:
-             if srcpath == copied["srcpath"]:
-                 return copied["dstpath"]
-@@ -1284,7 +1271,7 @@ def add_forbidden_path(self, forbidden, recursive=False):
-             forbidden = [forbidden]
- 
-         if self.use_sysroot():
--            forbidden = [self.join_sysroot(f) for f in forbidden]
-+            forbidden = [self.path_join(f) for f in forbidden]
- 
-         for forbid in forbidden:
-             self._log_info("adding forbidden path '%s'" % forbid)
-@@ -1438,7 +1425,7 @@ def add_copy_spec(self, copyspecs, sizelimit=None, maxage=None,
-             since = self.get_option('since')
- 
-         logarchive_pattern = re.compile(r'.*((\.(zip|gz|bz2|xz))|[-.][\d]+)$')
--        configfile_pattern = re.compile(r"^%s/*" % self.join_sysroot("etc"))
-+        configfile_pattern = re.compile(r"^%s/*" % self.path_join("etc"))
- 
-         if not self.test_predicate(pred=pred):
-             self._log_info("skipped copy spec '%s' due to predicate (%s)" %
-@@ -1468,7 +1455,7 @@ def add_copy_spec(self, copyspecs, sizelimit=None, maxage=None,
-                 return False
- 
-             if self.use_sysroot():
--                copyspec = self.join_sysroot(copyspec)
-+                copyspec = self.path_join(copyspec)
- 
-             files = self._expand_copy_spec(copyspec)
- 
-@@ -1683,7 +1670,7 @@ def _add_device_cmd(self, cmds, devices, timeout=None, sizelimit=None,
-                 if not _dev_ok:
-                     continue
-                 if prepend_path:
--                    device = os.path.join(prepend_path, device)
-+                    device = self.path_join(prepend_path, device)
-                 _cmd = cmd % {'dev': device}
-                 self._add_cmd_output(cmd=_cmd, timeout=timeout,
-                                      sizelimit=sizelimit, chroot=chroot,
-@@ -2592,7 +2579,7 @@ def __expand(paths):
-                     if self.path_isfile(path) or self.path_islink(path):
-                         found_paths.append(path)
-                     elif self.path_isdir(path) and self.listdir(path):
--                        found_paths.extend(__expand(os.path.join(path, '*')))
-+                        found_paths.extend(__expand(self.path_join(path, '*')))
-                     else:
-                         found_paths.append(path)
-                 except PermissionError:
-@@ -2608,7 +2595,7 @@ def __expand(paths):
-         if (os.access(copyspec, os.R_OK) and self.path_isdir(copyspec) and
-                 self.listdir(copyspec)):
-             # the directory exists and is non-empty, recurse through it
--            copyspec = os.path.join(copyspec, '*')
-+            copyspec = self.path_join(copyspec, '*')
-         expanded = glob.glob(copyspec, recursive=True)
-         recursed_files = []
-         for _path in expanded:
-@@ -2877,6 +2864,20 @@ def listdir(self, path):
-         """
-         return listdir(path, self.commons['cmdlineopts'].sysroot)
- 
-+    def path_join(self, path, *p):
-+        """Helper to call the sos.utilities wrapper that allows the
-+        corresponding `os` call to account for sysroot
-+
-+        :param path:    The leading path passed to os.path.join()
-+        :type path:     ``str``
-+
-+        :param p:       Following path section(s) to be joined with ``path``,
-+                        an empty parameter will result in a path that ends with
-+                        a separator
-+        :type p:        ``str``
-+        """
-+        return path_join(path, *p, sysroot=self.sysroot)
-+
-     def postproc(self):
-         """Perform any postprocessing. To be replaced by a plugin if required.
-         """
-diff --git a/sos/utilities.py b/sos/utilities.py
-index c940e066d..b75751539 100644
---- a/sos/utilities.py
-+++ b/sos/utilities.py
-@@ -242,6 +242,12 @@ def listdir(path, sysroot):
-     return _os_wrapper(path, sysroot, 'listdir', os)
- 
- 
-+def path_join(path, *p, sysroot=os.sep):
-+    if not path.startswith(sysroot):
-+        path = os.path.join(sysroot, path.lstrip(os.sep))
-+    return os.path.join(path, *p)
-+
-+
- class AsyncReader(threading.Thread):
-     """Used to limit command output to a given size without deadlocking
-     sos.
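A minimal standalone sketch of the sysroot-aware join this commit introduces (reimplemented here for illustration; the behavior mirrors the `path_join()` hunk above):

    import os

    def path_join(path, *p, sysroot=os.sep):
        # Re-anchor the leading path under sysroot unless it is already there.
        if not path.startswith(sysroot):
            path = os.path.join(sysroot, path.lstrip(os.sep))
        return os.path.join(path, *p)

    path_join('/etc', 'ssh')                        # '/etc/ssh' (default '/' sysroot)
    path_join('/etc', 'ssh', sysroot='/host')       # '/host/etc/ssh'
    path_join('/host/etc', 'ssh', sysroot='/host')  # '/host/etc/ssh' (no double join)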
-From 9596473d1779b9c48e9923c220aaf2b8d9b3bebf Mon Sep 17 00:00:00 2001
-From: Jake Hunsaker <jhunsake@redhat.com>
-Date: Thu, 18 Nov 2021 13:17:14 -0500
-Subject: [PATCH] [global] Align sysroot determination and usage across sos
-
-The determination of sysroot - being automatic, user-specified, or
-controlled via environment variables in a container - has gotten muddied
-over time. This has resulted in different parts of the project
-(`Policy`, `Plugin`, `SoSComponent`, etc.) not always being in sync when
-sysroot is not `/`, thus causing varying and unexpected or unintended
-behavior.
-
-Fix this by only determining sysroot within `Policy()` initialization,
-and then using that determination across all aspects of the project that
-use or reference sysroot.
-
-This results in several changes:
-
-- `PackageManager()` will now (again) correctly reference host package
-  lists when sos is run in a container.
-
-- `ContainerRuntime()` is now able to activate when sos is running in a
-  container.
-
-- Plugins will now properly use sysroot for _all_ plugin enablement
-  triggers.
-
-- Plugins, Policy, and SoSComponents now all reference the
-  `self.sysroot` variable, rather than switching between `sysroot`,
-  `_host_sysroot`, and `commons['sysroot']`. `_host_sysroot` has been
-  removed from `Policy`.
-
-Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
----
- sos/archive.py                    |  2 +-
- sos/component.py                  |  2 +-
- sos/policies/__init__.py          | 11 +----------
- sos/policies/distros/__init__.py  | 33 +++++++++++++++++++------------
- sos/policies/distros/debian.py    |  2 +-
- sos/policies/distros/redhat.py    |  3 +--
- sos/policies/runtimes/__init__.py | 15 +++++++++-----
- sos/policies/runtimes/docker.py   |  4 ++--
- sos/report/__init__.py            |  6 ++----
- sos/report/plugins/__init__.py    | 22 +++++++++++----------
- sos/report/plugins/unpackaged.py  |  7 ++++---
- sos/utilities.py                  | 13 ++++++++----
- 12 files changed, 64 insertions(+), 56 deletions(-)
-
-diff --git a/sos/archive.py b/sos/archive.py
-index b02b247595..e3c68b7789 100644
---- a/sos/archive.py
-+++ b/sos/archive.py
-@@ -153,7 +153,7 @@ def dest_path(self, name):
-         return (os.path.join(self._archive_root, name))
- 
-     def join_sysroot(self, path):
--        if path.startswith(self.sysroot):
-+        if not self.sysroot or path.startswith(self.sysroot):
-             return path
-         if path[0] == os.sep:
-             path = path[1:]
-diff --git a/sos/component.py b/sos/component.py
-index 5ac6e47f4f..dba0aabf2b 100644
---- a/sos/component.py
-+++ b/sos/component.py
-@@ -109,7 +109,7 @@ def __init__(self, parser, parsed_args, cmdline_args):
-             try:
-                 import sos.policies
-                 self.policy = sos.policies.load(sysroot=self.opts.sysroot)
--                self.sysroot = self.policy.host_sysroot()
-+                self.sysroot = self.policy.sysroot
-             except KeyboardInterrupt:
-                 self._exit(0)
-             self._is_root = self.policy.is_root()
-diff --git a/sos/policies/__init__.py b/sos/policies/__init__.py
-index fb8db1d724..ef9188deb4 100644
---- a/sos/policies/__init__.py
-+++ b/sos/policies/__init__.py
-@@ -110,7 +110,6 @@ class Policy(object):
-     presets = {"": PresetDefaults()}
-     presets_path = PRESETS_PATH
-     _in_container = False
--    _host_sysroot = '/'
- 
-     def __init__(self, sysroot=None, probe_runtime=True):
-         """Subclasses that choose to override this initializer should call
-@@ -124,7 +123,7 @@ def __init__(self, sysroot=None, probe_runtime=True):
-         self.package_manager = PackageManager()
-         self.valid_subclasses = [IndependentPlugin]
-         self.set_exec_path()
--        self._host_sysroot = sysroot
-+        self.sysroot = sysroot
-         self.register_presets(GENERIC_PRESETS)
- 
-     def check(self, remote=''):
-@@ -177,14 +176,6 @@ def in_container(self):
-         """
-         return self._in_container
- 
--    def host_sysroot(self):
--        """Get the host's default sysroot
--
--        :returns: Host sysroot
--        :rtype: ``str`` or ``None``
--        """
--        return self._host_sysroot
--
-     def dist_version(self):
-         """
-         Return the OS version
-diff --git a/sos/policies/distros/__init__.py b/sos/policies/distros/__init__.py
-index 7bdc81b852..c69fc1e73c 100644
---- a/sos/policies/distros/__init__.py
-+++ b/sos/policies/distros/__init__.py
-@@ -71,19 +71,18 @@ class LinuxPolicy(Policy):
-     def __init__(self, sysroot=None, init=None, probe_runtime=True):
-         super(LinuxPolicy, self).__init__(sysroot=sysroot,
-                                           probe_runtime=probe_runtime)
--        self.init_kernel_modules()
- 
--        # need to set _host_sysroot before PackageManager()
-         if sysroot:
--            self._container_init()
--            self._host_sysroot = sysroot
-+            self.sysroot = sysroot
-         else:
--            sysroot = self._container_init()
-+            self.sysroot = self._container_init()
-+
-+        self.init_kernel_modules()
- 
-         if init is not None:
-             self.init_system = init
-         elif os.path.isdir("/run/systemd/system/"):
--            self.init_system = SystemdInit(chroot=sysroot)
-+            self.init_system = SystemdInit(chroot=self.sysroot)
-         else:
-             self.init_system = InitSystem()
- 
-@@ -149,27 +148,30 @@ def _container_init(self):
-             if os.environ[ENV_CONTAINER] in ['docker', 'oci', 'podman']:
-                 self._in_container = True
-         if ENV_HOST_SYSROOT in os.environ:
--            self._host_sysroot = os.environ[ENV_HOST_SYSROOT]
--        use_sysroot = self._in_container and self._host_sysroot is not None
-+            _host_sysroot = os.environ[ENV_HOST_SYSROOT]
-+        use_sysroot = self._in_container and _host_sysroot is not None
-         if use_sysroot:
--            host_tmp_dir = os.path.abspath(self._host_sysroot + self._tmp_dir)
-+            host_tmp_dir = os.path.abspath(_host_sysroot + self._tmp_dir)
-             self._tmp_dir = host_tmp_dir
--        return self._host_sysroot if use_sysroot else None
-+        return _host_sysroot if use_sysroot else None
- 
-     def init_kernel_modules(self):
-         """Obtain a list of loaded kernel modules to reference later for plugin
-         enablement and SoSPredicate checks
-         """
-         self.kernel_mods = []
-+        release = os.uname().release
- 
-         # first load modules from lsmod
--        lines = shell_out("lsmod", timeout=0).splitlines()
-+        lines = shell_out("lsmod", timeout=0, chroot=self.sysroot).splitlines()
-         self.kernel_mods.extend([
-             line.split()[0].strip() for line in lines[1:]
-         ])
- 
-         # next, include kernel builtins
--        builtins = "/usr/lib/modules/%s/modules.builtin" % os.uname().release
-+        builtins = self.join_sysroot(
-+            "/usr/lib/modules/%s/modules.builtin" % release
-+        )
-         try:
-             with open(builtins, "r") as mfile:
-                 for line in mfile:
-@@ -186,7 +188,7 @@ def init_kernel_modules(self):
-             'dm_mod': 'CONFIG_BLK_DEV_DM'
-         }
- 
--        booted_config = "/boot/config-%s" % os.uname().release
-+        booted_config = self.join_sysroot("/boot/config-%s" % release)
-         kconfigs = []
-         try:
-             with open(booted_config, "r") as kfile:
-@@ -200,6 +202,11 @@ def init_kernel_modules(self):
-             if config_strings[builtin] in kconfigs:
-                 self.kernel_mods.append(builtin)
- 
-+    def join_sysroot(self, path):
-+        if self.sysroot and self.sysroot != '/':
-+            path = os.path.join(self.sysroot, path.lstrip('/'))
-+        return path
-+
-     def pre_work(self):
-         # this method will be called before the gathering begins
- 
-diff --git a/sos/policies/distros/debian.py b/sos/policies/distros/debian.py
-index 95b389a65e..639fd5eba3 100644
---- a/sos/policies/distros/debian.py
-+++ b/sos/policies/distros/debian.py
-@@ -27,7 +27,7 @@ def __init__(self, sysroot=None, init=None, probe_runtime=True,
-                  remote_exec=None):
-         super(DebianPolicy, self).__init__(sysroot=sysroot, init=init,
-                                            probe_runtime=probe_runtime)
--        self.package_manager = DpkgPackageManager(chroot=sysroot,
-+        self.package_manager = DpkgPackageManager(chroot=self.sysroot,
-                                                   remote_exec=remote_exec)
-         self.valid_subclasses += [DebianPlugin]
- 
-diff --git a/sos/policies/distros/redhat.py b/sos/policies/distros/redhat.py
-index eb44240736..4b14abaf3a 100644
---- a/sos/policies/distros/redhat.py
-+++ b/sos/policies/distros/redhat.py
-@@ -42,7 +42,6 @@ class RedHatPolicy(LinuxPolicy):
-     _redhat_release = '/etc/redhat-release'
-     _tmp_dir = "/var/tmp"
-     _in_container = False
--    _host_sysroot = '/'
-     default_scl_prefix = '/opt/rh'
-     name_pattern = 'friendly'
-     upload_url = None
-@@ -57,7 +56,7 @@ def __init__(self, sysroot=None, init=None, probe_runtime=True,
-                                            probe_runtime=probe_runtime)
-         self.usrmove = False
- 
--        self.package_manager = RpmPackageManager(chroot=sysroot,
-+        self.package_manager = RpmPackageManager(chroot=self.sysroot,
-                                                  remote_exec=remote_exec)
- 
-         self.valid_subclasses += [RedHatPlugin]
-diff --git a/sos/policies/runtimes/__init__.py b/sos/policies/runtimes/__init__.py
-index f28d6a1df3..2e60ad2361 100644
---- a/sos/policies/runtimes/__init__.py
-+++ b/sos/policies/runtimes/__init__.py
-@@ -64,7 +64,7 @@ def check_is_active(self):
-         :returns: ``True`` if the runtime is active, else ``False``
-         :rtype: ``bool``
-         """
--        if is_executable(self.binary):
-+        if is_executable(self.binary, self.policy.sysroot):
-             self.active = True
-             return True
-         return False
-@@ -78,7 +78,7 @@ def get_containers(self, get_all=False):
-         containers = []
-         _cmd = "%s ps %s" % (self.binary, '-a' if get_all else '')
-         if self.active:
--            out = sos_get_command_output(_cmd)
-+            out = sos_get_command_output(_cmd, chroot=self.policy.sysroot)
-             if out['status'] == 0:
-                 for ent in out['output'].splitlines()[1:]:
-                     ent = ent.split()
-@@ -112,8 +112,10 @@ def get_images(self):
-         images = []
-         fmt = '{{lower .Repository}}:{{lower .Tag}} {{lower .ID}}'
-         if self.active:
--            out = sos_get_command_output("%s images --format '%s'"
--                                         % (self.binary, fmt))
-+            out = sos_get_command_output(
-+                "%s images --format '%s'" % (self.binary, fmt),
-+                chroot=self.policy.sysroot
-+            )
-             if out['status'] == 0:
-                 for ent in out['output'].splitlines():
-                     ent = ent.split()
-@@ -129,7 +131,10 @@ def get_volumes(self):
-         """
-         vols = []
-         if self.active:
--            out = sos_get_command_output("%s volume ls" % self.binary)
-+            out = sos_get_command_output(
-+                "%s volume ls" % self.binary,
-+                chroot=self.policy.sysroot
-+            )
-             if out['status'] == 0:
-                 for ent in out['output'].splitlines()[1:]:
-                     ent = ent.split()
-diff --git a/sos/policies/runtimes/docker.py b/sos/policies/runtimes/docker.py
-index 759dfaf6a0..e81f580ec3 100644
---- a/sos/policies/runtimes/docker.py
-+++ b/sos/policies/runtimes/docker.py
-@@ -18,9 +18,9 @@ class DockerContainerRuntime(ContainerRuntime):
-     name = 'docker'
-     binary = 'docker'
- 
--    def check_is_active(self):
-+    def check_is_active(self, sysroot=None):
-         # the daemon must be running
--        if (is_executable('docker') and
-+        if (is_executable('docker', sysroot) and
-                 (self.policy.init_system.is_running('docker') or
-                  self.policy.init_system.is_running('snap.docker.dockerd'))):
-             self.active = True
-diff --git a/sos/report/__init__.py b/sos/report/__init__.py
-index a4c92accd3..a6c72778fc 100644
---- a/sos/report/__init__.py
-+++ b/sos/report/__init__.py
-@@ -173,14 +173,12 @@ def __init__(self, parser, args, cmdline):
-         self._set_directories()
- 
-         msg = "default"
--        host_sysroot = self.policy.host_sysroot()
-+        self.sysroot = self.policy.sysroot
-         # set alternate system root directory
-         if self.opts.sysroot:
-             msg = "cmdline"
--            self.sysroot = self.opts.sysroot
--        elif self.policy.in_container() and host_sysroot != os.sep:
-+        elif self.policy.in_container() and self.sysroot != os.sep:
-             msg = "policy"
--            self.sysroot = host_sysroot
-         self.soslog.debug("set sysroot to '%s' (%s)" % (self.sysroot, msg))
- 
-         if self.opts.chroot not in chroot_modes:
-diff --git a/sos/report/plugins/__init__.py b/sos/report/plugins/__init__.py
-index 46028bb124..e180ae1727 100644
---- a/sos/report/plugins/__init__.py
-+++ b/sos/report/plugins/__init__.py
-@@ -724,7 +724,7 @@ def strip_sysroot(self, path):
-         """
-         if not self.use_sysroot():
-             return path
--        if path.startswith(self.sysroot):
-+        if self.sysroot and path.startswith(self.sysroot):
-             return path[len(self.sysroot):]
-         return path
- 
-@@ -743,8 +743,10 @@ def tmp_in_sysroot(self):
-                   ``False``
-         :rtype: ``bool``
-         """
--        paths = [self.sysroot, self.archive.get_tmp_dir()]
--        return os.path.commonprefix(paths) == self.sysroot
-+        # if sysroot is still None, that implies '/'
-+        _sysroot = self.sysroot or '/'
-+        paths = [_sysroot, self.archive.get_tmp_dir()]
-+        return os.path.commonprefix(paths) == _sysroot
- 
-     def is_installed(self, package_name):
-         """Is the package $package_name installed?
-@@ -2621,7 +2623,7 @@ def __expand(paths):
-         return list(set(expanded))
- 
-     def _collect_copy_specs(self):
--        for path in self.copy_paths:
-+        for path in sorted(self.copy_paths, reverse=True):
-             self._log_info("collecting path '%s'" % path)
-             self._do_copy_path(path)
-         self.generate_copyspec_tags()
-@@ -2749,7 +2751,7 @@ def _check_plugin_triggers(self, files, packages, commands, services,
- 
-         return ((any(self.path_exists(fname) for fname in files) or
-                 any(self.is_installed(pkg) for pkg in packages) or
--                any(is_executable(cmd) for cmd in commands) or
-+                any(is_executable(cmd, self.sysroot) for cmd in commands) or
-                 any(self.is_module_loaded(mod) for mod in self.kernel_mods) or
-                 any(self.is_service(svc) for svc in services) or
-                 any(self.container_exists(cntr) for cntr in containers)) and
-@@ -2817,7 +2819,7 @@ def path_exists(self, path):
-         :returns:           True if the path exists in sysroot, else False
-         :rtype:             ``bool``
-         """
--        return path_exists(path, self.commons['cmdlineopts'].sysroot)
-+        return path_exists(path, self.sysroot)
- 
-     def path_isdir(self, path):
-         """Helper to call the sos.utilities wrapper that allows the
-@@ -2830,7 +2832,7 @@ def path_isdir(self, path):
-         :returns:           True if the path is a dir, else False
-         :rtype:             ``bool``
-         """
--        return path_isdir(path, self.commons['cmdlineopts'].sysroot)
-+        return path_isdir(path, self.sysroot)
- 
-     def path_isfile(self, path):
-         """Helper to call the sos.utilities wrapper that allows the
-@@ -2843,7 +2845,7 @@ def path_isfile(self, path):
-         :returns:           True if the path is a file, else False
-         :rtype:             ``bool``
-         """
--        return path_isfile(path, self.commons['cmdlineopts'].sysroot)
-+        return path_isfile(path, self.sysroot)
- 
-     def path_islink(self, path):
-         """Helper to call the sos.utilities wrapper that allows the
-@@ -2856,7 +2858,7 @@ def path_islink(self, path):
-         :returns:           True if the path is a link, else False
-         :rtype:             ``bool``
-         """
--        return path_islink(path, self.commons['cmdlineopts'].sysroot)
-+        return path_islink(path, self.sysroot)
- 
-     def listdir(self, path):
-         """Helper to call the sos.utilities wrapper that allows the
-@@ -2869,7 +2871,7 @@ def listdir(self, path):
-         :returns:           Contents of path, if it is a directory
-         :rtype:             ``list``
-         """
--        return listdir(path, self.commons['cmdlineopts'].sysroot)
-+        return listdir(path, self.sysroot)
- 
-     def path_join(self, path, *p):
-         """Helper to call the sos.utilities wrapper that allows the
-diff --git a/sos/report/plugins/unpackaged.py b/sos/report/plugins/unpackaged.py
-index 772b1d1fbb..24203c4b13 100644
---- a/sos/report/plugins/unpackaged.py
-+++ b/sos/report/plugins/unpackaged.py
-@@ -58,10 +58,11 @@ def format_output(files):
-             """
-             expanded = []
-             for f in files:
--                if self.path_islink(f):
--                    expanded.append("{} -> {}".format(f, os.readlink(f)))
-+                fp = self.path_join(f)
-+                if self.path_islink(fp):
-+                    expanded.append("{} -> {}".format(fp, os.readlink(fp)))
-                 else:
--                    expanded.append(f)
-+                    expanded.append(fp)
-             return expanded
- 
-         # Check command predicate to avoid costly processing
-diff --git a/sos/utilities.py b/sos/utilities.py
-index b757515397..d66309334b 100644
---- a/sos/utilities.py
-+++ b/sos/utilities.py
-@@ -96,11 +96,15 @@ def grep(pattern, *files_or_paths):
-     return matches
- 
- 
--def is_executable(command):
-+def is_executable(command, sysroot=None):
-     """Returns if a command matches an executable on the PATH"""
- 
-     paths = os.environ.get("PATH", "").split(os.path.pathsep)
-     candidates = [command] + [os.path.join(p, command) for p in paths]
-+    if sysroot:
-+        candidates += [
-+            os.path.join(sysroot, c.lstrip('/')) for c in candidates
-+        ]
-     return any(os.access(path, os.X_OK) for path in candidates)
- 
- 
-@@ -216,8 +220,9 @@ def get_human_readable(size, precision=2):
- 
- 
- def _os_wrapper(path, sysroot, method, module=os.path):
--    if sysroot not in [None, '/']:
--        path = os.path.join(sysroot, path.lstrip('/'))
-+    if sysroot and sysroot != os.sep:
-+        if not path.startswith(sysroot):
-+            path = os.path.join(sysroot, path.lstrip('/'))
-     _meth = getattr(module, method)
-     return _meth(path)
- 
-@@ -243,7 +248,7 @@ def listdir(path, sysroot):
- 
- 
- def path_join(path, *p, sysroot=os.sep):
--    if not path.startswith(sysroot):
-+    if sysroot and not path.startswith(sysroot):
-         path = os.path.join(sysroot, path.lstrip(os.sep))
-     return os.path.join(path, *p)
- 
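A rough sketch of the sysroot-aware executable lookup these hunks converge on, reduced to a standalone function (an approximation of the `is_executable()` change above, not the full sos code):

    import os

    def is_executable(command, sysroot=None):
        # Check PATH candidates, and optionally the same paths re-anchored
        # under sysroot, so a containerized sos can find host binaries.
        paths = os.environ.get("PATH", "").split(os.pathsep)
        candidates = [command] + [os.path.join(p, command) for p in paths]
        if sysroot:
            candidates += [os.path.join(sysroot, c.lstrip('/'))
                           for c in candidates]
        return any(os.access(c, os.X_OK) for c in candidates)

    is_executable('ss')                   # host-only lookup
    is_executable('ss', sysroot='/host')  # also checks e.g. /host/usr/sbin/ss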
-From a43124e1f6217107838eed4d70339d100cbbc77a Mon Sep 17 00:00:00 2001
-From: Pavel Moravec <pmoravec@redhat.com>
-Date: Wed, 9 Feb 2022 19:45:27 +0100
-Subject: [PATCH] [policies] Fall back to '/' when sysroot is None
-
-Commit 9596473 introduced a regression that allowed sysroot to be set
-to None when running sos report on a regular system (outside a
-container). In such a case, we need to fall back to the '/' sysroot.
-
-Resolves: #2846
-
-Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
----
- sos/policies/distros/__init__.py | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/sos/policies/distros/__init__.py b/sos/policies/distros/__init__.py
-index f3c1de11..9048f1c4 100644
---- a/sos/policies/distros/__init__.py
-+++ b/sos/policies/distros/__init__.py
-@@ -78,7 +78,7 @@ class LinuxPolicy(Policy):
-         if sysroot:
-             self.sysroot = sysroot
-         else:
--            self.sysroot = self._container_init()
-+            self.sysroot = self._container_init() or '/'
-
-         self.init_kernel_modules()
-
--- 
-2.34.1
-
diff --git a/SOURCES/sos-bz2011538-iptables-save-under-nf_tables-kmod.patch b/SOURCES/sos-bz2011538-iptables-save-under-nf_tables-kmod.patch
deleted file mode 100644
index e234bc6..0000000
--- a/SOURCES/sos-bz2011538-iptables-save-under-nf_tables-kmod.patch
+++ /dev/null
@@ -1,73 +0,0 @@
-From 7d5157aa5071e3620246e2d4aa80acb2d3ed30f0 Mon Sep 17 00:00:00 2001
-From: Pavel Moravec <pmoravec@redhat.com>
-Date: Tue, 28 Sep 2021 22:44:52 +0200
-Subject: [PATCH] [networking] prevent iptables-save commands from loading
- the nf_tables kmod
-
-If iptables is built on top of the nf_tables kmod, then the
-'ip netns exec <foo> iptables-save' command requires that kmod, so the
-command must be guarded by a predicate.
-
-Analogously for ip6tables.
-
-Resolves: #2703
-
-Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
----
- sos/report/plugins/networking.py | 29 ++++++++++++++++++++++++-----
- 1 file changed, 24 insertions(+), 5 deletions(-)
-
-diff --git a/sos/report/plugins/networking.py b/sos/report/plugins/networking.py
-index c80ae719..1237f629 100644
---- a/sos/report/plugins/networking.py
-+++ b/sos/report/plugins/networking.py
-@@ -182,22 +182,41 @@ class Networking(Plugin):
-         # per-namespace.
-         self.add_cmd_output("ip netns")
-         cmd_prefix = "ip netns exec "
--        for namespace in self.get_network_namespaces(
--                            self.get_option("namespace_pattern"),
--                            self.get_option("namespaces")):
-+        namespaces = self.get_network_namespaces(
-+                self.get_option("namespace_pattern"),
-+                self.get_option("namespaces"))
-+        if (namespaces):
-+            # 'ip netns exec <foo> iptables-save' must be guarded by nf_tables
-+            # kmod, if 'iptables -V' output contains 'nf_tables'
-+            # analogously for ip6tables
-+            co = {'cmd': 'iptables -V', 'output': 'nf_tables'}
-+            co6 = {'cmd': 'ip6tables -V', 'output': 'nf_tables'}
-+            iptables_with_nft = (SoSPredicate(self, kmods=['nf_tables'])
-+                                 if self.test_predicate(self,
-+                                 pred=SoSPredicate(self, cmd_outputs=co))
-+                                 else None)
-+            ip6tables_with_nft = (SoSPredicate(self, kmods=['nf_tables'])
-+                                  if self.test_predicate(self,
-+                                  pred=SoSPredicate(self, cmd_outputs=co6))
-+                                  else None)
-+        for namespace in namespaces:
-             ns_cmd_prefix = cmd_prefix + namespace + " "
-             self.add_cmd_output([
-                 ns_cmd_prefix + "ip address show",
-                 ns_cmd_prefix + "ip route show table all",
-                 ns_cmd_prefix + "ip -s -s neigh show",
-                 ns_cmd_prefix + "ip rule list",
--                ns_cmd_prefix + "iptables-save",
--                ns_cmd_prefix + "ip6tables-save",
-                 ns_cmd_prefix + "netstat %s -neopa" % self.ns_wide,
-                 ns_cmd_prefix + "netstat -s",
-                 ns_cmd_prefix + "netstat %s -agn" % self.ns_wide,
-                 ns_cmd_prefix + "nstat -zas",
-             ], priority=50)
-+            self.add_cmd_output([ns_cmd_prefix + "iptables-save"],
-+                                pred=iptables_with_nft,
-+                                priority=50)
-+            self.add_cmd_output([ns_cmd_prefix + "ip6tables-save"],
-+                                pred=ip6tables_with_nft,
-+                                priority=50)
- 
-             ss_cmd = ns_cmd_prefix + "ss -peaonmi"
-             # --allow-system-changes is handled directly in predicate
--- 
-2.31.1
-
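The guard pattern above, reduced to a standalone sketch (SoSPredicate carries more state than this; the helper name here is illustrative):

    import subprocess

    def command_output_contains(cmd, needle):
        # Stand-in for SoSPredicate(cmd_outputs={'cmd': ..., 'output': ...}):
        # run cmd and report whether its output mentions needle.
        try:
            res = subprocess.run(cmd.split(), capture_output=True, text=True)
        except FileNotFoundError:
            return False
        return needle in res.stdout

    # Gate iptables-save on the nf_tables kmod only when iptables is the
    # nft-based implementation; legacy iptables needs no such guard.
    needs_nft_kmod = command_output_contains('iptables -V', 'nf_tables')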
diff --git a/SOURCES/sos-bz2012858-dryrun-uncaught-exception.patch b/SOURCES/sos-bz2012858-dryrun-uncaught-exception.patch
deleted file mode 100644
index 619d538..0000000
--- a/SOURCES/sos-bz2012858-dryrun-uncaught-exception.patch
+++ /dev/null
@@ -1,33 +0,0 @@
-From e56b3ea999731b831ebba80cf367e43e65c12b62 Mon Sep 17 00:00:00 2001
-From: Pavel Moravec <pmoravec@redhat.com>
-Date: Mon, 4 Oct 2021 14:43:08 +0200
-Subject: [PATCH] [report] Overwrite pred=None before referring to predicate
- attributes
-
-During a dry run, the add_journal method sets pred=None whilst
-log_skipped_cmd refers to predicate attributes. In that case, replace the
-None predicate with a default / empty predicate.
-
-Resolves: #2711
-
-Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
----
- sos/report/plugins/__init__.py | 2 ++
- 1 file changed, 2 insertions(+)
-
-diff --git a/sos/report/plugins/__init__.py b/sos/report/plugins/__init__.py
-index 3c2b64d9..c635b8de 100644
---- a/sos/report/plugins/__init__.py
-+++ b/sos/report/plugins/__init__.py
-@@ -1693,6 +1693,8 @@ class Plugin():
-     def _add_cmd_output(self, **kwargs):
-         """Internal helper to add a single command to the collection list."""
-         pred = kwargs.pop('pred') if 'pred' in kwargs else SoSPredicate(self)
-+        if pred is None:
-+            pred = SoSPredicate(self)
-         if 'priority' not in kwargs:
-             kwargs['priority'] = 10
-         if 'changes' not in kwargs:
--- 
-2.31.1
-
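The underlying Python idiom, as a minimal illustration: a keyword default only covers a missing argument, not an explicit None, so the None has to be normalized inside the function body.

    def greet(name=None):
        # An explicit greet(None) bypasses any default the signature could
        # have supplied, so normalize here:
        if name is None:
            name = 'world'
        return 'hello, %s' % name

    greet()      # 'hello, world'
    greet(None)  # 'hello, world'; the dry-run case the patch handles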
diff --git a/SOURCES/sos-bz2012859-plugin-timeout-unhandled-exception.patch b/SOURCES/sos-bz2012859-plugin-timeout-unhandled-exception.patch
deleted file mode 100644
index e977fb5..0000000
--- a/SOURCES/sos-bz2012859-plugin-timeout-unhandled-exception.patch
+++ /dev/null
@@ -1,31 +0,0 @@
-From a93e118a9c88df52fd2c701d2276185f877d565c Mon Sep 17 00:00:00 2001
-From: Pavel Moravec <pmoravec@redhat.com>
-Date: Wed, 3 Nov 2021 16:07:15 +0100
-Subject: [PATCH] [report] shut down threads of timed-out plugins
-
-Wait for the threads of timed-out plugins to shut down, to prevent
-them from writing to moved auxiliary files like sos_logs/sos.log.
-
-Resolves: #2722
-Closes: #2746
-
-Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
----
- sos/report/__init__.py | 1 +
- 1 file changed, 1 insertion(+)
-
-diff --git a/sos/report/__init__.py b/sos/report/__init__.py
-index 1b5bc97d..ef86b28d 100644
---- a/sos/report/__init__.py
-+++ b/sos/report/__init__.py
-@@ -1046,6 +1046,7 @@ class SoSReport(SoSComponent):
-                 self.ui_log.error("\n Plugin %s timed out\n" % plugin[1])
-                 self.running_plugs.remove(plugin[1])
-                 self.loaded_plugins[plugin[0]-1][1].set_timeout_hit()
-+                pool.shutdown(wait=True)
-                 pool._threads.clear()
-         if self.opts.estimate_only:
-             from pathlib import Path
--- 
-2.31.1
-
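A minimal illustration of the executor behavior the fix relies on (standard library only):

    import time
    from concurrent.futures import ThreadPoolExecutor

    pool = ThreadPoolExecutor(max_workers=2)
    pool.submit(time.sleep, 1)
    # shutdown(wait=True) blocks until running workers finish, so none of
    # them can keep writing to files that are about to be moved away.
    pool.shutdown(wait=True)

Note that clearing `pool._threads` afterwards, as the hunk does, reaches into a private attribute of ThreadPoolExecutor; it works here but is not a stable API.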
diff --git a/SOURCES/sos-bz2019697-openvswitch-offline-analysis.patch b/SOURCES/sos-bz2019697-openvswitch-offline-analysis.patch
deleted file mode 100644
index 8bd4adb..0000000
--- a/SOURCES/sos-bz2019697-openvswitch-offline-analysis.patch
+++ /dev/null
@@ -1,151 +0,0 @@
-From 3f0ec3e55e7dcec89dd7fad10084ea7f16178608 Mon Sep 17 00:00:00 2001
-From: Salvatore Daniele <sdaniele@redhat.com>
-Date: Tue, 7 Sep 2021 13:48:22 -0400
-Subject: [PATCH 1/2] [openvswitch] add ovs default OpenFlow protocols
-
-ovs-vsctl list bridge can return an empty 'protocol' column even when
-there are OpenFlow protocols in place by default.
-
-ovs-ofctl --version will return the range of supported OpenFlow
-protocol versions, and should also be used to ensure flow information
-for the relevant protocol versions is collected.
-
-OpenFlow default versions:
-https://docs.openvswitch.org/en/latest/faq/openflow/
-
-Signed-off-by: Salvatore Daniele <sdaniele@redhat.com>
----
- sos/report/plugins/openvswitch.py | 26 ++++++++++++++++++++++++++
- 1 file changed, 26 insertions(+)
-
-diff --git a/sos/report/plugins/openvswitch.py b/sos/report/plugins/openvswitch.py
-index cd897db2..92cc7259 100644
---- a/sos/report/plugins/openvswitch.py
-+++ b/sos/report/plugins/openvswitch.py
-@@ -206,6 +206,7 @@ class OpenVSwitch(Plugin):
- 
-         # Gather additional output for each OVS bridge on the host.
-         br_list_result = self.collect_cmd_output("ovs-vsctl -t 5 list-br")
-+        ofp_ver_result = self.collect_cmd_output("ovs-ofctl -t 5 --version")
-         if br_list_result['status'] == 0:
-             for br in br_list_result['output'].splitlines():
-                 self.add_cmd_output([
-@@ -232,6 +233,16 @@ class OpenVSwitch(Plugin):
-                     "OpenFlow15"
-                 ]
- 
-+                # Flow protocol hex identifiers
-+                ofp_versions = {
-+                    0x01: "OpenFlow10",
-+                    0x02: "OpenFlow11",
-+                    0x03: "OpenFlow12",
-+                    0x04: "OpenFlow13",
-+                    0x05: "OpenFlow14",
-+                    0x06: "OpenFlow15",
-+                }
-+
-                 # List protocols currently in use, if any
-                 ovs_list_bridge_cmd = "ovs-vsctl -t 5 list bridge %s" % br
-                 br_info = self.collect_cmd_output(ovs_list_bridge_cmd)
-@@ -242,6 +253,21 @@ class OpenVSwitch(Plugin):
-                         br_protos_ln = line[line.find("[")+1:line.find("]")]
-                         br_protos = br_protos_ln.replace('"', '').split(", ")
- 
-+                # If 'list bridge' yielded no protocols, use the range of
-+                # protocols enabled by default on this version of ovs.
-+                if br_protos == [''] and ofp_ver_result['output']:
-+                    ofp_version_range = ofp_ver_result['output'].splitlines()
-+                    ver_range = []
-+
-+                    for line in ofp_version_range:
-+                        if "OpenFlow versions" in line:
-+                            v = line.split("OpenFlow versions ")[1].split(":")
-+                            ver_range = range(int(v[0], 16), int(v[1], 16)+1)
-+
-+                    for protocol in ver_range:
-+                        if protocol in ofp_versions:
-+                            br_protos.append(ofp_versions[protocol])
-+
-                 # Collect flow information for relevant protocol versions only
-                 for flow in flow_versions:
-                     if flow in br_protos:
--- 
-2.31.1
-
-
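A standalone sketch of the version-range parsing the hunk performs, assuming `ovs-ofctl --version` output containing a line of the form 'OpenFlow versions 0x1:0x6':

    OFP_VERSIONS = {
        0x01: 'OpenFlow10', 0x02: 'OpenFlow11', 0x03: 'OpenFlow12',
        0x04: 'OpenFlow13', 0x05: 'OpenFlow14', 0x06: 'OpenFlow15',
    }

    def default_protocols(version_output):
        # Expand the hex range into the protocol names sos collects for.
        protos = []
        for line in version_output.splitlines():
            if 'OpenFlow versions' in line:
                lo, hi = line.split('OpenFlow versions ')[1].split(':')
                protos = [OFP_VERSIONS[v]
                          for v in range(int(lo, 16), int(hi, 16) + 1)
                          if v in OFP_VERSIONS]
        return protos

    default_protocols('ovs-ofctl (Open vSwitch) 2.15.1\n'
                      'OpenFlow versions 0x1:0x6')
    # -> ['OpenFlow10', 'OpenFlow11', ..., 'OpenFlow15']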
-From 5a006024f730213a726c70e82c5ecd2daf685b2b Mon Sep 17 00:00:00 2001
-From: Salvatore Daniele <sdaniele@redhat.com>
-Date: Tue, 7 Sep 2021 14:17:19 -0400
-Subject: [PATCH 2/2] [openvswitch] add commands for offline analysis
-
-Replicas of ovs-vswitchd and ovsdb-server can be recreated offline
-using flow, group, and tlv dumps, and ovs conf.db. This allows for
-offline analysis and the use of tools such as ovs-appctl
-ofproto/trace and ovs-ofctl for debugging.
-
-This patch ensures this information is available in the sos report.
-The db is copied rather than collected using ovsdb-client list dump
-for two reasons:
-
-ovsdb-client requires interacting with the ovsdb-server, which could
-take it 'down' for some time and impact large, busy clusters.
-
-The list-dump is not in a format that can be used to restore the db
-offline. All of the information in the list dump, and more, is
-available by copying the db.
-
-Signed-off-by: Salvatore Daniele <sdaniele@redhat.com>
----
- sos/report/plugins/openvswitch.py | 12 ++++++++++--
- sos/report/plugins/ovn_central.py |  1 +
- 2 files changed, 11 insertions(+), 2 deletions(-)
-
-diff --git a/sos/report/plugins/openvswitch.py b/sos/report/plugins/openvswitch.py
-index 92cc7259..003596c6 100644
---- a/sos/report/plugins/openvswitch.py
-+++ b/sos/report/plugins/openvswitch.py
-@@ -75,12 +75,19 @@ class OpenVSwitch(Plugin):
-             "/run/openvswitch/ovs-monitor-ipsec.pid"
-         ])
- 
-+        self.add_copy_spec([
-+            path_join('/usr/local/etc/openvswitch', 'conf.db'),
-+            path_join('/etc/openvswitch', 'conf.db'),
-+            path_join('/var/lib/openvswitch', 'conf.db'),
-+        ])
-+        ovs_dbdir = environ.get('OVS_DBDIR')
-+        if ovs_dbdir:
-+            self.add_copy_spec(path_join(ovs_dbdir, 'conf.db'))
-+
-         self.add_cmd_output([
-             # The '-t 5' adds an upper bound on how long to wait to connect
-             # to the Open vSwitch server, avoiding hangs when running sos.
-             "ovs-vsctl -t 5 show",
--            # Gather the database.
--            "ovsdb-client -f list dump",
-             # List the contents of important runtime directories
-             "ls -laZ /run/openvswitch",
-             "ls -laZ /dev/hugepages/",
-@@ -276,6 +283,7 @@ class OpenVSwitch(Plugin):
-                             "ovs-ofctl -O %s dump-groups %s" % (flow, br),
-                             "ovs-ofctl -O %s dump-group-stats %s" % (flow, br),
-                             "ovs-ofctl -O %s dump-flows %s" % (flow, br),
-+                            "ovs-ofctl -O %s dump-tlv-map %s" % (flow, br),
-                             "ovs-ofctl -O %s dump-ports-desc %s" % (flow, br)
-                         ])
- 
-diff --git a/sos/report/plugins/ovn_central.py b/sos/report/plugins/ovn_central.py
-index a4c483a9..d6647aad 100644
---- a/sos/report/plugins/ovn_central.py
-+++ b/sos/report/plugins/ovn_central.py
-@@ -138,6 +138,7 @@ class OVNCentral(Plugin):
-                 os.path.join('/usr/local/etc/openvswitch', dbfile),
-                 os.path.join('/etc/openvswitch', dbfile),
-                 os.path.join('/var/lib/openvswitch', dbfile),
-+                os.path.join('/var/lib/ovn/etc', dbfile),
-             ])
-             if ovs_dbdir:
-                 self.add_copy_spec(os.path.join(ovs_dbdir, dbfile))
--- 
-2.31.1
-
diff --git a/SOURCES/sos-bz2020778-filter-namespace-per-pattern.patch b/SOURCES/sos-bz2020778-filter-namespace-per-pattern.patch
deleted file mode 100644
index 5b0afdb..0000000
--- a/SOURCES/sos-bz2020778-filter-namespace-per-pattern.patch
+++ /dev/null
@@ -1,54 +0,0 @@
-From 568eb2fbcf74ecad00d5c06989f55f8a6a9e3516 Mon Sep 17 00:00:00 2001
-From: Pavel Moravec <pmoravec@redhat.com>
-Date: Thu, 4 Nov 2021 23:14:21 +0100
-Subject: [PATCH] [report] fix filter_namespace per pattern
-
-Currently, -k networking.namespace_pattern=.. is broken, as the regex
-test forgets to add the namespace in the case of a positive match.
-
-Also ensure both plugopts namespace_pattern and namespaces work
-together.
-
-Resolves: #2748
-
-Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
----
- sos/report/plugins/__init__.py | 15 +++++++--------
- 1 file changed, 7 insertions(+), 8 deletions(-)
-
-diff --git a/sos/report/plugins/__init__.py b/sos/report/plugins/__init__.py
-index 3e717993..a0d4e95d 100644
---- a/sos/report/plugins/__init__.py
-+++ b/sos/report/plugins/__init__.py
-@@ -2953,21 +2953,20 @@ class Plugin():
-                 )
-         for ns in ns_list:
-             # if ns_pattern defined, skip namespaces not matching the pattern
--            if ns_pattern:
--                if not bool(re.match(pattern, ns)):
--                    continue
-+            if ns_pattern and not bool(re.match(pattern, ns)):
-+                continue
-+            out_ns.append(ns)
- 
--            # if ns_max is defined at all, limit returned list to that number
-+            # if ns_max is defined at all, break the loop when the limit is
-+            # reached
-             # this allows the use of both '0' and `None` to mean unlimited
--            elif ns_max:
--                out_ns.append(ns)
-+            if ns_max:
-                 if len(out_ns) == ns_max:
-                     self._log_warn("Limiting namespace iteration "
-                                    "to first %s namespaces found"
-                                    % ns_max)
-                     break
--            else:
--                out_ns.append(ns)
-+
-         return out_ns
- 
- 
--- 
-2.31.1
-
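The corrected control flow, as a standalone sketch (function and argument names are illustrative):

    import re

    def filter_namespaces(ns_list, pattern=None, ns_max=None):
        out_ns = []
        for ns in ns_list:
            # if a pattern is defined, skip namespaces not matching it
            if pattern and not re.match(pattern, ns):
                continue
            out_ns.append(ns)
            # both 0 and None mean 'unlimited', hence a plain truthiness test
            if ns_max and len(out_ns) == ns_max:
                break
        return out_ns

    filter_namespaces(['qrouter-a', 'qdhcp-a', 'qrouter-b'],
                      pattern='qrouter', ns_max=1)
    # -> ['qrouter-a']: the pattern and the limit now compose correctly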
diff --git a/SOURCES/sos-bz2023481-plugin-timeouts-proper-handling.patch b/SOURCES/sos-bz2023481-plugin-timeouts-proper-handling.patch
deleted file mode 100644
index 9fc7c3d..0000000
--- a/SOURCES/sos-bz2023481-plugin-timeouts-proper-handling.patch
+++ /dev/null
@@ -1,91 +0,0 @@
-From 3fea9a564c4112d04f6324df0d8b212e78feb5b3 Mon Sep 17 00:00:00 2001
-From: Jake Hunsaker <jhunsake@redhat.com>
-Date: Wed, 3 Nov 2021 11:02:54 -0400
-Subject: [PATCH] [Plugin] Ensure specific plugin timeouts are only set for
- that plugin
-
-It was discovered that setting a specific plugin timeout via the `-k
-$plugin.timeout` option could influence the timeout setting for other
-plugins whose timeouts were not also explicitly set. Fix this
-by moving the default plugin opts into `Plugin.__init__()` so that each
-plugin is ensured a private copy of these default plugin options.
-
-Additionally, add more timeout data to plugin manifest entries to allow
-for better tracking of this setting.
-
-Adds a test case for this scenario.
-
-Closes: #2744
-
-Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
----
- sos/report/__init__.py                   |  2 +-
- sos/report/plugins/__init__.py           | 28 +++++++++++++------
- tests/vendor_tests/redhat/rhbz2018033.py | 35 ++++++++++++++++++++++++
- 3 files changed, 55 insertions(+), 10 deletions(-)
- create mode 100644 tests/vendor_tests/redhat/rhbz2018033.py
-
-diff --git a/sos/report/__init__.py b/sos/report/__init__.py
-index ef86b28d..c95e6300 100644
---- a/sos/report/__init__.py
-+++ b/sos/report/__init__.py
-@@ -766,7 +766,7 @@ class SoSReport(SoSComponent):
-         if self.all_options:
-             self.ui_log.info(_("The following options are available for ALL "
-                                "plugins:"))
--            for opt in self.all_options[0][0]._default_plug_opts:
-+            for opt in self.all_options[0][0].get_default_plugin_opts():
-                 val = opt[3]
-                 if val == -1:
-                     val = TIMEOUT_DEFAULT
-diff --git a/sos/report/plugins/__init__.py b/sos/report/plugins/__init__.py
-index 49f1af27..3e717993 100644
---- a/sos/report/plugins/__init__.py
-+++ b/sos/report/plugins/__init__.py
-@@ -474,12 +474,6 @@ class Plugin(object):
-     # Default predicates
-     predicate = None
-     cmd_predicate = None
--    _default_plug_opts = [
--        ('timeout', 'Timeout in seconds for plugin to finish', 'fast', -1),
--        ('cmd-timeout', 'Timeout in seconds for a command', 'fast', -1),
--        ('postproc', 'Enable post-processing collected plugin data', 'fast',
--         True)
--    ]
- 
-     def __init__(self, commons):
- 
-@@ -506,7 +500,7 @@ class Plugin(object):
-             else logging.getLogger('sos')
- 
-         # add the default plugin opts
--        self.option_list.extend(self._default_plug_opts)
-+        self.option_list.extend(self.get_default_plugin_opts())
- 
-         # get the option list into a dictionary
-         for opt in self.option_list:
-@@ -591,6 +583,14 @@ class Plugin():
-         # Initialise the default --dry-run predicate
-         self.set_predicate(SoSPredicate(self))
- 
-+    def get_default_plugin_opts(self):
-+        return [
-+            ('timeout', 'Timeout in seconds for plugin to finish', 'fast', -1),
-+            ('cmd-timeout', 'Timeout in seconds for a command', 'fast', -1),
-+            ('postproc', 'Enable post-processing collected plugin data', 'fast',
-+             True)
-+        ]
-+
-     def set_plugin_manifest(self, manifest):
-         """Pass in a manifest object to the plugin to write to
- 
-@@ -547,7 +541,9 @@ class Plugin(object):
-         self.manifest.add_field('setup_start', '')
-         self.manifest.add_field('setup_end', '')
-         self.manifest.add_field('setup_time', '')
-+        self.manifest.add_field('timeout', self.timeout)
-         self.manifest.add_field('timeout_hit', False)
-+        self.manifest.add_field('command_timeout', self.cmdtimeout)
-         self.manifest.add_list('commands', [])
-         self.manifest.add_list('files', [])
- 
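The class-attribute pitfall behind this bug, as a minimal sketch:

    class Shared:
        # One list object is created at class definition time and shared by
        # every instance; mutating it through one instance leaks into all.
        opts = [('timeout', -1)]

    a, b = Shared(), Shared()
    a.opts.append(('timeout', 300))
    assert b.opts == [('timeout', -1), ('timeout', 300)]  # surprise

    class Private:
        def __init__(self):
            # Each instance builds its own copy, so per-plugin tweaks
            # (e.g. -k $plugin.timeout) stay local to that plugin.
            self.opts = [('timeout', -1)]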
diff --git a/SOURCES/sos-bz2024893-cleaner-hostnames-improvements.patch b/SOURCES/sos-bz2024893-cleaner-hostnames-improvements.patch
deleted file mode 100644
index b129f9e..0000000
--- a/SOURCES/sos-bz2024893-cleaner-hostnames-improvements.patch
+++ /dev/null
@@ -1,1829 +0,0 @@
-From decd39b7799a0579ea085b0da0728b6eabd49b38 Mon Sep 17 00:00:00 2001
-From: Jake Hunsaker <jhunsake@redhat.com>
-Date: Wed, 1 Sep 2021 00:28:58 -0400
-Subject: [PATCH] [clean] Provide archive abstractions to obfuscate more than
- sos archives
-
-This commit removes the restriction, in place since sos-4.0, that
-`sos clean` only works against known sos report archives or build
-directories. The change is motivated by interest in using the
-obfuscation bits of sos in other data-collector projects.
-
-The `SoSObfuscationArchive()` class has been revamped to now be an
-abstraction for different types of archives, and the cleaner logic has
-been updated to leverage this new abstraction rather than assuming we're
-working on an sos archive.
-
-Abstractions are added for our own native use cases - that being `sos
-report` and `sos collect` for at-runtime obfuscation, as well as
-standalone archives previously generated. Further generic abstractions
-are available for plain directories and tarballs; however, these will not
-provide the same level of coverage as fully supported archive types, as
-is noted in the manpage for sos-clean.
-
-Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
----
- man/en/sos-clean.1                            |  25 ++
- sos/cleaner/__init__.py                       | 308 +++++++++---------
- .../__init__.py}                              |  80 ++++-
- sos/cleaner/archives/generic.py               |  52 +++
- sos/cleaner/archives/sos.py                   | 106 ++++++
- sos/cleaner/parsers/__init__.py               |   6 -
- sos/cleaner/parsers/hostname_parser.py        |   1 -
- sos/cleaner/parsers/ip_parser.py              |   1 -
- sos/cleaner/parsers/keyword_parser.py         |   1 -
- sos/cleaner/parsers/mac_parser.py             |   1 -
- sos/cleaner/parsers/username_parser.py        |   8 -
- tests/cleaner_tests/existing_archive.py       |   7 +
- tests/cleaner_tests/full_report_run.py        |   3 +
- tests/cleaner_tests/report_with_mask.py       |   3 +
- 14 files changed, 423 insertions(+), 179 deletions(-)
- rename sos/cleaner/{obfuscation_archive.py => archives/__init__.py} (81%)
- create mode 100644 sos/cleaner/archives/generic.py
- create mode 100644 sos/cleaner/archives/sos.py
-
-diff --git a/man/en/sos-clean.1 b/man/en/sos-clean.1
-index b77bc63c..54026713 100644
---- a/man/en/sos-clean.1
-+++ b/man/en/sos-clean.1
-@@ -10,6 +10,7 @@ sos clean - Obfuscate sensitive data from one or more sosreports
-     [\-\-jobs]
-     [\-\-no-update]
-     [\-\-keep-binary-files]
-+    [\-\-archive-type]
- 
- .SH DESCRIPTION
- \fBsos clean\fR or \fBsos mask\fR is an sos subcommand used to obfuscate sensitive information from
-@@ -88,6 +89,30 @@ Users should review any archive that keeps binary files in place before sending
- a third party.
- 
- Default: False (remove encountered binary files)
-+.TP
-+.B \-\-archive-type TYPE
-+Specify the type of archive that TARGET was generated as.
-+When sos inspects a TARGET archive, it tries to identify what type of archive it is.
-+For example, it may be a report generated by \fBsos report\fR, or a collection of those
-+reports generated by \fBsos collect\fR, which require separate approaches.
-+
-+This option may be useful if a given TARGET archive is known to be of a specific type,
-+but, due to unknown reasons or malformed/missing information in the archive itself,
-+is not properly identified by sos.
-+
-+The following are accepted values for this option:
-+
-+    \fBauto\fR          Automatically detect the archive type
-+    \fBreport\fR        An archive generated by \fBsos report\fR
-+    \fBcollect\fR       An archive generated by \fBsos collect\fR
-+
-+The following may also be used, however note that these do not attempt to pre-load
-+any information from the archives into the parsers. This means that, among other limitations,
-+items like host and domain names may not be obfuscated unless an obfuscated mapping already exists
-+on the system from a previous execution.
-+
-+    \fBdata-dir\fR      A plain directory on the filesystem.
-+    \fBtarball\fR       A generic tar archive not associated with any known tool
- 
- .SH SEE ALSO
- .BR sos (1)
-diff --git a/sos/cleaner/__init__.py b/sos/cleaner/__init__.py
-index 6aadfe79..6d2eb483 100644
---- a/sos/cleaner/__init__.py
-+++ b/sos/cleaner/__init__.py
-@@ -12,9 +12,7 @@ import hashlib
- import json
- import logging
- import os
--import re
- import shutil
--import tarfile
- import tempfile
- 
- from concurrent.futures import ThreadPoolExecutor
-@@ -27,7 +25,10 @@ from sos.cleaner.parsers.mac_parser import SoSMacParser
- from sos.cleaner.parsers.hostname_parser import SoSHostnameParser
- from sos.cleaner.parsers.keyword_parser import SoSKeywordParser
- from sos.cleaner.parsers.username_parser import SoSUsernameParser
--from sos.cleaner.obfuscation_archive import SoSObfuscationArchive
-+from sos.cleaner.archives.sos import (SoSReportArchive, SoSReportDirectory,
-+                                      SoSCollectorArchive,
-+                                      SoSCollectorDirectory)
-+from sos.cleaner.archives.generic import DataDirArchive, TarballArchive
- from sos.utilities import get_human_readable
- from textwrap import fill
- 
-@@ -41,6 +42,7 @@ class SoSCleaner(SoSComponent):
-     desc = "Obfuscate sensitive networking information in a report"
- 
-     arg_defaults = {
-+        'archive_type': 'auto',
-         'domains': [],
-         'jobs': 4,
-         'keywords': [],
-@@ -70,6 +72,7 @@ class SoSCleaner(SoSComponent):
-             self.from_cmdline = False
-             if not hasattr(self.opts, 'jobs'):
-                 self.opts.jobs = 4
-+            self.opts.archive_type = 'auto'
-             self.soslog = logging.getLogger('sos')
-             self.ui_log = logging.getLogger('sos_ui')
-             # create the tmp subdir here to avoid a potential race condition
-@@ -92,6 +95,17 @@ class SoSCleaner(SoSComponent):
-             SoSUsernameParser(self.cleaner_mapping, self.opts.usernames)
-         ]
- 
-+        self.archive_types = [
-+            SoSReportDirectory,
-+            SoSReportArchive,
-+            SoSCollectorDirectory,
-+            SoSCollectorArchive,
-+            # make sure these two are always last as they are fallbacks
-+            DataDirArchive,
-+            TarballArchive
-+        ]
-+        self.nested_archive = None
-+
-         self.log_info("Cleaner initialized. From cmdline: %s"
-                       % self.from_cmdline)
- 
-@@ -178,6 +192,11 @@ third party.
-         )
-         clean_grp.add_argument('target', metavar='TARGET',
-                                help='The directory or archive to obfuscate')
-+        clean_grp.add_argument('--archive-type', default='auto',
-+                               choices=['auto', 'report', 'collect',
-+                                        'data-dir', 'tarball'],
-+                               help=('Specify what kind of archive the target '
-+                                     'was generated as'))
-         clean_grp.add_argument('--domains', action='extend', default=[],
-                                help='List of domain names to obfuscate')
-         clean_grp.add_argument('-j', '--jobs', default=4, type=int,
-@@ -218,59 +237,28 @@ third party.
- 
-         In the event the target path is not an archive, abort.
-         """
--        if not tarfile.is_tarfile(self.opts.target):
--            self.ui_log.error(
--                "Invalid target: must be directory or tar archive"
--            )
--            self._exit(1)
--
--        archive = tarfile.open(self.opts.target)
--        self.arc_name = self.opts.target.split('/')[-1].split('.')[:-2][0]
--
--        try:
--            archive.getmember(os.path.join(self.arc_name, 'sos_logs'))
--        except Exception:
--            # this is not an sos archive
--            self.ui_log.error("Invalid target: not an sos archive")
--            self._exit(1)
--
--        # see if there are archives within this archive
--        nested_archives = []
--        for _file in archive.getmembers():
--            if (re.match('sosreport-.*.tar', _file.name.split('/')[-1]) and not
--                    (_file.name.endswith(('.md5', '.sha256')))):
--                nested_archives.append(_file.name.split('/')[-1])
--
--        if nested_archives:
--            self.log_info("Found nested archive(s), extracting top level")
--            nested_path = self.extract_archive(archive)
--            for arc_file in os.listdir(nested_path):
--                if re.match('sosreport.*.tar.*', arc_file):
--                    if arc_file.endswith(('.md5', '.sha256')):
--                        continue
--                    self.report_paths.append(os.path.join(nested_path,
--                                                          arc_file))
--            # add the toplevel extracted archive
--            self.report_paths.append(nested_path)
-+        _arc = None
-+        if self.opts.archive_type != 'auto':
-+            check_type = self.opts.archive_type.replace('-', '_')
-+            for archive in self.archive_types:
-+                if archive.type_name == check_type:
-+                    _arc = archive(self.opts.target, self.tmpdir)
-         else:
--            self.report_paths.append(self.opts.target)
--
--        archive.close()
--
--    def extract_archive(self, archive):
--        """Extract an archive into our tmpdir so that we may inspect it or
--        iterate through its contents for obfuscation
--
--        Positional arguments:
--
--            :param archive:     An open TarFile object for the archive
--
--        """
--        if not isinstance(archive, tarfile.TarFile):
--            archive = tarfile.open(archive)
--        path = os.path.join(self.tmpdir, 'cleaner')
--        archive.extractall(path)
--        return os.path.join(path, archive.name.split('/')[-1].split('.tar')[0])
-+            for arc in self.archive_types:
-+                if arc.check_is_type(self.opts.target):
-+                    _arc = arc(self.opts.target, self.tmpdir)
-+                    break
-+        if not _arc:
-+            return
-+        self.report_paths.append(_arc)
-+        if _arc.is_nested:
-+            self.report_paths.extend(_arc.get_nested_archives())
-+            # We need to preserve the top level archive until all
-+            # nested archives are processed
-+            self.report_paths.remove(_arc)
-+            self.nested_archive = _arc
-+        if self.nested_archive:
-+            self.nested_archive.ui_name = self.nested_archive.description
- 
-     def execute(self):
-         """SoSCleaner will begin by inspecting the TARGET option to determine
-@@ -283,6 +271,7 @@ third party.
-         be unpacked, cleaned, and repacked and the final top-level archive will
-         then be repacked as well.
-         """
-+        self.arc_name = self.opts.target.split('/')[-1].split('.tar')[0]
-         if self.from_cmdline:
-             self.print_disclaimer()
-         self.report_paths = []
-@@ -290,23 +279,11 @@ third party.
-             self.ui_log.error("Invalid target: no such file or directory %s"
-                               % self.opts.target)
-             self._exit(1)
--        if os.path.isdir(self.opts.target):
--            self.arc_name = self.opts.target.split('/')[-1]
--            for _file in os.listdir(self.opts.target):
--                if _file == 'sos_logs':
--                    self.report_paths.append(self.opts.target)
--                if (_file.startswith('sosreport') and
--                   (_file.endswith(".tar.gz") or _file.endswith(".tar.xz"))):
--                    self.report_paths.append(os.path.join(self.opts.target,
--                                                          _file))
--            if not self.report_paths:
--                self.ui_log.error("Invalid target: not an sos directory")
--                self._exit(1)
--        else:
--            self.inspect_target_archive()
-+
-+        self.inspect_target_archive()
- 
-         if not self.report_paths:
--            self.ui_log.error("No valid sos archives or directories found\n")
-+            self.ui_log.error("No valid archives or directories found\n")
-             self._exit(1)
- 
-         # we have at least one valid target to obfuscate
-@@ -334,33 +311,7 @@ third party.
- 
-         final_path = None
-         if len(self.completed_reports) > 1:
--            # we have an archive of archives, so repack the obfuscated tarball
--            arc_name = self.arc_name + '-obfuscated'
--            self.setup_archive(name=arc_name)
--            for arc in self.completed_reports:
--                if arc.is_tarfile:
--                    arc_dest = self.obfuscate_string(
--                        arc.final_archive_path.split('/')[-1]
--                    )
--                    self.archive.add_file(arc.final_archive_path,
--                                          dest=arc_dest)
--                    checksum = self.get_new_checksum(arc.final_archive_path)
--                    if checksum is not None:
--                        dname = self.obfuscate_string(
--                            "checksums/%s.%s" % (arc_dest, self.hash_name)
--                        )
--                        self.archive.add_string(checksum, dest=dname)
--                else:
--                    for dirname, dirs, files in os.walk(arc.archive_path):
--                        for filename in files:
--                            if filename.startswith('sosreport'):
--                                continue
--                            fname = os.path.join(dirname, filename)
--                            dnm = self.obfuscate_string(
--                                fname.split(arc.archive_name)[-1].lstrip('/')
--                            )
--                            self.archive.add_file(fname, dest=dnm)
--            arc_path = self.archive.finalize(self.opts.compression_type)
-+            arc_path = self.rebuild_nested_archive()
-         else:
-             arc = self.completed_reports[0]
-             arc_path = arc.final_archive_path
-@@ -371,8 +322,7 @@ third party.
-                 )
-                 with open(os.path.join(self.sys_tmp, chksum_name), 'w') as cf:
-                     cf.write(checksum)
--
--        self.write_cleaner_log()
-+            self.write_cleaner_log()
- 
-         final_path = self.obfuscate_string(
-             os.path.join(self.sys_tmp, arc_path.split('/')[-1])
-@@ -393,6 +343,30 @@ third party.
- 
-         self.cleanup()
- 
-+    def rebuild_nested_archive(self):
-+        """Handles repacking the nested tarball, now containing only obfuscated
-+        copies of the reports, log files, manifest, etc...
-+        """
-+        # we have an archive of archives, so repack the obfuscated tarball
-+        arc_name = self.arc_name + '-obfuscated'
-+        self.setup_archive(name=arc_name)
-+        for archive in self.completed_reports:
-+            arc_dest = archive.final_archive_path.split('/')[-1]
-+            checksum = self.get_new_checksum(archive.final_archive_path)
-+            if checksum is not None:
-+                dname = "checksums/%s.%s" % (arc_dest, self.hash_name)
-+                self.archive.add_string(checksum, dest=dname)
-+        for dirn, dirs, files in os.walk(self.nested_archive.extracted_path):
-+            for filename in files:
-+                fname = os.path.join(dirn, filename)
-+                dname = fname.split(self.nested_archive.extracted_path)[-1]
-+                dname = dname.lstrip('/')
-+                self.archive.add_file(fname, dest=dname)
-+                # remove it now so we don't balloon our fs space needs
-+                os.remove(fname)
-+        self.write_cleaner_log(archive=True)
-+        return self.archive.finalize(self.opts.compression_type)
-+
-     def compile_mapping_dict(self):
-         """Build a dict that contains each parser's map as a key, with the
-         contents as that key's value. This will then be written to disk in the
-@@ -441,7 +415,7 @@ third party.
-                 self.log_error("Could not update mapping config file: %s"
-                                % err)
- 
--    def write_cleaner_log(self):
-+    def write_cleaner_log(self, archive=False):
-         """When invoked via the command line, the logging from SoSCleaner will
-         not be added to the archive(s) it processes, so we need to write it
-         separately to disk
-@@ -454,6 +428,10 @@ third party.
-             for line in self.sos_log_file.readlines():
-                 logfile.write(line)
- 
-+        if archive:
-+            self.obfuscate_file(log_name)
-+            self.archive.add_file(log_name, dest="sos_logs/cleaner.log")
-+
-     def get_new_checksum(self, archive_path):
-         """Calculate a new checksum for the obfuscated archive, as the previous
-         checksum will no longer be valid
-@@ -481,11 +459,11 @@ third party.
-         be obfuscated concurrently.
-         """
-         try:
--            if len(self.report_paths) > 1:
--                msg = ("Found %s total reports to obfuscate, processing up to "
--                       "%s concurrently\n"
--                       % (len(self.report_paths), self.opts.jobs))
--                self.ui_log.info(msg)
-+            msg = (
-+                "Found %s total reports to obfuscate, processing up to %s "
-+                "concurrently\n" % (len(self.report_paths), self.opts.jobs)
-+            )
-+            self.ui_log.info(msg)
-             if self.opts.keep_binary_files:
-                 self.ui_log.warning(
-                     "WARNING: binary files that potentially contain sensitive "
-@@ -494,53 +472,67 @@ third party.
-             pool = ThreadPoolExecutor(self.opts.jobs)
-             pool.map(self.obfuscate_report, self.report_paths, chunksize=1)
-             pool.shutdown(wait=True)
-+            # finally, obfuscate the nested archive if one exists
-+            if self.nested_archive:
-+                self._replace_obfuscated_archives()
-+                self.obfuscate_report(self.nested_archive)
-         except KeyboardInterrupt:
-             self.ui_log.info("Exiting on user cancel")
-             os._exit(130)
- 
-+    def _replace_obfuscated_archives(self):
-+        """When we have a nested archive, we need to rebuild the original
-+        archive, which entails replacing the existing archives with their
-+        obfuscated counterparts
-+        """
-+        for archive in self.completed_reports:
-+            os.remove(archive.archive_path)
-+            dest = self.nested_archive.extracted_path
-+            tarball = archive.final_archive_path.split('/')[-1]
-+            dest_name = os.path.join(dest, tarball)
-+            shutil.move(archive.final_archive_path, dest)
-+            archive.final_archive_path = dest_name
-+
-     def preload_all_archives_into_maps(self):
-         """Before doing the actual obfuscation, if we have multiple archives
-         to obfuscate then we need to preload each of them into the mappings
-         to ensure that node1 is obfuscated in node2 as well as node2 being
-         obfuscated in node1's archive.
-         """
--        self.log_info("Pre-loading multiple archives into obfuscation maps")
-+        self.log_info("Pre-loading all archives into obfuscation maps")
-         for _arc in self.report_paths:
--            is_dir = os.path.isdir(_arc)
--            if is_dir:
--                _arc_name = _arc
--            else:
--                archive = tarfile.open(_arc)
--                _arc_name = _arc.split('/')[-1].split('.tar')[0]
--            # for each parser, load the map_prep_file into memory, and then
--            # send that for obfuscation. We don't actually obfuscate the file
--            # here, do that in the normal archive loop
-             for _parser in self.parsers:
--                if not _parser.prep_map_file:
-+                try:
-+                    pfile = _arc.prep_files[_parser.name.lower().split()[0]]
-+                    if not pfile:
-+                        continue
-+                except (IndexError, KeyError):
-                     continue
--                if isinstance(_parser.prep_map_file, str):
--                    _parser.prep_map_file = [_parser.prep_map_file]
--                for parse_file in _parser.prep_map_file:
--                    _arc_path = os.path.join(_arc_name, parse_file)
-+                if isinstance(pfile, str):
-+                    pfile = [pfile]
-+                for parse_file in pfile:
-+                    self.log_debug("Attempting to load %s" % parse_file)
-                     try:
--                        if is_dir:
--                            _pfile = open(_arc_path, 'r')
--                            content = _pfile.read()
--                        else:
--                            _pfile = archive.extractfile(_arc_path)
--                            content = _pfile.read().decode('utf-8')
--                        _pfile.close()
-+                        content = _arc.get_file_content(parse_file)
-+                        if not content:
-+                            continue
-                         if isinstance(_parser, SoSUsernameParser):
-                             _parser.load_usernames_into_map(content)
--                        for line in content.splitlines():
--                            if isinstance(_parser, SoSHostnameParser):
--                                _parser.load_hostname_into_map(line)
--                            self.obfuscate_line(line)
-+                        elif isinstance(_parser, SoSHostnameParser):
-+                            _parser.load_hostname_into_map(
-+                                content.splitlines()[0]
-+                            )
-+                        else:
-+                            for line in content.splitlines():
-+                                self.obfuscate_line(line)
-                     except Exception as err:
--                        self.log_debug("Could not prep %s: %s"
--                                       % (_arc_path, err))
-+                        self.log_info(
-+                            "Could not prepare %s from %s (archive: %s): %s"
-+                            % (_parser.name, parse_file, _arc.archive_name,
-+                               err)
-+                        )
- 
--    def obfuscate_report(self, report):
-+    def obfuscate_report(self, archive):
-         """Individually handle each archive or directory we've discovered by
-         running through each file therein.
- 
-@@ -549,17 +541,12 @@ third party.
-             :param archive str:     Filepath to the directory or archive
-         """
-         try:
--            if not os.access(report, os.W_OK):
--                msg = "Insufficient permissions on %s" % report
--                self.log_info(msg)
--                self.ui_log.error(msg)
--                return
--
--            archive = SoSObfuscationArchive(report, self.tmpdir)
-             arc_md = self.cleaner_md.add_section(archive.archive_name)
-             start_time = datetime.now()
-             arc_md.add_field('start_time', start_time)
--            archive.extract()
-+            # don't double extract nested archives
-+            if not archive.is_extracted:
-+                archive.extract()
-             archive.report_msg("Beginning obfuscation...")
- 
-             file_list = archive.get_file_list()
-@@ -586,27 +573,28 @@ third party.
-                               caller=archive.archive_name)
- 
-             # if the archive was already a tarball, repack it
--            method = archive.get_compression()
--            if method:
--                archive.report_msg("Re-compressing...")
--                try:
--                    archive.rename_top_dir(
--                        self.obfuscate_string(archive.archive_name)
--                    )
--                    archive.compress(method)
--                except Exception as err:
--                    self.log_debug("Archive %s failed to compress: %s"
--                                   % (archive.archive_name, err))
--                    archive.report_msg("Failed to re-compress archive: %s"
--                                       % err)
--                    return
-+            if not archive.is_nested:
-+                method = archive.get_compression()
-+                if method:
-+                    archive.report_msg("Re-compressing...")
-+                    try:
-+                        archive.rename_top_dir(
-+                            self.obfuscate_string(archive.archive_name)
-+                        )
-+                        archive.compress(method)
-+                    except Exception as err:
-+                        self.log_debug("Archive %s failed to compress: %s"
-+                                       % (archive.archive_name, err))
-+                        archive.report_msg("Failed to re-compress archive: %s"
-+                                           % err)
-+                        return
-+                self.completed_reports.append(archive)
- 
-             end_time = datetime.now()
-             arc_md.add_field('end_time', end_time)
-             arc_md.add_field('run_time', end_time - start_time)
-             arc_md.add_field('files_obfuscated', len(archive.file_sub_list))
-             arc_md.add_field('total_substitutions', archive.total_sub_count)
--            self.completed_reports.append(archive)
-             rmsg = ''
-             if archive.removed_file_count:
-                 rmsg = " [removed %s unprocessable files]"
-@@ -615,7 +603,7 @@ third party.
- 
-         except Exception as err:
-             self.ui_log.info("Exception while processing %s: %s"
--                             % (report, err))
-+                             % (archive.archive_name, err))
- 
-     def obfuscate_file(self, filename, short_name=None, arc_name=None):
-         """Obfuscate and individual file, line by line.
-@@ -635,6 +623,8 @@ third party.
-             # the requested file doesn't exist in the archive
-             return
-         subs = 0
-+        if not short_name:
-+            short_name = filename.split('/')[-1]
-         if not os.path.islink(filename):
-             # don't run the obfuscation on the link, but on the actual file
-             # at some other point.
-@@ -745,3 +735,5 @@ third party.
-         for parser in self.parsers:
-             _sec = parse_sec.add_section(parser.name.replace(' ', '_').lower())
-             _sec.add_field('entries', len(parser.mapping.dataset.keys()))
-+
-+# vim: set et ts=4 sw=4 :
-diff --git a/sos/cleaner/obfuscation_archive.py b/sos/cleaner/archives/__init__.py
-similarity index 81%
-rename from sos/cleaner/obfuscation_archive.py
-rename to sos/cleaner/archives/__init__.py
-index ea0b7012..795c5a78 100644
---- a/sos/cleaner/obfuscation_archive.py
-+++ b/sos/cleaner/archives/__init__.py
-@@ -40,6 +40,10 @@ class SoSObfuscationArchive():
-     file_sub_list = []
-     total_sub_count = 0
-     removed_file_count = 0
-+    type_name = 'undetermined'
-+    description = 'undetermined'
-+    is_nested = False
-+    prep_files = {}
- 
-     def __init__(self, archive_path, tmpdir):
-         self.archive_path = archive_path
-@@ -50,7 +54,43 @@ class SoSObfuscationArchive():
-         self.soslog = logging.getLogger('sos')
-         self.ui_log = logging.getLogger('sos_ui')
-         self.skip_list = self._load_skip_list()
--        self.log_info("Loaded %s as an archive" % self.archive_path)
-+        self.is_extracted = False
-+        self._load_self()
-+        self.archive_root = ''
-+        self.log_info(
-+            "Loaded %s as type %s"
-+            % (self.archive_path, self.description)
-+        )
-+
-+    @classmethod
-+    def check_is_type(cls, arc_path):
-+        """Check if the archive is a well-known type we directly support"""
-+        return False
-+
-+    def _load_self(self):
-+        if self.is_tarfile:
-+            self.tarobj = tarfile.open(self.archive_path)
-+
-+    def get_nested_archives(self):
-+        """Return a list of ObfuscationArchives that represent additional
-+        archives found within the target archive. For example, an archive from
-+        `sos collect` will return a list of ``SoSReportArchive`` objects.
-+
-+    This should be overridden by individual ObfuscationArchive subclasses
-+        """
-+        return []
-+
-+    def get_archive_root(self):
-+        """Set the root path for the archive that should be prepended to any
-+        filenames given to methods in this class.
-+        """
-+        if self.is_tarfile:
-+            toplevel = self.tarobj.firstmember
-+            if toplevel.isdir():
-+                return toplevel.name
-+            else:
-+                return os.sep
-+        return os.path.abspath(self.archive_path)
- 
-     def report_msg(self, msg):
-         """Helper to easily format ui messages on a per-report basis"""
-@@ -96,10 +136,42 @@ class SoSObfuscationArchive():
-             os.remove(full_fname)
-             self.removed_file_count += 1
- 
--    def extract(self):
-+    def format_file_name(self, fname):
-+        """Based on the type of archive we're dealing with, do whatever that
-+        archive requires to a provided **relative** filepath to be able to
-+        access it within the archive
-+        """
-+        if not self.is_extracted:
-+            if not self.archive_root:
-+                self.archive_root = self.get_archive_root()
-+            return os.path.join(self.archive_root, fname)
-+        else:
-+            return os.path.join(self.extracted_path, fname)
-+
-+    def get_file_content(self, fname):
-+        """Return the content from the specified fname. Particularly useful for
-+        tarball-type archives so we can retrieve prep file contents prior to
-+        extracting the entire archive
-+        """
-+        if self.is_extracted is False and self.is_tarfile:
-+            filename = self.format_file_name(fname)
-+            try:
-+                return self.tarobj.extractfile(filename).read().decode('utf-8')
-+            except KeyError:
-+                self.log_debug(
-+                    "Unable to retrieve %s: no such file in archive" % fname
-+                )
-+                return ''
-+        else:
-+            with open(self.format_file_name(fname), 'r') as to_read:
-+                return to_read.read()
-+
-+    def extract(self, quiet=False):
-         if self.is_tarfile:
--            self.report_msg("Extracting...")
-+            if not quiet:
-+                self.report_msg("Extracting...")
-             self.extracted_path = self.extract_self()
-+            self.is_extracted = True
-         else:
-             self.extracted_path = self.archive_path
-         # if we're running as non-root (e.g. collector), then we can have a
-@@ -317,3 +389,5 @@ class SoSObfuscationArchive():
-                 return False
-             except UnicodeDecodeError:
-                 return True
-+
-+# vim: set et ts=4 sw=4 :
-diff --git a/sos/cleaner/archives/generic.py b/sos/cleaner/archives/generic.py
-new file mode 100644
-index 00000000..2ce6f09b
---- /dev/null
-+++ b/sos/cleaner/archives/generic.py
-@@ -0,0 +1,52 @@
-+# Copyright 2020 Red Hat, Inc. Jake Hunsaker <jhunsake@redhat.com>
-+
-+# This file is part of the sos project: https://github.com/sosreport/sos
-+#
-+# This copyrighted material is made available to anyone wishing to use,
-+# modify, copy, or redistribute it subject to the terms and conditions of
-+# version 2 of the GNU General Public License.
-+#
-+# See the LICENSE file in the source distribution for further information.
-+
-+
-+from sos.cleaner.archives import SoSObfuscationArchive
-+
-+import os
-+import tarfile
-+
-+
-+class DataDirArchive(SoSObfuscationArchive):
-+    """A plain directory on the filesystem that is not directly associated with
-+    any known or supported collection utility
-+    """
-+
-+    type_name = 'data_dir'
-+    description = 'unassociated directory'
-+
-+    @classmethod
-+    def check_is_type(cls, arc_path):
-+        return os.path.isdir(arc_path)
-+
-+    def get_archive_root(self):
-+        return os.path.abspath(self.archive_path)
-+
-+
-+class TarballArchive(SoSObfuscationArchive):
-+    """A generic tar archive that is not associated with any known or supported
-+    collection utility
-+    """
-+
-+    type_name = 'tarball'
-+    description = 'unassociated tarball'
-+
-+    @classmethod
-+    def check_is_type(cls, arc_path):
-+        try:
-+            return tarfile.is_tarfile(arc_path)
-+        except Exception:
-+            return False
-+
-+    def get_archive_root(self):
-+        if self.tarobj.firstmember.isdir():
-+            return self.tarobj.firstmember.name
-+        return ''
-diff --git a/sos/cleaner/archives/sos.py b/sos/cleaner/archives/sos.py
-new file mode 100644
-index 00000000..4401d710
---- /dev/null
-+++ b/sos/cleaner/archives/sos.py
-@@ -0,0 +1,106 @@
-+# Copyright 2021 Red Hat, Inc. Jake Hunsaker <jhunsake@redhat.com>
-+
-+# This file is part of the sos project: https://github.com/sosreport/sos
-+#
-+# This copyrighted material is made available to anyone wishing to use,
-+# modify, copy, or redistribute it subject to the terms and conditions of
-+# version 2 of the GNU General Public License.
-+#
-+# See the LICENSE file in the source distribution for further information.
-+
-+
-+from sos.cleaner.archives import SoSObfuscationArchive
-+
-+import os
-+import tarfile
-+
-+
-+class SoSReportArchive(SoSObfuscationArchive):
-+    """This is the class representing an sos report, or in other words the
-+    type of archive the SoS project natively generates
-+    """
-+
-+    type_name = 'report'
-+    description = 'sos report archive'
-+    prep_files = {
-+        'hostname': 'sos_commands/host/hostname',
-+        'ip': 'sos_commands/networking/ip_-o_addr',
-+        'mac': 'sos_commands/networking/ip_-d_address',
-+        'username': [
-+            'sos_commands/login/lastlog_-u_1000-60000',
-+            'sos_commands/login/lastlog_-u_60001-65536',
-+            'sos_commands/login/lastlog_-u_65537-4294967295',
-+            # AD users will be reported here, but favor the lastlog files since
-+            # those will include local users who have not logged in
-+            'sos_commands/login/last'
-+        ]
-+    }
-+
-+    @classmethod
-+    def check_is_type(cls, arc_path):
-+        try:
-+            return tarfile.is_tarfile(arc_path) and 'sosreport-' in arc_path
-+        except Exception:
-+            return False
-+
-+
-+class SoSReportDirectory(SoSReportArchive):
-+    """This is the archive class representing a build directory, or in other
-+    words what `sos report --clean` will end up using for in-line obfuscation
-+    """
-+
-+    type_name = 'report_dir'
-+    description = 'sos report directory'
-+
-+    @classmethod
-+    def check_is_type(cls, arc_path):
-+        if os.path.isdir(arc_path):
-+            return 'sos_logs' in os.listdir(arc_path)
-+        return False
-+
-+
-+class SoSCollectorArchive(SoSObfuscationArchive):
-+    """Archive class representing the tarball created by ``sos collect``. It
-+    will not provide prep files on its own; however, it will provide a list
-+    of SoSReportArchives which will then be used to prep the parsers
-+    """
-+
-+    type_name = 'collect'
-+    description = 'sos collect tarball'
-+    is_nested = True
-+
-+    @classmethod
-+    def check_is_type(cls, arc_path):
-+        try:
-+            return (tarfile.is_tarfile(arc_path) and 'sos-collect' in arc_path)
-+        except Exception:
-+            return False
-+
-+    def get_nested_archives(self):
-+        self.extract(quiet=True)
-+        _path = self.extracted_path
-+        archives = []
-+        for fname in os.listdir(_path):
-+            arc_name = os.path.join(_path, fname)
-+            if 'sosreport-' in fname and tarfile.is_tarfile(arc_name):
-+                archives.append(SoSReportArchive(arc_name, self.tmpdir))
-+        return archives
-+
-+
-+class SoSCollectorDirectory(SoSCollectorArchive):
-+    """The archive class representing the temp directory used by ``sos
-+    collect`` when ``--clean`` is used during runtime.
-+    """
-+
-+    type_name = 'collect_dir'
-+    description = 'sos collect directory'
-+
-+    @classmethod
-+    def check_is_type(cls, arc_path):
-+        if os.path.isdir(arc_path):
-+            for fname in os.listdir(arc_path):
-+                if 'sos-collector-' in fname:
-+                    return True
-+        return False
-+
-+# vim: set et ts=4 sw=4 :
-diff --git a/sos/cleaner/parsers/__init__.py b/sos/cleaner/parsers/__init__.py
-index af6e375e..e62fd938 100644
---- a/sos/cleaner/parsers/__init__.py
-+++ b/sos/cleaner/parsers/__init__.py
-@@ -37,11 +37,6 @@ class SoSCleanerParser():
-     :cvar map_file_key: The key in the ``map_file`` to read when loading
-                         previous obfuscation matches
-     :vartype map_file_key: ``str``
--
--
--    :cvar prep_map_file: File to read from an archive to pre-seed the map with
--                         matches. E.G. ip_addr for loading IP addresses
--    :vartype prep_map_fie: ``str``
-     """
- 
-     name = 'Undefined Parser'
-@@ -49,7 +44,6 @@ class SoSCleanerParser():
-     skip_line_patterns = []
-     skip_files = []
-     map_file_key = 'unset'
--    prep_map_file = []
- 
-     def __init__(self, config={}):
-         if self.map_file_key in config:
-diff --git a/sos/cleaner/parsers/hostname_parser.py b/sos/cleaner/parsers/hostname_parser.py
-index 71e13d3f..daa76a62 100644
---- a/sos/cleaner/parsers/hostname_parser.py
-+++ b/sos/cleaner/parsers/hostname_parser.py
-@@ -16,7 +16,6 @@ class SoSHostnameParser(SoSCleanerParser):
- 
-     name = 'Hostname Parser'
-     map_file_key = 'hostname_map'
--    prep_map_file = 'sos_commands/host/hostname'
-     regex_patterns = [
-         r'(((\b|_)[a-zA-Z0-9-\.]{1,200}\.[a-zA-Z]{1,63}(\b|_)))'
-     ]
-diff --git a/sos/cleaner/parsers/ip_parser.py b/sos/cleaner/parsers/ip_parser.py
-index 525139e8..71d38be8 100644
---- a/sos/cleaner/parsers/ip_parser.py
-+++ b/sos/cleaner/parsers/ip_parser.py
-@@ -41,7 +41,6 @@ class SoSIPParser(SoSCleanerParser):
-     ]
- 
-     map_file_key = 'ip_map'
--    prep_map_file = 'sos_commands/networking/ip_-o_addr'
- 
-     def __init__(self, config):
-         self.mapping = SoSIPMap()
-diff --git a/sos/cleaner/parsers/keyword_parser.py b/sos/cleaner/parsers/keyword_parser.py
-index 68de3727..694c6073 100644
---- a/sos/cleaner/parsers/keyword_parser.py
-+++ b/sos/cleaner/parsers/keyword_parser.py
-@@ -20,7 +20,6 @@ class SoSKeywordParser(SoSCleanerParser):
- 
-     name = 'Keyword Parser'
-     map_file_key = 'keyword_map'
--    prep_map_file = ''
- 
-     def __init__(self, config, keywords=None, keyword_file=None):
-         self.mapping = SoSKeywordMap()
-diff --git a/sos/cleaner/parsers/mac_parser.py b/sos/cleaner/parsers/mac_parser.py
-index 7ca80b8d..c74288cf 100644
---- a/sos/cleaner/parsers/mac_parser.py
-+++ b/sos/cleaner/parsers/mac_parser.py
-@@ -30,7 +30,6 @@ class SoSMacParser(SoSCleanerParser):
-         '534f:53'
-     )
-     map_file_key = 'mac_map'
--    prep_map_file = 'sos_commands/networking/ip_-d_address'
- 
-     def __init__(self, config):
-         self.mapping = SoSMacMap()
-diff --git a/sos/cleaner/parsers/username_parser.py b/sos/cleaner/parsers/username_parser.py
-index b142e371..35377a31 100644
---- a/sos/cleaner/parsers/username_parser.py
-+++ b/sos/cleaner/parsers/username_parser.py
-@@ -25,14 +25,6 @@ class SoSUsernameParser(SoSCleanerParser):
- 
-     name = 'Username Parser'
-     map_file_key = 'username_map'
--    prep_map_file = [
--        'sos_commands/login/lastlog_-u_1000-60000',
--        'sos_commands/login/lastlog_-u_60001-65536',
--        'sos_commands/login/lastlog_-u_65537-4294967295',
--        # AD users will be reported here, but favor the lastlog files since
--        # those will include local users who have not logged in
--        'sos_commands/login/last'
--    ]
-     regex_patterns = []
-     skip_list = [
-         'core',
-diff --git a/tests/cleaner_tests/existing_archive.py b/tests/cleaner_tests/existing_archive.py
-index 0eaf6c8d..e13d1cae 100644
---- a/tests/cleaner_tests/existing_archive.py
-+++ b/tests/cleaner_tests/existing_archive.py
-@@ -28,6 +28,13 @@ class ExistingArchiveCleanTest(StageTwoReportTest):
-     def test_obfuscation_log_created(self):
-         self.assertFileExists(os.path.join(self.tmpdir, '%s-obfuscation.log' % ARCHIVE))
- 
-+    def test_archive_type_correct(self):
-+        with open(os.path.join(self.tmpdir, '%s-obfuscation.log' % ARCHIVE), 'r') as log:
-+            for line in log:
-+                if "Loaded %s" % ARCHIVE in line:
-+                    assert 'as type sos report archive' in line, "Incorrect archive type detected: %s" % line
-+                    break
-+
-     def test_from_cmdline_logged(self):
-         with open(os.path.join(self.tmpdir, '%s-obfuscation.log' % ARCHIVE), 'r') as log:
-             for line in log:
-diff --git a/tests/cleaner_tests/full_report_run.py b/tests/cleaner_tests/full_report_run.py
-index 3b28e7a2..2de54946 100644
---- a/tests/cleaner_tests/full_report_run.py
-+++ b/tests/cleaner_tests/full_report_run.py
-@@ -35,6 +35,9 @@ class FullCleanTest(StageTwoReportTest):
-     def test_tarball_named_obfuscated(self):
-         self.assertTrue('obfuscated' in self.archive)
- 
-+    def test_archive_type_correct(self):
-+        self.assertSosLogContains('Loaded .* as type sos report directory')
-+
-     def test_hostname_not_in_any_file(self):
-         host = self.sysinfo['pre']['networking']['hostname']
-         # much faster to just use grep here
-diff --git a/tests/cleaner_tests/report_with_mask.py b/tests/cleaner_tests/report_with_mask.py
-index 4f94ba33..08e873d4 100644
---- a/tests/cleaner_tests/report_with_mask.py
-+++ b/tests/cleaner_tests/report_with_mask.py
-@@ -31,6 +31,9 @@ class ReportWithMask(StageOneReportTest):
-     def test_tarball_named_obfuscated(self):
-         self.assertTrue('obfuscated' in self.archive)
- 
-+    def test_archive_type_correct(self):
-+        self.assertSosLogContains('Loaded .* as type sos report directory')
-+
-     def test_localhost_was_obfuscated(self):
-         self.assertFileHasContent('/etc/hostname', 'host0')
- 
--- 
-2.31.1
-
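The refactor above replaces path-based heuristics in the cleaner with per-class type detection: SoSCleaner walks an ordered list of archive classes and asks each one whether it recognizes the target. A minimal sketch of that flow, using simplified stand-in classes (the class names and the detect_archive() helper are illustrative only, not the full sos implementation):

import tarfile


class ObfuscationArchive:
    # stand-in for SoSObfuscationArchive: subclasses declare how to
    # recognize the archives they handle
    type_name = 'undetermined'

    @classmethod
    def check_is_type(cls, arc_path):
        return False


class ReportArchive(ObfuscationArchive):
    type_name = 'report'

    @classmethod
    def check_is_type(cls, arc_path):
        # same shape as SoSReportArchive.check_is_type() in the patch
        try:
            return tarfile.is_tarfile(arc_path) and 'sosreport-' in arc_path
        except Exception:
            return False


class TarballArchive(ObfuscationArchive):
    # generic fallback; must be checked last, as in the patch's ordering
    type_name = 'tarball'

    @classmethod
    def check_is_type(cls, arc_path):
        try:
            return tarfile.is_tarfile(arc_path)
        except Exception:
            return False


def detect_archive(path, types=(ReportArchive, TarballArchive)):
    # the first matching class wins, mirroring inspect_target_archive()
    for arc in types:
        if arc.check_is_type(path):
            return arc
    return None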
-From 9b119f860eaec089f7ef884ff39c42589a662994 Mon Sep 17 00:00:00 2001
-From: Jake Hunsaker <jhunsake@redhat.com>
-Date: Wed, 1 Sep 2021 00:34:04 -0400
-Subject: [PATCH] [hostname_map] Add a catch for single-character hostnames
-
-If a log file was truncated at a specific boundary in a string of the
-FQDN of the host, such that we only get a couple of characters before the
-rest of the domain, we would previously boldly replace all instances of
-those characters with the obfuscated short name; not very helpful.
-
-When this happens, don't sanitize the short name; instead obfuscate the
-whole FQDN as 'unknown.example.com'.
-
-Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
----
- sos/cleaner/mappings/hostname_map.py | 9 ++++++++-
- 1 file changed, 8 insertions(+), 1 deletion(-)
-
-diff --git a/sos/cleaner/mappings/hostname_map.py b/sos/cleaner/mappings/hostname_map.py
-index d4b2c88e..e70a5530 100644
---- a/sos/cleaner/mappings/hostname_map.py
-+++ b/sos/cleaner/mappings/hostname_map.py
-@@ -184,7 +184,14 @@ class SoSHostnameMap(SoSMap):
-             hostname = host[0]
-             domain = host[1:]
-             # obfuscate the short name
--            ob_hostname = self.sanitize_short_name(hostname)
-+            if len(hostname) > 2:
-+                ob_hostname = self.sanitize_short_name(hostname)
-+            else:
-+                # in all likelihood the host part of the fqdn was cut
-+                # off by some form of truncation; as such, don't obfuscate
-+                # short strings that are likely to throw off obfuscation of
-+                # unrelated bits and paths
-+                ob_hostname = 'unknown'
-             ob_domain = self.sanitize_domain(domain)
-             self.dataset[item] = ob_domain
-             return '.'.join([ob_hostname, ob_domain])
--- 
-2.31.1
-
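The guard added above treats one- and two-character host parts as truncation artifacts. A standalone sketch of that branch, assuming the two sanitize callables are supplied by the mapping (as they are on SoSHostnameMap):

def obfuscate_fqdn(fqdn, sanitize_short_name, sanitize_domain):
    # illustrative reduction of SoSHostnameMap.sanitize_item() for FQDNs
    host = fqdn.split('.')
    hostname, domain = host[0], host[1:]
    if len(hostname) > 2:
        ob_hostname = sanitize_short_name(hostname)
    else:
        # a 1-2 character "hostname" would trigger replacements of that
        # character sequence everywhere, so map it to a fixed placeholder
        ob_hostname = 'unknown'
    return '.'.join([ob_hostname, sanitize_domain(domain)])

# e.g. obfuscate_fqdn('ab.example.com',
#                     lambda h: 'host0',
#                     lambda d: 'obfuscateddomain0.com')
# -> 'unknown.obfuscateddomain0.com'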
-From f3f3e763d7c31b7b7cafdf8dd4dab87056fb7696 Mon Sep 17 00:00:00 2001
-From: Jake Hunsaker <jhunsake@redhat.com>
-Date: Wed, 1 Sep 2021 15:54:55 -0400
-Subject: [PATCH] [cleaner] Add support for Insights client archives
-
-Adds a new type of `SoSObfuscationArchive` to add support for
-obfuscating archives generated by the Insights project.
-
-Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
----
- man/en/sos-clean.1               |  1 +
- sos/cleaner/__init__.py          |  4 ++-
- sos/cleaner/archives/insights.py | 42 ++++++++++++++++++++++++++++++++
- 3 files changed, 46 insertions(+), 1 deletion(-)
- create mode 100644 sos/cleaner/archives/insights.py
-
-diff --git a/man/en/sos-clean.1 b/man/en/sos-clean.1
-index 54026713..358ec0cb 100644
---- a/man/en/sos-clean.1
-+++ b/man/en/sos-clean.1
-@@ -105,6 +105,7 @@ The following are accepted values for this option:
-     \fBauto\fR          Automatically detect the archive type
-     \fBreport\fR        An archive generated by \fBsos report\fR
-     \fBcollect\fR       An archive generated by \fBsos collect\fR
-+    \fBinsights\fR      An archive generated by the \fBinsights-client\fR package
- 
- The following may also be used, however note that these do not attempt to pre-load
- any information from the archives into the parsers. This means that, among other limitations,
-diff --git a/sos/cleaner/__init__.py b/sos/cleaner/__init__.py
-index 6d2eb483..3e08aa28 100644
---- a/sos/cleaner/__init__.py
-+++ b/sos/cleaner/__init__.py
-@@ -29,6 +29,7 @@ from sos.cleaner.archives.sos import (SoSReportArchive, SoSReportDirectory,
-                                       SoSCollectorArchive,
-                                       SoSCollectorDirectory)
- from sos.cleaner.archives.generic import DataDirArchive, TarballArchive
-+from sos.cleaner.archives.insights import InsightsArchive
- from sos.utilities import get_human_readable
- from textwrap import fill
- 
-@@ -100,6 +101,7 @@ class SoSCleaner(SoSComponent):
-             SoSReportArchive,
-             SoSCollectorDirectory,
-             SoSCollectorArchive,
-+            InsightsArchive,
-             # make sure these two are always last as they are fallbacks
-             DataDirArchive,
-             TarballArchive
-@@ -194,7 +196,7 @@ third party.
-                                help='The directory or archive to obfuscate')
-         clean_grp.add_argument('--archive-type', default='auto',
-                                choices=['auto', 'report', 'collect',
--                                        'data-dir', 'tarball'],
-+                                        'insights', 'data-dir', 'tarball'],
-                                help=('Specify what kind of archive the target '
-                                      'was generated as'))
-         clean_grp.add_argument('--domains', action='extend', default=[],
-diff --git a/sos/cleaner/archives/insights.py b/sos/cleaner/archives/insights.py
-new file mode 100644
-index 00000000..dab48b16
---- /dev/null
-+++ b/sos/cleaner/archives/insights.py
-@@ -0,0 +1,42 @@
-+# Copyright 2021 Red Hat, Inc. Jake Hunsaker <jhunsake@redhat.com>
-+
-+# This file is part of the sos project: https://github.com/sosreport/sos
-+#
-+# This copyrighted material is made available to anyone wishing to use,
-+# modify, copy, or redistribute it subject to the terms and conditions of
-+# version 2 of the GNU General Public License.
-+#
-+# See the LICENSE file in the source distribution for further information.
-+
-+
-+from sos.cleaner.archives import SoSObfuscationArchive
-+
-+import tarfile
-+
-+
-+class InsightsArchive(SoSObfuscationArchive):
-+    """This class represents archives generated by the insights-client utility
-+    for RHEL systems.
-+    """
-+
-+    type_name = 'insights'
-+    description = 'insights-client archive'
-+
-+    prep_files = {
-+        'hostname': 'data/insights_commands/hostname_-f',
-+        'ip': 'data/insights_commands/ip_addr',
-+        'mac': 'data/insights_commands/ip_addr'
-+    }
-+
-+    @classmethod
-+    def check_is_type(cls, arc_path):
-+        try:
-+            return tarfile.is_tarfile(arc_path) and 'insights-' in arc_path
-+        except Exception:
-+            return False
-+
-+    def get_archive_root(self):
-+        top = self.archive_path.split('/')[-1].split('.tar')[0]
-+        if self.tarobj.firstmember.name == '.':
-+            top = './' + top
-+        return top
--- 
-2.31.1
-
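One subtlety in InsightsArchive is get_archive_root(): insights tarballs may list their first member as '.', in which case members live under './<archive name>'. A hypothetical standalone version of that rule (function and parameter names are illustrative):

def insights_archive_root(archive_path, first_member_name):
    # derive the top-level directory from the tarball's filename, as
    # InsightsArchive.get_archive_root() does above
    top = archive_path.split('/')[-1].split('.tar')[0]
    if first_member_name == '.':
        top = './' + top
    return top

# insights_archive_root('/tmp/insights-host1.tar.gz', '.')
# -> './insights-host1'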
-From 9639dc3d240076b55f2a1d04b43ea42bebd09215 Mon Sep 17 00:00:00 2001
-From: Jake Hunsaker <jhunsake@redhat.com>
-Date: Tue, 16 Nov 2021 17:50:42 -0500
-Subject: [PATCH] [clean,hostname_parser] Source /etc/hosts for obfuscation
-
-Up until now, our sourcing of hostnames/domains for obfuscation has been
-dependent upon the output of the `hostname` command. However, some
-scenarios have come up where sourcing `/etc/hosts` is advantageous for
-several reasons:
-
-First, if `hostname` output is unavailable, this provides a fallback
-measure.
-
-Second, `/etc/hosts` is a common place to have short names defined which
-would otherwise not be detected (or at the very least would result in a
-race condition based on where, or whether, the short name could be
-gleaned from an FQDN elsewhere), thus leaving the potential for unobfuscated data
-in an archive.
-
-Due to both the nature of hostname obfuscation and the malleable syntax
-of `/etc/hosts`, the parsing of this file needs special handling not
-covered by our more generic parsing and obfuscation methods.
-
-Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
----
- sos/cleaner/__init__.py                | 11 ++++++++---
- sos/cleaner/archives/sos.py            |  5 ++++-
- sos/cleaner/parsers/hostname_parser.py | 19 +++++++++++++++++++
- 3 files changed, 31 insertions(+), 4 deletions(-)
-
-diff --git a/sos/cleaner/__init__.py b/sos/cleaner/__init__.py
-index ed461a8f..3f530d44 100644
---- a/sos/cleaner/__init__.py
-+++ b/sos/cleaner/__init__.py
-@@ -523,9 +523,14 @@ third party.
-                         if isinstance(_parser, SoSUsernameParser):
-                             _parser.load_usernames_into_map(content)
-                         elif isinstance(_parser, SoSHostnameParser):
--                            _parser.load_hostname_into_map(
--                                content.splitlines()[0]
--                            )
-+                            if 'hostname' in parse_file:
-+                                _parser.load_hostname_into_map(
-+                                    content.splitlines()[0]
-+                                )
-+                            elif 'etc/hosts' in parse_file:
-+                                _parser.load_hostname_from_etc_hosts(
-+                                    content
-+                                )
-                         else:
-                             for line in content.splitlines():
-                                 self.obfuscate_line(line)
-diff --git a/sos/cleaner/archives/sos.py b/sos/cleaner/archives/sos.py
-index 4401d710..f8720c88 100644
---- a/sos/cleaner/archives/sos.py
-+++ b/sos/cleaner/archives/sos.py
-@@ -23,7 +23,10 @@ class SoSReportArchive(SoSObfuscationArchive):
-     type_name = 'report'
-     description = 'sos report archive'
-     prep_files = {
--        'hostname': 'sos_commands/host/hostname',
-+        'hostname': [
-+            'sos_commands/host/hostname',
-+            'etc/hosts'
-+        ],
-         'ip': 'sos_commands/networking/ip_-o_addr',
-         'mac': 'sos_commands/networking/ip_-d_address',
-         'username': [
-diff --git a/sos/cleaner/parsers/hostname_parser.py b/sos/cleaner/parsers/hostname_parser.py
-index daa76a62..0a733bee 100644
---- a/sos/cleaner/parsers/hostname_parser.py
-+++ b/sos/cleaner/parsers/hostname_parser.py
-@@ -61,6 +61,25 @@ class SoSHostnameParser(SoSCleanerParser):
-             self.mapping.add(high_domain)
-         self.mapping.add(hostname_string)
- 
-+    def load_hostname_from_etc_hosts(self, content):
-+        """Parse an archive's copy of /etc/hosts, which requires handling that
-+        is separate from the output of the `hostname` command. Just like
-+        load_hostname_into_map(), this has to be done explicitly and we
-+        cannot rely upon the more generic methods to do this reliably.
-+        """
-+        lines = content.splitlines()
-+        for line in lines:
-+            if line.startswith('#') or 'localhost' in line:
-+                continue
-+            hostln = line.split()[1:]
-+            for host in hostln:
-+                if len(host.split('.')) == 1:
-+                    # only generate a mapping for fqdns but still record the
-+                    # short name here for later obfuscation with parse_line()
-+                    self.short_names.append(host)
-+                else:
-+                    self.mapping.add(host)
-+
-     def parse_line(self, line):
-         """Override the default parse_line() method to also check for the
-         shortname of the host derived from the hostname.
--- 
-2.31.1
-
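The /etc/hosts handling deliberately bypasses the generic parsing path. A self-contained sketch of the same rules (comment and localhost lines skipped, FQDNs mapped, bare short names only recorded for later line-level obfuscation); add_mapping and short_names stand in for the parser's mapping and short_names attributes:

def parse_etc_hosts(content, add_mapping, short_names):
    for line in content.splitlines():
        if line.startswith('#') or 'localhost' in line:
            continue
        # skip the address field; everything after it is a hostname
        for host in line.split()[1:]:
            if len(host.split('.')) == 1:
                short_names.append(host)
            else:
                add_mapping(host)

shorts = []
parse_etc_hosts(
    "127.0.0.1 localhost\n10.0.0.5 node1.example.com node1\n",
    lambda h: print('mapping', h),
    shorts,
)
# prints "mapping node1.example.com"; shorts == ['node1']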
-From c1680226b53452b18f27f2e76c3e0e03e521f935 Mon Sep 17 00:00:00 2001
-From: Jake Hunsaker <jhunsake@redhat.com>
-Date: Wed, 17 Nov 2021 13:11:33 -0500
-Subject: [PATCH] [clean, hostname] Fix unintentionally case sensitive
- shortname handling
-
-It was discovered that our extra handling for shortnames was
-unintentionally case sensitive. Fix this to ensure that shortnames are
-obfuscated regardless of case in all collected text.
-
-Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
----
- sos/cleaner/mappings/hostname_map.py   |  6 +++---
- sos/cleaner/parsers/hostname_parser.py |  8 +++++---
- tests/cleaner_tests/full_report_run.py | 21 ++++++++++++++++++++-
- 3 files changed, 28 insertions(+), 7 deletions(-)
-
-diff --git a/sos/cleaner/mappings/hostname_map.py b/sos/cleaner/mappings/hostname_map.py
-index e70a5530..0fe78fb1 100644
---- a/sos/cleaner/mappings/hostname_map.py
-+++ b/sos/cleaner/mappings/hostname_map.py
-@@ -169,13 +169,13 @@ class SoSHostnameMap(SoSMap):
- 
-     def sanitize_item(self, item):
-         host = item.split('.')
--        if all([h.isupper() for h in host]):
-+        if len(host) > 1 and all([h.isupper() for h in host]):
-             # by convention we have just a domain
-             _host = [h.lower() for h in host]
-             return self.sanitize_domain(_host).upper()
-         if len(host) == 1:
-             # we have a shortname for a host
--            return self.sanitize_short_name(host[0])
-+            return self.sanitize_short_name(host[0].lower())
-         if len(host) == 2:
-             # we have just a domain name, e.g. example.com
-             return self.sanitize_domain(host)
-@@ -185,7 +185,7 @@ class SoSHostnameMap(SoSMap):
-             domain = host[1:]
-             # obfuscate the short name
-             if len(hostname) > 2:
--                ob_hostname = self.sanitize_short_name(hostname)
-+                ob_hostname = self.sanitize_short_name(hostname.lower())
-             else:
-                # in all likelihood the host part of the fqdn was cut
-                # off by some form of truncation; as such, don't obfuscate
-diff --git a/sos/cleaner/parsers/hostname_parser.py b/sos/cleaner/parsers/hostname_parser.py
-index 0a733bee..7fd0e698 100644
---- a/sos/cleaner/parsers/hostname_parser.py
-+++ b/sos/cleaner/parsers/hostname_parser.py
-@@ -8,6 +8,8 @@
- #
- # See the LICENSE file in the source distribution for further information.
- 
-+import re
-+
- from sos.cleaner.parsers import SoSCleanerParser
- from sos.cleaner.mappings.hostname_map import SoSHostnameMap
- 
-@@ -91,9 +93,9 @@ class SoSHostnameParser(SoSCleanerParser):
-             """
-             if search in self.mapping.skip_keys:
-                 return ln, count
--            if search in ln:
--                count += ln.count(search)
--                ln = ln.replace(search, self.mapping.get(repl or search))
-+            _reg = re.compile(search, re.I)
-+            if _reg.search(ln):
-+                return _reg.subn(self.mapping.get(repl or search), ln)
-             return ln, count
- 
-         count = 0
-diff --git a/tests/cleaner_tests/full_report_run.py b/tests/cleaner_tests/full_report_run.py
-index 2de54946..0b23acaf 100644
---- a/tests/cleaner_tests/full_report_run.py
-+++ b/tests/cleaner_tests/full_report_run.py
-@@ -26,6 +26,24 @@ class FullCleanTest(StageTwoReportTest):
-     # replace with an empty placeholder, make sure that this test case is not
-     # influenced by previous clean runs
-     files = ['/etc/sos/cleaner/default_mapping']
-+    packages = {
-+        'rhel': ['python3-systemd'],
-+        'ubuntu': ['python3-systemd']
-+    }
-+
-+    def pre_sos_setup(self):
-+        # ensure that case-insensitive matching of FQDNs and shortnames work
-+        from systemd import journal
-+        from socket import gethostname
-+        host = gethostname()
-+        short = host.split('.')[0]
-+        sosfd = journal.stream('sos-testing')
-+        sosfd.write(
-+            "This is a test line from sos clean testing. The hostname %s "
-+            "should not appear, nor should %s in an obfuscated archive. The "
-+            "shortnames of %s and %s should also not appear."
-+            % (host.lower(), host.upper(), short.lower(), short.upper())
-+        )
- 
-     def test_private_map_was_generated(self):
-         self.assertOutputContains('A mapping of obfuscated elements is available at')
-@@ -40,8 +58,9 @@ class FullCleanTest(StageTwoReportTest):
- 
-     def test_hostname_not_in_any_file(self):
-         host = self.sysinfo['pre']['networking']['hostname']
-+        short = host.split('.')[0]
-         # much faster to just use grep here
--        content = self.grep_for_content(host)
-+        content = self.grep_for_content(host) + self.grep_for_content(short)
-         if not content:
-             assert True
-         else:
--- 
-2.31.1
-
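The mechanical change here is from str.replace() to a compiled case-insensitive regex, which both substitutes every casing variant and reports the substitution count the caller needs. A minimal sketch (unlike the patch, this escapes the search string, since a hostname is intended as a literal):

import re

def replace_all_cases(line, search, replacement):
    _reg = re.compile(re.escape(search), re.I)
    return _reg.subn(replacement, line)

line, count = replace_all_cases('Host NODE1 and node1 seen', 'node1', 'host0')
# -> ('Host host0 and host0 seen', 2)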
-From aaeb8cb57ed55598ab744b96d4f127aedebcb292 Mon Sep 17 00:00:00 2001
-From: Jake Hunsaker <jhunsake@redhat.com>
-Date: Tue, 21 Sep 2021 15:23:20 -0400
-Subject: [PATCH] [build] Add archives to setup.py packages
-
-Adds the newly abstracted `sos.cleaner.archives` package to `setup.py`
-so that manual builds will properly include it.
-
-Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
----
- setup.py | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/setup.py b/setup.py
-index 1e8d8e2dc5..7653b59de3 100644
---- a/setup.py
-+++ b/setup.py
-@@ -102,7 +102,7 @@ def copy_file (self, filename, dirname):
-         'sos.policies.package_managers', 'sos.policies.init_systems',
-         'sos.report', 'sos.report.plugins', 'sos.collector',
-         'sos.collector.clusters', 'sos.cleaner', 'sos.cleaner.mappings',
--        'sos.cleaner.parsers'
-+        'sos.cleaner.parsers', 'sos.cleaner.archives'
-     ],
-     cmdclass=cmdclass,
-     command_options=command_options,
--- 
-2.31.1
-
-From ba3528230256429a4394f155a9ca1fdb91cf3560 Mon Sep 17 00:00:00 2001
-From: Jake Hunsaker <jhunsake@redhat.com>
-Date: Tue, 30 Nov 2021 12:46:34 -0500
-Subject: [PATCH 1/2] [hostname] Simplify case matching for domains
-
-Instead of special-casing all-uppercase domain conventions, use our
-normal flow for obfuscation and simply match the casing at the end of the
-sanitization routine.
-
-Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
----
- sos/cleaner/mappings/hostname_map.py | 14 ++++++++------
- 1 file changed, 8 insertions(+), 6 deletions(-)
-
-diff --git a/sos/cleaner/mappings/hostname_map.py b/sos/cleaner/mappings/hostname_map.py
-index 0fe78fb1..5cd8e985 100644
---- a/sos/cleaner/mappings/hostname_map.py
-+++ b/sos/cleaner/mappings/hostname_map.py
-@@ -169,16 +169,15 @@ class SoSHostnameMap(SoSMap):
- 
-     def sanitize_item(self, item):
-         host = item.split('.')
--        if len(host) > 1 and all([h.isupper() for h in host]):
--            # by convention we have just a domain
--            _host = [h.lower() for h in host]
--            return self.sanitize_domain(_host).upper()
-         if len(host) == 1:
-             # we have a shortname for a host
-             return self.sanitize_short_name(host[0].lower())
-         if len(host) == 2:
-             # we have just a domain name, e.g. example.com
--            return self.sanitize_domain(host)
-+            dname = self.sanitize_domain(host)
-+            if all([h.isupper() for h in host]):
-+                dname = dname.upper()
-+            return dname
-         if len(host) > 2:
-             # we have an FQDN, e.g. foo.example.com
-             hostname = host[0]
-@@ -194,7 +193,10 @@ class SoSHostnameMap(SoSMap):
-                 ob_hostname = 'unknown'
-             ob_domain = self.sanitize_domain(domain)
-             self.dataset[item] = ob_domain
--            return '.'.join([ob_hostname, ob_domain])
-+            _fqdn = '.'.join([ob_hostname, ob_domain])
-+            if all([h.isupper() for h in host]):
-+                _fqdn = _fqdn.upper()
-+            return _fqdn
- 
-     def sanitize_short_name(self, hostname):
-         """Obfuscate the short name of the host with an incremented counter
--- 
-2.31.1
-
-
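The simplification above obfuscates through the normal lower-case path and only restores casing at the end. Sketched on its own, under the assumption that every label being upper-case marks an intentionally upper-cased domain:

def match_case(original_labels, obfuscated):
    # e.g. match_case(['EXAMPLE', 'COM'], 'obfuscateddomain0.com')
    # -> 'OBFUSCATEDDOMAIN0.COM'
    if all(h.isupper() for h in original_labels):
        return obfuscated.upper()
    return obfuscated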
-From 189586728de22dd55122c1f7e06b19590f9a788f Mon Sep 17 00:00:00 2001
-From: Jake Hunsaker <jhunsake@redhat.com>
-Date: Tue, 30 Nov 2021 12:47:58 -0500
-Subject: [PATCH 2/2] [username] Improve username sourcing and remove case
- sensitivity
-
-First, don't skip the first line of `last` output, and instead add the
-header from lastlog to the skip list. Additionally, add
-`/etc/cron.allow` and `/etc/cron.deny` as sources for usernames that
-might not appear in other locations in certain environments.
-
-Also, make matching and replacement case insensitive.
-
-Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
----
- sos/cleaner/archives/sos.py            |  4 +++-
- sos/cleaner/mappings/username_map.py   |  2 +-
- sos/cleaner/parsers/username_parser.py | 14 +++++++++-----
- 3 files changed, 13 insertions(+), 7 deletions(-)
-
-diff --git a/sos/cleaner/archives/sos.py b/sos/cleaner/archives/sos.py
-index f8720c88..12766496 100644
---- a/sos/cleaner/archives/sos.py
-+++ b/sos/cleaner/archives/sos.py
-@@ -35,7 +35,9 @@ class SoSReportArchive(SoSObfuscationArchive):
-             'sos_commands/login/lastlog_-u_65537-4294967295',
-             # AD users will be reported here, but favor the lastlog files since
-             # those will include local users who have not logged in
--            'sos_commands/login/last'
-+            'sos_commands/login/last',
-+            'etc/cron.allow',
-+            'etc/cron.deny'
-         ]
-     }
- 
-diff --git a/sos/cleaner/mappings/username_map.py b/sos/cleaner/mappings/username_map.py
-index cdbf36fe..7ecccd7b 100644
---- a/sos/cleaner/mappings/username_map.py
-+++ b/sos/cleaner/mappings/username_map.py
-@@ -33,5 +33,5 @@ class SoSUsernameMap(SoSMap):
-         ob_name = "obfuscateduser%s" % self.name_count
-         self.name_count += 1
-         if ob_name in self.dataset.values():
--            return self.sanitize_item(username)
-+            return self.sanitize_item(username.lower())
-         return ob_name
-diff --git a/sos/cleaner/parsers/username_parser.py b/sos/cleaner/parsers/username_parser.py
-index 35377a31..229c7de4 100644
---- a/sos/cleaner/parsers/username_parser.py
-+++ b/sos/cleaner/parsers/username_parser.py
-@@ -8,6 +8,7 @@
- #
- # See the LICENSE file in the source distribution for further information.
- 
-+import re
- 
- from sos.cleaner.parsers import SoSCleanerParser
- from sos.cleaner.mappings.username_map import SoSUsernameMap
-@@ -34,6 +35,7 @@ class SoSUsernameParser(SoSCleanerParser):
-         'reboot',
-         'root',
-         'ubuntu',
-+        'username',
-         'wtmp'
-     ]
- 
-@@ -47,12 +49,12 @@ class SoSUsernameParser(SoSCleanerParser):
-         this parser, we need to override the initial parser prepping here.
-         """
-         users = set()
--        for line in content.splitlines()[1:]:
-+        for line in content.splitlines():
-             try:
-                 user = line.split()[0]
-             except Exception:
-                 continue
--            if user in self.skip_list:
-+            if user.lower() in self.skip_list:
-                 continue
-             users.add(user)
-         for each in users:
-@@ -61,7 +63,9 @@ class SoSUsernameParser(SoSCleanerParser):
-     def parse_line(self, line):
-         count = 0
-         for username in sorted(self.mapping.dataset.keys(), reverse=True):
--            if username in line:
--                count = line.count(username)
--                line = line.replace(username, self.mapping.get(username))
-+            _reg = re.compile(username, re.I)
-+            if _reg.search(line):
-+                line, count = _reg.subn(
-+                    self.mapping.get(username.lower()), line
-+                )
-         return line, count
--- 
-2.31.1
-
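Taken together, the username changes mean every line of last/lastlog output is considered and the skip list is consulted case-insensitively; the lastlog header line is filtered by adding 'username' to the skip list, as the patch does. A standalone sketch of the prep step:

def collect_users(content, skip_list):
    users = set()
    for line in content.splitlines():
        try:
            user = line.split()[0]
        except IndexError:
            # blank line
            continue
        if user.lower() in skip_list:
            continue
        users.add(user)
    return users

collect_users("Username Port From\nbob pts/0 10.0.0.1\n", {'username', 'root'})
# -> {'bob'}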
-From cafd0f3a52436a3966576e7db21e5dd17c06f0cc Mon Sep 17 00:00:00 2001
-From: Jake Hunsaker <jhunsake@redhat.com>
-Date: Sun, 12 Dec 2021 11:10:46 -0500
-Subject: [PATCH] [hostname] Fix edge case for new hosts in a known subdomain
-
-Fixes an edge case where we would at first fail to recognize that a
-given hostname string is a new host in a known subdomain, but would then
-properly recognize it as such during the obfuscation attempt, resulting
-in an incomplete obfuscation.
-
-This was mostly triggered by specific patterns for build hosts within
-`sos_commands/rpm/package-data`. With this refined check, these types of
-matches are properly obfuscated.
-
-Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
----
- sos/cleaner/mappings/hostname_map.py | 9 +++++----
- 1 file changed, 5 insertions(+), 4 deletions(-)
-
-diff --git a/sos/cleaner/mappings/hostname_map.py b/sos/cleaner/mappings/hostname_map.py
-index 5cd8e9857..33b0e6c80 100644
---- a/sos/cleaner/mappings/hostname_map.py
-+++ b/sos/cleaner/mappings/hostname_map.py
-@@ -129,7 +129,7 @@ def get(self, item):
-             item = item[0:-1]
-         if not self.domain_name_in_loaded_domains(item.lower()):
-             return item
--        if item.endswith(('.yaml', '.yml', '.crt', '.key', '.pem')):
-+        if item.endswith(('.yaml', '.yml', '.crt', '.key', '.pem', '.log')):
-             ext = '.' + item.split('.')[-1]
-             item = item.replace(ext, '')
-             suffix += ext
-@@ -148,7 +148,8 @@ def get(self, item):
-                 if len(_test) == 1 or not _test[0]:
-                     # does not match existing obfuscation
-                     continue
--                elif _test[0].endswith('.') and not _host_substr:
-+                elif not _host_substr and (_test[0].endswith('.') or
-+                                           item.endswith(_existing)):
-                     # new hostname in known domain
-                     final = super(SoSHostnameMap, self).get(item)
-                     break
-@@ -219,8 +220,8 @@ def sanitize_domain(self, domain):
-             # don't obfuscate vendor domains
-             if re.match(_skip, '.'.join(domain)):
-                 return '.'.join(domain)
--        top_domain = domain[-1]
--        dname = '.'.join(domain[0:-1])
-+        top_domain = domain[-1].lower()
-+        dname = '.'.join(domain[0:-1]).lower()
-         ob_domain = self._new_obfuscated_domain(dname)
-         ob_domain = '.'.join([ob_domain, top_domain])
-         self.dataset['.'.join(domain)] = ob_domain
-From f5e1298162a9393ea2d9f5c4df40dfece50f5f88 Mon Sep 17 00:00:00 2001
-From: Jake Hunsaker <jhunsake@redhat.com>
-Date: Thu, 6 Jan 2022 13:15:15 -0500
-Subject: [PATCH 1/3] [hostname] Fix loading and detection of long base domains
-
-Our domain matching has up to now assumed that users would be providing
-'base' domains such as 'example.com' whereby something like
-'foo.bar.example.com' is a subdomain (or host) within that base domain.
-
-However, the use case exists to provide 'foo.bar.example.com' as the
-base domain, without wanting to obfuscate 'example.com' directly.
-
-This commit fixes our handling of both loading these longer domains and
-doing the 'domain is part of a domain we want to obfuscate' check.
-
-Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
----
- sos/cleaner/mappings/hostname_map.py | 9 ++++++++-
- 1 file changed, 8 insertions(+), 1 deletion(-)
-
-diff --git a/sos/cleaner/mappings/hostname_map.py b/sos/cleaner/mappings/hostname_map.py
-index 33b0e6c8..7a7cf6b8 100644
---- a/sos/cleaner/mappings/hostname_map.py
-+++ b/sos/cleaner/mappings/hostname_map.py
-@@ -50,10 +50,14 @@ class SoSHostnameMap(SoSMap):
-         in this parser, we need to re-inject entries from the map_file into
-         these dicts and not just the underlying 'dataset' dict
-         """
--        for domain in self.dataset:
-+        for domain, ob_pair in self.dataset.items():
-             if len(domain.split('.')) == 1:
-                 self.hosts[domain.split('.')[0]] = self.dataset[domain]
-             else:
-+                if ob_pair.startswith('obfuscateddomain'):
-+                    # direct exact-domain matches
-+                    self._domains[domain] = ob_pair.split('.')[0]
-+                    continue
-                 # strip the host name and trailing top-level domain so that
-                # we can inject the domain properly for later string matching
- 
-@@ -102,9 +106,12 @@ class SoSHostnameMap(SoSMap):
-         and should be obfuscated
-         """
-         host = domain.split('.')
-+        no_tld = '.'.join(domain.split('.')[0:-1])
-         if len(host) == 1:
-             # don't block on host's shortname
-             return host[0] in self.hosts.keys()
-+        elif any([no_tld.endswith(_d) for _d in self._domains]):
-+            return True
-         else:
-             domain = host[0:-1]
-             for known_domain in self._domains:
--- 
-2.31.1
-
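
A minimal standalone sketch (hypothetical function and data, not sos's API) of the new endswith-based check from PATCH 1/3:

    # Sketch: with the TLD stripped, a candidate matches if it ends with
    # any known base domain, so a long base domain like
    # 'foo.bar.example.com' matches its hosts without also matching
    # 'example.com' itself.
    _domains = ['foo.bar.example']   # loaded from 'foo.bar.example.com'

    def domain_wants_obfuscation(domain):
        no_tld = '.'.join(domain.split('.')[:-1])
        return any(no_tld.endswith(d) for d in _domains)

    assert domain_wants_obfuscation('host1.foo.bar.example.com')
    assert not domain_wants_obfuscation('example.com')
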
-
-From e241cf33a14ecd4e848a5fd857c5d3d7d07fbd71 Mon Sep 17 00:00:00 2001
-From: Jake Hunsaker <jhunsake@redhat.com>
-Date: Thu, 6 Jan 2022 13:18:44 -0500
-Subject: [PATCH 2/3] [cleaner] Improve parser-specific file skipping
-
-This commit improves our handling of skipping files on a per-parser
-basis by first filtering the list of parsers that `obfuscate_line()`
-will iterate over by each parser's `skip_files` class attribute, rather
-than relying on higher-level checks.
-
-Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
----
- sos/cleaner/__init__.py | 17 ++++++++++++++---
- 1 file changed, 14 insertions(+), 3 deletions(-)
-
-diff --git a/sos/cleaner/__init__.py b/sos/cleaner/__init__.py
-index 3f530d44..5686e213 100644
---- a/sos/cleaner/__init__.py
-+++ b/sos/cleaner/__init__.py
-@@ -12,6 +12,7 @@ import hashlib
- import json
- import logging
- import os
-+import re
- import shutil
- import tempfile
- 
-@@ -640,10 +641,16 @@ third party.
-             self.log_debug("Obfuscating %s" % short_name or filename,
-                            caller=arc_name)
-             tfile = tempfile.NamedTemporaryFile(mode='w', dir=self.tmpdir)
-+            _parsers = [
-+                _p for _p in self.parsers if not
-+                any([
-+                    re.match(p, short_name) for p in _p.skip_files
-+                ])
-+            ]
-             with open(filename, 'r') as fname:
-                 for line in fname:
-                     try:
--                        line, count = self.obfuscate_line(line)
-+                        line, count = self.obfuscate_line(line, _parsers)
-                         subs += count
-                         tfile.write(line)
-                     except Exception as err:
-@@ -713,7 +720,7 @@ third party.
-                 pass
-         return string_data
- 
--    def obfuscate_line(self, line):
-+    def obfuscate_line(self, line, parsers=None):
-         """Run a line through each of the obfuscation parsers, keeping a
-         cumulative total of substitutions done on that particular line.
- 
-@@ -721,6 +728,8 @@ third party.
- 
-             :param line str:        The raw line as read from the file being
-                                     processed
-+            :param parsers:         A list of parser objects to obfuscate
-+                                    with. If None, use all.
- 
-         Returns the fully obfuscated line and the number of substitutions made
-         """
-@@ -729,7 +738,9 @@ third party.
-         count = 0
-         if not line.strip():
-             return line, count
--        for parser in self.parsers:
-+        if parsers is None:
-+            parsers = self.parsers
-+        for parser in parsers:
-             try:
-                 line, _count = parser.parse_line(line)
-                 count += _count
--- 
-2.31.1
-
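
A self-contained sketch of the filtering idiom from PATCH 2/3, using stub objects in place of the real SoSCleanerParser classes:

    import re

    # Stub parsers standing in for the real parser objects; each carries
    # a skip_files list of regexes, as in the patch.
    class StubParser:
        def __init__(self, name, skip_files):
            self.name = name
            self.skip_files = skip_files

    parsers = [
        StubParser('ip', [r'var/log/.*dnf.*']),
        StubParser('hostname', []),
    ]

    short_name = 'var/log/dnf.log'
    # Keep only parsers whose skip_files do not match this file.
    _parsers = [
        p for p in parsers
        if not any(re.match(s, short_name) for s in p.skip_files)
    ]
    assert [p.name for p in _parsers] == ['hostname']
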
-
-From 96c9a833e77639a853b7d3d6f1df68bbbbe5e9cb Mon Sep 17 00:00:00 2001
-From: Jake Hunsaker <jhunsake@redhat.com>
-Date: Thu, 6 Jan 2022 13:20:32 -0500
-Subject: [PATCH 3/3] [cleaner] Add skips for known files and usernames
-
-Adds skips for `/proc/kallsyms`, which should never be obfuscated, as
-well as for any packaging-related log files in the IP parser. Further,
-do not obfuscate the `stack` user, as that is a well-known user for
-many configurations that, if obfuscated, could result in undesired
-string substitutions in normal logging.
-
-Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
----
- sos/cleaner/archives/__init__.py       | 2 ++
- sos/cleaner/parsers/ip_parser.py       | 3 ++-
- sos/cleaner/parsers/username_parser.py | 1 +
- 3 files changed, 5 insertions(+), 1 deletion(-)
-
-diff --git a/sos/cleaner/archives/__init__.py b/sos/cleaner/archives/__init__.py
-index 795c5a78..cbf1f809 100644
---- a/sos/cleaner/archives/__init__.py
-+++ b/sos/cleaner/archives/__init__.py
-@@ -43,6 +43,7 @@ class SoSObfuscationArchive():
-     type_name = 'undetermined'
-     description = 'undetermined'
-     is_nested = False
-+    skip_files = []
-     prep_files = {}
- 
-     def __init__(self, archive_path, tmpdir):
-@@ -111,6 +112,7 @@ class SoSObfuscationArchive():
-         Returns: list of files and file regexes
-         """
-         return [
-+            'proc/kallsyms',
-             'sosreport-',
-             'sys/firmware',
-             'sys/fs',
-diff --git a/sos/cleaner/parsers/ip_parser.py b/sos/cleaner/parsers/ip_parser.py
-index 71d38be8..b007368c 100644
---- a/sos/cleaner/parsers/ip_parser.py
-+++ b/sos/cleaner/parsers/ip_parser.py
-@@ -37,7 +37,8 @@ class SoSIPParser(SoSCleanerParser):
-         'sos_commands/snappy/snap_list_--all',
-         'sos_commands/snappy/snap_--version',
-         'sos_commands/vulkan/vulkaninfo',
--        'var/log/.*dnf.*'
-+        'var/log/.*dnf.*',
-+        'var/log/.*packag.*'  # get 'packages' and 'packaging' logs
-     ]
- 
-     map_file_key = 'ip_map'
-diff --git a/sos/cleaner/parsers/username_parser.py b/sos/cleaner/parsers/username_parser.py
-index 229c7de4..3208a655 100644
---- a/sos/cleaner/parsers/username_parser.py
-+++ b/sos/cleaner/parsers/username_parser.py
-@@ -32,6 +32,7 @@ class SoSUsernameParser(SoSCleanerParser):
-         'nobody',
-         'nfsnobody',
-         'shutdown',
-+        'stack',
-         'reboot',
-         'root',
-         'ubuntu',
--- 
-2.31.1
-
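
A quick standalone check of the new IP-parser skip pattern (regex semantics only):

    import re

    # The pattern covers both 'packages' and 'packaging' log names,
    # but not unrelated logs.
    pattern = r'var/log/.*packag.*'
    assert re.match(pattern, 'var/log/packages.log')
    assert re.match(pattern, 'var/log/packaging/foreman.log')
    assert not re.match(pattern, 'var/log/messages')
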
-From 7ebb2ce0bcd13c1b3aada648aceb20b5aff636d9 Mon Sep 17 00:00:00 2001
-From: Jake Hunsaker <jhunsake@redhat.com>
-Date: Tue, 15 Feb 2022 14:18:02 -0500
-Subject: [PATCH] [host] Skip entire /etc/sos/cleaner directory
-
-While `default_mapping` is typically the only file expected under
-`/etc/sos/cleaner/`, it is possible for other mapping files (such as
-backups) to appear there.
-
-Make the `add_forbidden_path()` spec here target the entire cleaner
-directory to avoid ever capturing these map files.
-
-Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
----
- sos/report/plugins/host.py | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/sos/report/plugins/host.py b/sos/report/plugins/host.py
-index 5e21da7b8e..95a3b9cd95 100644
---- a/sos/report/plugins/host.py
-+++ b/sos/report/plugins/host.py
-@@ -20,7 +20,7 @@ class Host(Plugin, IndependentPlugin):
- 
-     def setup(self):
- 
--        self.add_forbidden_path('/etc/sos/cleaner/default_mapping')
-+        self.add_forbidden_path('/etc/sos/cleaner')
- 
-         self.add_cmd_output('hostname', root_symlink='hostname')
-         self.add_cmd_output('uptime', root_symlink='uptime')
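
Illustration only, assuming forbidden paths act as path prefixes (a simplification of the real Plugin API): the directory spec now covers the default map plus any backups beside it.

    # Simplified prefix check; the backup file name is hypothetical.
    forbidden = '/etc/sos/cleaner'
    for f in ('/etc/sos/cleaner/default_mapping',
              '/etc/sos/cleaner/default_mapping.bak'):
        assert f.startswith(forbidden)
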
diff --git a/SOURCES/sos-bz2025611-RHTS-api-change.patch b/SOURCES/sos-bz2025611-RHTS-api-change.patch
deleted file mode 100644
index 580117f..0000000
--- a/SOURCES/sos-bz2025611-RHTS-api-change.patch
+++ /dev/null
@@ -1,224 +0,0 @@
-From 2e8b5e2d4f30854cce93d149fc7d24b9d9cfd02c Mon Sep 17 00:00:00 2001
-From: Pavel Moravec <pmoravec@redhat.com>
-Date: Fri, 19 Nov 2021 16:16:07 +0100
-Subject: [PATCH 1/3] [policies] strip path from SFTP upload filename
-
-When case_id is not supplied, we ask the SFTP server to store the
-uploaded file under the name /var/tmp/<tarball>, which is confusing.
-
-Remove the path from the filename also when case_id is not supplied.
-
-Related to: #2764
-
-Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
----
- sos/policies/distros/redhat.py | 6 +++---
- 1 file changed, 3 insertions(+), 3 deletions(-)
-
-diff --git a/sos/policies/distros/redhat.py b/sos/policies/distros/redhat.py
-index 3476e21fb..8817fc785 100644
---- a/sos/policies/distros/redhat.py
-+++ b/sos/policies/distros/redhat.py
-@@ -269,10 +269,10 @@ def _get_sftp_upload_name(self):
-         """The RH SFTP server will only automatically connect file uploads to
-         cases if the filename _starts_ with the case number
-         """
-+        fname = self.upload_archive_name.split('/')[-1]
-         if self.case_id:
--            return "%s_%s" % (self.case_id,
--                              self.upload_archive_name.split('/')[-1])
--        return self.upload_archive_name
-+            return "%s_%s" % (self.case_id, fname)
-+        return fname
- 
-     def upload_sftp(self):
-         """Override the base upload_sftp to allow for setting an on-demand
-
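
The renaming logic from PATCH 1/3, restated as a standalone sketch (hypothetical helper, not the actual policy method):

    # Strip the path, then prefix the case id only when one is given.
    def sftp_upload_name(archive_path, case_id=None):
        fname = archive_path.split('/')[-1]
        return "%s_%s" % (case_id, fname) if case_id else fname

    assert sftp_upload_name('/var/tmp/sosreport-node1.tar.xz',
                            '01234567') == '01234567_sosreport-node1.tar.xz'
    assert sftp_upload_name('/var/tmp/sosreport-node1.tar.xz') == \
        'sosreport-node1.tar.xz'
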
-From 61023b29a656dd7afaa4a0643368b0a53f1a3779 Mon Sep 17 00:00:00 2001
-From: Pavel Moravec <pmoravec@redhat.com>
-Date: Fri, 19 Nov 2021 17:31:31 +0100
-Subject: [PATCH 2/3] [redhat] update SFTP API version to v2
-
-Change the API version from v1 to v2, which includes:
-- a changed URL
-- a different URI
-- the POST method for token generation instead of GET
-
-Resolves: #2764
-
-Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
----
- sos/policies/distros/redhat.py | 10 +++++-----
- 1 file changed, 5 insertions(+), 5 deletions(-)
-
-diff --git a/sos/policies/distros/redhat.py b/sos/policies/distros/redhat.py
-index 8817fc785..e4e2b8835 100644
---- a/sos/policies/distros/redhat.py
-+++ b/sos/policies/distros/redhat.py
-@@ -175,7 +175,7 @@ def get_tmp_dir(self, opt_tmp_dir):
- No changes will be made to system configuration.
- """
- 
--RH_API_HOST = "https://access.redhat.com"
-+RH_API_HOST = "https://api.access.redhat.com"
- RH_SFTP_HOST = "sftp://sftp.access.redhat.com"
- 
- 
-@@ -287,12 +287,12 @@ def upload_sftp(self):
-                             " for obtaining SFTP auth token.")
-         _token = None
-         _user = None
-+        url = RH_API_HOST + '/support/v2/sftp/token'
-         # we have a username and password, but we need to reset the password
-         # to be the token returned from the auth endpoint
-         if self.get_upload_user() and self.get_upload_password():
--            url = RH_API_HOST + '/hydra/rest/v1/sftp/token'
-             auth = self.get_upload_https_auth()
--            ret = requests.get(url, auth=auth, timeout=10)
-+            ret = requests.post(url, auth=auth, timeout=10)
-             if ret.status_code == 200:
-                 # credentials are valid
-                 _user = self.get_upload_user()
-@@ -302,8 +302,8 @@ def upload_sftp(self):
-                       "credentials. Will try anonymous.")
-         # we either do not have a username or password/token, or both
-         if not _token:
--            aurl = RH_API_HOST + '/hydra/rest/v1/sftp/token?isAnonymous=true'
--            anon = requests.get(aurl, timeout=10)
-+            adata = {"isAnonymous": True}
-+            anon = requests.post(url, data=json.dumps(adata), timeout=10)
-             if anon.status_code == 200:
-                 resp = json.loads(anon.text)
-                 _user = resp['username']
-
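
A hedged sketch of the v2 anonymous token request shown above; the endpoint and the 'username' response key come from the patch, while the 'token' key is an assumption here.

    import json
    import requests

    # Assumed response shape: {'username': ..., 'token': ...}; only
    # 'username' is confirmed by the patch itself.
    url = 'https://api.access.redhat.com/support/v2/sftp/token'
    anon = requests.post(url, data=json.dumps({"isAnonymous": True}),
                         timeout=10)
    if anon.status_code == 200:
        resp = json.loads(anon.text)
        _user = resp['username']
        _token = resp.get('token')   # assumed field name
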
-From 267da2156ec61f526dd28e760ff6528408a76c3f Mon Sep 17 00:00:00 2001
-From: Pavel Moravec <pmoravec@redhat.com>
-Date: Mon, 22 Nov 2021 15:22:32 +0100
-Subject: [PATCH 3/3] [policies] Deal 200 return code as success
-
-A return code of 200 from a POST request must be treated as success.
-
-This is newly required due to the SFTP API change to use POST.
-
-Related to: #2764
-
-Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
----
- sos/policies/distros/__init__.py | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/sos/policies/distros/__init__.py b/sos/policies/distros/__init__.py
-index 0906fa779..6f257fdce 100644
---- a/sos/policies/distros/__init__.py
-+++ b/sos/policies/distros/__init__.py
-@@ -551,7 +551,7 @@ def upload_https(self):
-                 r = self._upload_https_put(arc, verify)
-             else:
-                 r = self._upload_https_post(arc, verify)
--            if r.status_code != 201:
-+            if r.status_code != 200 and r.status_code != 201:
-                 if r.status_code == 401:
-                     raise Exception(
-                         "Authentication failed: invalid user credentials"
-From 8da1b14246226792c160dd04e5c7c75dd4e8d44b Mon Sep 17 00:00:00 2001
-From: Pavel Moravec <pmoravec@redhat.com>
-Date: Mon, 22 Nov 2021 10:44:09 +0100
-Subject: [PATCH] [collect] fix moved get_upload_url under Policy class
-
-SoSCollector no longer declares the get_upload_url method,
-as it was moved under the Policy class(es).
-
-Resolves: #2766
-
-Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
----
- sos/collector/__init__.py | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/sos/collector/__init__.py b/sos/collector/__init__.py
-index 50183e873..42a7731d6 100644
---- a/sos/collector/__init__.py
-+++ b/sos/collector/__init__.py
-@@ -1219,7 +1219,7 @@ this utility or remote systems that it c
-             msg = 'No sosreports were collected, nothing to archive...'
-             self.exit(msg, 1)
- 
--        if self.opts.upload and self.get_upload_url():
-+        if self.opts.upload and self.policy.get_upload_url():
-             try:
-                 self.policy.upload_archive(arc_name)
-                 self.ui_log.info("Uploaded archive successfully")
-From abb2fc65bd14760021c61699ad3113cab3bd4c64 Mon Sep 17 00:00:00 2001
-From: Pavel Moravec <pmoravec@redhat.com>
-Date: Tue, 30 Nov 2021 11:37:02 +0100
-Subject: [PATCH 1/2] [redhat] Fix broken URI to upload to customer portal
-
-Revert the unwanted change to the URI used when uploading tarballs to
-the Red Hat Customer Portal.
-
-Related: #2772
-
-Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
----
- sos/policies/distros/redhat.py | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/sos/policies/distros/redhat.py b/sos/policies/distros/redhat.py
-index e4e2b883..eb442407 100644
---- a/sos/policies/distros/redhat.py
-+++ b/sos/policies/distros/redhat.py
-@@ -250,7 +250,7 @@ support representative.
-         elif self.commons['cmdlineopts'].upload_protocol == 'sftp':
-             return RH_SFTP_HOST
-         else:
--            rh_case_api = "/hydra/rest/cases/%s/attachments"
-+            rh_case_api = "/support/v1/cases/%s/attachments"
-             return RH_API_HOST + rh_case_api % self.case_id
- 
-     def _get_upload_headers(self):
--- 
-2.31.1
-
-
-From ea4f9e88a412c80a4791396e1bb78ac1e24ece14 Mon Sep 17 00:00:00 2001
-From: Pavel Moravec <pmoravec@redhat.com>
-Date: Tue, 30 Nov 2021 13:00:26 +0100
-Subject: [PATCH 2/2] [policy] Add error message when FTP upload write failure
-
-When an (S)FTP upload fails to write the destination file,
-our "expect" code should detect it sooner than the timeout
-and emit an appropriate error message.
-
-Resolves: #2772
-
-Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
----
- sos/policies/distros/__init__.py | 5 ++++-
- 1 file changed, 4 insertions(+), 1 deletion(-)
-
-diff --git a/sos/policies/distros/__init__.py b/sos/policies/distros/__init__.py
-index 6f257fdc..7bdc81b8 100644
---- a/sos/policies/distros/__init__.py
-+++ b/sos/policies/distros/__init__.py
-@@ -473,7 +473,8 @@ class LinuxPolicy(Policy):
-         put_expects = [
-             u'100%',
-             pexpect.TIMEOUT,
--            pexpect.EOF
-+            pexpect.EOF,
-+            u'No such file or directory'
-         ]
- 
-         put_success = ret.expect(put_expects, timeout=180)
-@@ -485,6 +486,8 @@ class LinuxPolicy(Policy):
-             raise Exception("Timeout expired while uploading")
-         elif put_success == 2:
-             raise Exception("Unknown error during upload: %s" % ret.before)
-+        elif put_success == 3:
-+            raise Exception("Unable to write archive to destination")
-         else:
-             raise Exception("Unexpected response from server: %s" % ret.before)
- 
--- 
-2.31.1
-
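
The expect-list dispatch used above, demonstrated minimally (standalone; 'echo' stands in for the sftp session):

    import pexpect

    # expect() returns the index of the first pattern that matches, so
    # each failure case can be dispatched by index, as in the patch.
    child = pexpect.spawn('/bin/echo 100%', encoding='utf-8')
    put_expects = [
        u'100%',
        pexpect.TIMEOUT,
        pexpect.EOF,
        u'No such file or directory',
    ]
    assert child.expect(put_expects, timeout=5) == 0
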
diff --git a/SOURCES/sos-bz2031777-rhui-logs.patch b/SOURCES/sos-bz2031777-rhui-logs.patch
deleted file mode 100644
index dcfbc89..0000000
--- a/SOURCES/sos-bz2031777-rhui-logs.patch
+++ /dev/null
@@ -1,24 +0,0 @@
-From aa2887f71c779448b22e4de67ae68dbaf218b7b9 Mon Sep 17 00:00:00 2001
-From: Taft Sanders <taftsanders@gmail.com>
-Date: Fri, 10 Dec 2021 09:34:59 -0500
-Subject: [PATCH] [rhui] New log folder
-
-Included a new log folder per Bugzilla 2030741.
-
-Signed-off-by: Taft Sanders <taftsanders@gmail.com>
----
- sos/report/plugins/rhui.py | 1 +
- 1 file changed, 1 insertion(+)
-
-diff --git a/sos/report/plugins/rhui.py b/sos/report/plugins/rhui.py
-index 52065fb44..add024613 100644
---- a/sos/report/plugins/rhui.py
-+++ b/sos/report/plugins/rhui.py
-@@ -27,6 +27,7 @@ def setup(self):
-             "/var/log/rhui-subscription-sync.log",
-             "/var/cache/rhui/*",
-             "/root/.rhui/*",
-+            "/var/log/rhui/*",
-         ])
-         # skip collecting certificate keys
-         self.add_forbidden_path("/etc/pki/rhui/**/*.key", recursive=True)
diff --git a/SOURCES/sos-bz2034001-nvidia-GPU-info.patch b/SOURCES/sos-bz2034001-nvidia-GPU-info.patch
deleted file mode 100644
index 30fbb53..0000000
--- a/SOURCES/sos-bz2034001-nvidia-GPU-info.patch
+++ /dev/null
@@ -1,46 +0,0 @@
-From f2cc67750f55a71edff0c527a1bfc14fde8132c3 Mon Sep 17 00:00:00 2001
-From: Mamatha Inamdar <mamatha4@linux.vnet.ibm.com>
-Date: Mon, 8 Nov 2021 10:50:03 +0530
-Subject: [PATCH] [nvidia]: Patch to update nvidia plugin for GPU info
-
-This patch updates the nvidia plugin to collect
-logs for Nvidia GPUs.
-
-Signed-off-by: Mamatha Inamdar <mamatha4@linux.vnet.ibm.com>
-Reported-by: Borislav Stoymirski <borislav.stoymirski@bg.ibm.com>
-Reported-by: Yesenia Jimenez <yesenia@us.ibm.com>
----
- sos/report/plugins/nvidia.py | 15 +++++++++++++--
- 1 file changed, 13 insertions(+), 2 deletions(-)
-
-diff --git a/sos/report/plugins/nvidia.py b/sos/report/plugins/nvidia.py
-index 09aaf586b..9e21b478e 100644
---- a/sos/report/plugins/nvidia.py
-+++ b/sos/report/plugins/nvidia.py
-@@ -23,13 +23,24 @@ def setup(self):
-             '--list-gpus',
-             '-q -d PERFORMANCE',
-             '-q -d SUPPORTED_CLOCKS',
--            '-q -d PAGE_RETIREMENT'
-+            '-q -d PAGE_RETIREMENT',
-+            '-q',
-+            '-q -d ECC',
-+            'nvlink -s',
-+            'nvlink -e'
-         ]
- 
-         self.add_cmd_output(["nvidia-smi %s" % cmd for cmd in subcmds])
- 
-         query = ('gpu_name,gpu_bus_id,vbios_version,temperature.gpu,'
--                 'utilization.gpu,memory.total,memory.free,memory.used')
-+                 'utilization.gpu,memory.total,memory.free,memory.used,'
-+                 'clocks.applications.graphics,clocks.applications.memory')
-+        querypages = ('timestamp,gpu_bus_id,gpu_serial,gpu_uuid,'
-+                      'retired_pages.address,retired_pages.cause')
-         self.add_cmd_output("nvidia-smi --query-gpu=%s --format=csv" % query)
-+        self.add_cmd_output(
-+            "nvidia-smi --query-retired-pages=%s --format=csv" % querypages
-+        )
-+        self.add_journal(boot=0, identifier='nvidia-persistenced')
- 
- # vim: set et ts=4 sw=4 :
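
For clarity, the list comprehension in the patch expands each subcommand into a full nvidia-smi invocation:

    # A few of the resulting command strings from the new subcmds list.
    subcmds = ['-q -d ECC', 'nvlink -s', 'nvlink -e']
    cmds = ["nvidia-smi %s" % cmd for cmd in subcmds]
    assert cmds == ['nvidia-smi -q -d ECC',
                    'nvidia-smi nvlink -s',
                    'nvidia-smi nvlink -e']
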
diff --git a/SOURCES/sos-bz2037350-ocp-backports.patch b/SOURCES/sos-bz2037350-ocp-backports.patch
deleted file mode 100644
index 3e53e93..0000000
--- a/SOURCES/sos-bz2037350-ocp-backports.patch
+++ /dev/null
@@ -1,5145 +0,0 @@
-From 676dfca09d9c783311a51a1c53fa0f7ecd95bd28 Mon Sep 17 00:00:00 2001
-From: Jake Hunsaker <jhunsake@redhat.com>
-Date: Fri, 10 Sep 2021 13:38:19 -0400
-Subject: [PATCH] [collect] Abstract transport protocol from SoSNode
-
-Since its addition to sos, collect has assumed the use of a system
-installation of SSH in order to connect to the nodes identified for
-collection. However, there are use cases where other transport
-protocols are desirable.
-
-As such, provide an abstraction for these protocols in the form of the
-new `RemoteTransport` class that `SoSNode` will now leverage. So far an
-abstraction for the currently used SSH ControlPersist function is
-provided, along with a pseudo abstraction for local execution so that
-SoSNode does not directly need to make more "if local then foo" checks
-than are absolutely necessary.
-
-Related: #2668
-
-Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
----
- setup.py                                    |   4 +-
- sos/collector/__init__.py                   |  54 +--
- sos/collector/clusters/__init__.py          |   4 +-
- sos/collector/clusters/jbon.py              |   2 +
- sos/collector/clusters/kubernetes.py        |   4 +-
- sos/collector/clusters/ocp.py               |   6 +-
- sos/collector/clusters/ovirt.py             |  10 +-
- sos/collector/clusters/pacemaker.py         |   8 +-
- sos/collector/clusters/satellite.py         |   4 +-
- sos/collector/sosnode.py                    | 388 +++++---------------
- sos/collector/transports/__init__.py        | 317 ++++++++++++++++
- sos/collector/transports/control_persist.py | 199 ++++++++++
- sos/collector/transports/local.py           |  49 +++
- 13 files changed, 705 insertions(+), 344 deletions(-)
- create mode 100644 sos/collector/transports/__init__.py
- create mode 100644 sos/collector/transports/control_persist.py
- create mode 100644 sos/collector/transports/local.py
-
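
A much-reduced sketch of the abstraction this patch introduces (hypothetical, simplified signatures; the real classes carry far more logic): SosNode talks to a transport interface rather than assuming SSH.

    import subprocess

    class RemoteTransport:
        """Minimal stand-in for the transport interface."""
        name = 'undefined'

        def __init__(self, address):
            self.address = address
            self.connected = False

        def connect(self, password=None):
            raise NotImplementedError

        def run_command(self, cmd, timeout=180):
            raise NotImplementedError

        def disconnect(self):
            self.connected = False

    class LocalTransport(RemoteTransport):
        """Pseudo transport: run everything on the local node."""
        name = 'local_node'

        def connect(self, password=None):
            self.connected = True
            return True

        def run_command(self, cmd, timeout=180):
            res = subprocess.run(cmd, shell=True, capture_output=True,
                                 text=True, timeout=timeout)
            # match the {'status': ..., 'output': ...} shape used above
            return {'status': res.returncode, 'output': res.stdout}
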
-diff --git a/setup.py b/setup.py
-index 7653b59d..25e87a71 100644
---- a/setup.py
-+++ b/setup.py
-@@ -101,8 +101,8 @@ setup(
-         'sos.policies.distros', 'sos.policies.runtimes',
-         'sos.policies.package_managers', 'sos.policies.init_systems',
-         'sos.report', 'sos.report.plugins', 'sos.collector',
--        'sos.collector.clusters', 'sos.cleaner', 'sos.cleaner.mappings',
--        'sos.cleaner.parsers', 'sos.cleaner.archives'
-+        'sos.collector.clusters', 'sos.collector.transports', 'sos.cleaner',
-+        'sos.cleaner.mappings', 'sos.cleaner.parsers', 'sos.cleaner.archives'
-     ],
-     cmdclass=cmdclass,
-     command_options=command_options,
-diff --git a/sos/collector/__init__.py b/sos/collector/__init__.py
-index b2a07f37..da912655 100644
---- a/sos/collector/__init__.py
-+++ b/sos/collector/__init__.py
-@@ -17,7 +17,6 @@ import re
- import string
- import socket
- import shutil
--import subprocess
- import sys
- 
- from datetime import datetime
-@@ -28,7 +27,6 @@ from pipes import quote
- from textwrap import fill
- from sos.cleaner import SoSCleaner
- from sos.collector.sosnode import SosNode
--from sos.collector.exceptions import ControlPersistUnsupportedException
- from sos.options import ClusterOption
- from sos.component import SoSComponent
- from sos import __version__
-@@ -154,7 +152,6 @@ class SoSCollector(SoSComponent):
-             try:
-                 self.parse_node_strings()
-                 self.parse_cluster_options()
--                self._check_for_control_persist()
-                 self.log_debug('Executing %s' % ' '.join(s for s in sys.argv))
-                 self.log_debug("Found cluster profiles: %s"
-                                % self.clusters.keys())
-@@ -437,33 +434,6 @@ class SoSCollector(SoSComponent):
-                                  action='extend',
-                                  help='List of usernames to obfuscate')
- 
--    def _check_for_control_persist(self):
--        """Checks to see if the local system supported SSH ControlPersist.
--
--        ControlPersist allows OpenSSH to keep a single open connection to a
--        remote host rather than building a new session each time. This is the
--        same feature that Ansible uses in place of paramiko, which we have a
--        need to drop in sos-collector.
--
--        This check relies on feedback from the ssh binary. The command being
--        run should always generate stderr output, but depending on what that
--        output reads we can determine if ControlPersist is supported or not.
--
--        For our purposes, a host that does not support ControlPersist is not
--        able to run sos-collector.
--
--        Returns
--            True if ControlPersist is supported, else raise Exception.
--        """
--        ssh_cmd = ['ssh', '-o', 'ControlPersist']
--        cmd = subprocess.Popen(ssh_cmd, stdout=subprocess.PIPE,
--                               stderr=subprocess.PIPE)
--        out, err = cmd.communicate()
--        err = err.decode('utf-8')
--        if 'Bad configuration option' in err or 'Usage:' in err:
--            raise ControlPersistUnsupportedException
--        return True
--
-     def exit(self, msg, error=1):
-         """Used to safely terminate if sos-collector encounters an error"""
-         self.log_error(msg)
-@@ -455,7 +455,7 @@ class SoSCollector(SoSComponent):
-             'cmdlineopts': self.opts,
-             'need_sudo': True if self.opts.ssh_user != 'root' else False,
-             'tmpdir': self.tmpdir,
--            'hostlen': len(self.opts.master) or len(self.hostname),
-+            'hostlen': max(len(self.opts.primary), len(self.hostname)),
-             'policy': self.policy
-         }
- 
-@@ -1020,9 +1020,10 @@ class SoSCollector(SoSComponent):
-             self.node_list.append(self.hostname)
-         self.reduce_node_list()
-         try:
--            self.commons['hostlen'] = len(max(self.node_list, key=len))
-+            _node_max = len(max(self.node_list, key=len))
-+            self.commons['hostlen'] = max(_node_max, self.commons['hostlen'])
-         except (TypeError, ValueError):
--            self.commons['hostlen'] = len(self.opts.master)
-+            pass
- 
-     def _connect_to_node(self, node):
-         """Try to connect to the node, and if we can add to the client list to
-@@ -1068,7 +1039,7 @@ class SoSCollector(SoSComponent):
-                 client.set_node_manifest(getattr(self.collect_md.nodes,
-                                                  node[0]))
-             else:
--                client.close_ssh_session()
-+                client.disconnect()
-         except Exception:
-             pass
- 
-@@ -1077,12 +1048,11 @@ class SoSCollector(SoSComponent):
-         provided on the command line
-         """
-         disclaimer = ("""\
--This utility is used to collect sosreports from multiple \
--nodes simultaneously. It uses OpenSSH's ControlPersist feature \
--to connect to nodes and run commands remotely. If your system \
--installation of OpenSSH is older than 5.6, please upgrade.
-+This utility is used to collect sos reports from multiple \
-+nodes simultaneously. Remote connections are made and/or maintained \
-+to those nodes via well-known transport protocols such as SSH.
- 
--An archive of sosreport tarballs collected from the nodes will be \
-+An archive of sos report tarballs collected from the nodes will be \
- generated in %s and may be provided to an appropriate support representative.
- 
- The generated archive may contain data considered sensitive \
-@@ -1230,10 +1200,10 @@ this utility or remote systems that it connects to.
-             self.log_error("Error running sosreport: %s" % err)
- 
-     def close_all_connections(self):
--        """Close all ssh sessions for nodes"""
-+        """Close all sessions for nodes"""
-         for client in self.client_list:
--            self.log_debug('Closing SSH connection to %s' % client.address)
--            client.close_ssh_session()
-+            self.log_debug('Closing connection to %s' % client.address)
-+            client.disconnect()
- 
-     def create_cluster_archive(self):
-         """Calls for creation of tar archive then cleans up the temporary
-diff --git a/sos/collector/clusters/__init__.py b/sos/collector/clusters/__init__.py
-index 2b5d7018..64ac2a44 100644
---- a/sos/collector/clusters/__init__.py
-+++ b/sos/collector/clusters/__init__.py
-@@ -188,8 +188,8 @@ class Cluster():
-         :rtype: ``dict``
-         """
-         res = self.master.run_command(cmd, get_pty=True, need_root=need_root)
--        if res['stdout']:
--            res['stdout'] = res['stdout'].replace('Password:', '')
-+        if res['output']:
-+            res['output'] = res['output'].replace('Password:', '')
-         return res
- 
-     def setup(self):
-diff --git a/sos/collector/clusters/jbon.py b/sos/collector/clusters/jbon.py
-index 488fbd16..8f083ac6 100644
---- a/sos/collector/clusters/jbon.py
-+++ b/sos/collector/clusters/jbon.py
-@@ -28,3 +28,5 @@ class jbon(Cluster):
-         # This should never be called, but as insurance explicitly never
-         # allow this to be enabled via the determine_cluster() path
-         return False
-+
-+# vim: set et ts=4 sw=4 :
-diff --git a/sos/collector/clusters/kubernetes.py b/sos/collector/clusters/kubernetes.py
-index cdbf8861..99f788dc 100644
---- a/sos/collector/clusters/kubernetes.py
-+++ b/sos/collector/clusters/kubernetes.py
-@@ -34,7 +34,7 @@ class kubernetes(Cluster):
-         if res['status'] == 0:
-             nodes = []
-             roles = [x for x in self.get_option('role').split(',') if x]
--            for nodeln in res['stdout'].splitlines()[1:]:
-+            for nodeln in res['output'].splitlines()[1:]:
-                 node = nodeln.split()
-                 if not roles:
-                     nodes.append(node[0])
-@@ -44,3 +44,5 @@ class kubernetes(Cluster):
-             return nodes
-         else:
-             raise Exception('Node enumeration did not return usable output')
-+
-+# vim: set et ts=4 sw=4 :
-diff --git a/sos/collector/clusters/ocp.py b/sos/collector/clusters/ocp.py
-index 5479417d..ad97587f 100644
---- a/sos/collector/clusters/ocp.py
-+++ b/sos/collector/clusters/ocp.py
-@@ -93,7 +93,7 @@ class ocp(Cluster):
-         res = self.exec_master_cmd(self.fmt_oc_cmd(cmd))
-         if res['status'] == 0:
-             roles = [r for r in self.get_option('role').split(':')]
--            self.node_dict = self._build_dict(res['stdout'].splitlines())
-+            self.node_dict = self._build_dict(res['output'].splitlines())
-             for node in self.node_dict:
-                 if roles:
-                     for role in roles:
-@@ -103,7 +103,7 @@ class ocp(Cluster):
-                     nodes.append(node)
-         else:
-             msg = "'oc' command failed"
--            if 'Missing or incomplete' in res['stdout']:
-+            if 'Missing or incomplete' in res['output']:
-                 msg = ("'oc' failed due to missing kubeconfig on master node."
-                        " Specify one via '-c ocp.kubeconfig=<path>'")
-             raise Exception(msg)
-@@ -168,3 +168,5 @@ class ocp(Cluster):
-     def set_node_options(self, node):
-         # don't attempt OC API collections on non-primary nodes
-         node.plugin_options.append('openshift.no-oc=on')
-+
-+# vim: set et ts=4 sw=4 :
-diff --git a/sos/collector/clusters/ovirt.py b/sos/collector/clusters/ovirt.py
-index 079a122e..bd2d0c74 100644
---- a/sos/collector/clusters/ovirt.py
-+++ b/sos/collector/clusters/ovirt.py
-@@ -98,7 +98,7 @@ class ovirt(Cluster):
-             return []
-         res = self._run_db_query(self.dbquery)
-         if res['status'] == 0:
--            nodes = res['stdout'].splitlines()[2:-1]
-+            nodes = res['output'].splitlines()[2:-1]
-             return [n.split('(')[0].strip() for n in nodes]
-         else:
-             raise Exception('database query failed, return code: %s'
-@@ -114,7 +114,7 @@ class ovirt(Cluster):
-         engconf = '/etc/ovirt-engine/engine.conf.d/10-setup-database.conf'
-         res = self.exec_primary_cmd('cat %s' % engconf, need_root=True)
-         if res['status'] == 0:
--            config = res['stdout'].splitlines()
-+            config = res['output'].splitlines()
-             for line in config:
-                 try:
-                     k = str(line.split('=')[0])
-@@ -141,7 +141,7 @@ class ovirt(Cluster):
-                '--batch -o postgresql {}'
-                ).format(self.conf['ENGINE_DB_PASSWORD'], sos_opt)
-         db_sos = self.exec_primary_cmd(cmd, need_root=True)
--        for line in db_sos['stdout'].splitlines():
-+        for line in db_sos['output'].splitlines():
-             if fnmatch.fnmatch(line, '*sosreport-*tar*'):
-                 _pg_dump = line.strip()
-                 self.master.manifest.add_field('postgresql_dump',
-@@ -180,5 +180,7 @@ class rhhi_virt(rhv):
-         ret = self._run_db_query('SELECT count(server_id) FROM gluster_server')
-         if ret['status'] == 0:
-             # if there are any entries in this table, RHHI-V is in use
--            return ret['stdout'].splitlines()[2].strip() != '0'
-+            return ret['output'].splitlines()[2].strip() != '0'
-         return False
-+
-+# vim: set et ts=4 sw=4 :
-diff --git a/sos/collector/clusters/pacemaker.py b/sos/collector/clusters/pacemaker.py
-index 034f3f3e..55024314 100644
---- a/sos/collector/clusters/pacemaker.py
-+++ b/sos/collector/clusters/pacemaker.py
-@@ -27,7 +27,7 @@ class pacemaker(Cluster):
-             self.log_error('Cluster status could not be determined. Is the '
-                            'cluster running on this node?')
-             return []
--        if 'node names do not match' in self.res['stdout']:
-+        if 'node names do not match' in self.res['output']:
-             self.log_warn('Warning: node name mismatch reported. Attempts to '
-                           'connect to some nodes may fail.\n')
-         return self.parse_pcs_output()
-@@ -41,17 +41,19 @@ class pacemaker(Cluster):
-         return nodes
- 
-     def get_online_nodes(self):
--        for line in self.res['stdout'].splitlines():
-+        for line in self.res['output'].splitlines():
-             if line.startswith('Online:'):
-                 nodes = line.split('[')[1].split(']')[0]
-                 return [n for n in nodes.split(' ') if n]
- 
-     def get_offline_nodes(self):
-         offline = []
--        for line in self.res['stdout'].splitlines():
-+        for line in self.res['output'].splitlines():
-             if line.startswith('Node') and line.endswith('(offline)'):
-                 offline.append(line.split()[1].replace(':', ''))
-             if line.startswith('OFFLINE:'):
-                 nodes = line.split('[')[1].split(']')[0]
-                 offline.extend([n for n in nodes.split(' ') if n])
-         return offline
-+
-+# vim: set et ts=4 sw=4 :
-diff --git a/sos/collector/clusters/satellite.py b/sos/collector/clusters/satellite.py
-index e123c8a3..7c21e553 100644
---- a/sos/collector/clusters/satellite.py
-+++ b/sos/collector/clusters/satellite.py
-@@ -28,7 +28,7 @@ class satellite(Cluster):
-         res = self.exec_primary_cmd(cmd, need_root=True)
-         if res['status'] == 0:
-             nodes = [
--                n.strip() for n in res['stdout'].splitlines()
-+                n.strip() for n in res['output'].splitlines()
-                 if 'could not change directory' not in n
-             ]
-             return nodes
-@@ -38,3 +38,5 @@ class satellite(Cluster):
-         if node.address == self.master.address:
-             return 'satellite'
-         return 'capsule'
-+
-+# vim: set et ts=4 sw=4 :
-diff --git a/sos/collector/sosnode.py b/sos/collector/sosnode.py
-index 4b1ee109..f79bd5ff 100644
---- a/sos/collector/sosnode.py
-+++ b/sos/collector/sosnode.py
-@@ -12,22 +12,16 @@ import fnmatch
- import inspect
- import logging
- import os
--import pexpect
- import re
--import shutil
- 
- from distutils.version import LooseVersion
- from pipes import quote
- from sos.policies import load
- from sos.policies.init_systems import InitSystem
--from sos.collector.exceptions import (InvalidPasswordException,
--                                      TimeoutPasswordAuthException,
--                                      PasswordRequestException,
--                                      AuthPermissionDeniedException,
-+from sos.collector.transports.control_persist import SSHControlPersist
-+from sos.collector.transports.local import LocalTransport
-+from sos.collector.exceptions import (CommandTimeoutException,
-                                       ConnectionException,
--                                      CommandTimeoutException,
--                                      ConnectionTimeoutException,
--                                      ControlSocketMissingException,
-                                       UnsupportedHostException)
- 
- 
-@@ -61,34 +61,25 @@ class SosNode():
-             'sos_cmd': commons['sos_cmd']
-         }
-         self.sos_bin = 'sosreport'
--        filt = ['localhost', '127.0.0.1']
-         self.soslog = logging.getLogger('sos')
-         self.ui_log = logging.getLogger('sos_ui')
--        self.control_path = ("%s/.sos-collector-%s"
--                             % (self.tmpdir, self.address))
--        self.ssh_cmd = self._create_ssh_command()
--        if self.address not in filt:
--            try:
--                self.connected = self._create_ssh_session()
--            except Exception as err:
--                self.log_error('Unable to open SSH session: %s' % err)
--                raise
--        else:
--            self.connected = True
--            self.local = True
--            self.need_sudo = os.getuid() != 0
-+        self._transport = self._load_remote_transport(commons)
-+        try:
-+            self._transport.connect(self._password)
-+        except Exception as err:
-+            self.log_error('Unable to open remote session: %s' % err)
-+            raise
-         # load the host policy now, even if we don't want to load further
-         # host information. This is necessary if we're running locally on the
-         # cluster master but do not want a local report as we still need to do
-         # package checks in that instance
-         self.host = self.determine_host_policy()
--        self.get_hostname()
-+        self.hostname = self._transport.hostname
-         if self.local and self.opts.no_local:
-             load_facts = False
-         if self.connected and load_facts:
-             if not self.host:
--                self.connected = False
--                self.close_ssh_session()
-+                self._transport.disconnect()
-                 return None
-             if self.local:
-                 if self.check_in_container():
-@@ -103,11 +88,26 @@ class SosNode():
-                 self.create_sos_container()
-             self._load_sos_info()
- 
--    def _create_ssh_command(self):
--        """Build the complete ssh command for this node"""
--        cmd = "ssh -oControlPath=%s " % self.control_path
--        cmd += "%s@%s " % (self.opts.ssh_user, self.address)
--        return cmd
-+    @property
-+    def connected(self):
-+        if self._transport:
-+            return self._transport.connected
-+        # if no transport, we're running locally
-+        return True
-+
-+    def disconnect(self):
-+        """Wrapper to close the remote session via our transport agent
-+        """
-+        self._transport.disconnect()
-+
-+    def _load_remote_transport(self, commons):
-+        """Determine the type of remote transport to load for this node, then
-+        return an instantiated instance of that transport
-+        """
-+        if self.address in ['localhost', '127.0.0.1']:
-+            self.local = True
-+            return LocalTransport(self.address, commons)
-+        return SSHControlPersist(self.address, commons)
- 
-     def _fmt_msg(self, msg):
-         return '{:<{}} : {}'.format(self._hostname, self.hostlen + 1, msg)
-@@ -135,6 +135,7 @@ class SosNode():
-         self.manifest.add_field('policy', self.host.distro)
-         self.manifest.add_field('sos_version', self.sos_info['version'])
-         self.manifest.add_field('final_sos_command', '')
-+        self.manifest.add_field('transport', self._transport.name)
- 
-     def check_in_container(self):
-         """
-@@ -160,13 +161,13 @@ class SosNode():
-             res = self.run_command(cmd, need_root=True)
-             if res['status'] in [0, 125]:
-                 if res['status'] == 125:
--                    if 'unable to retrieve auth token' in res['stdout']:
-+                    if 'unable to retrieve auth token' in res['output']:
-                         self.log_error(
-                             "Could not pull image. Provide either a username "
-                             "and password or authfile"
-                         )
-                         raise Exception
--                    elif 'unknown: Not found' in res['stdout']:
-+                    elif 'unknown: Not found' in res['output']:
-                         self.log_error('Specified image not found on registry')
-                         raise Exception
-                     # 'name exists' with code 125 means the container was
-@@ -181,11 +182,11 @@ class SosNode():
-                     return True
-                 else:
-                     self.log_error("Could not start container after create: %s"
--                                   % ret['stdout'])
-+                                   % ret['output'])
-                     raise Exception
-             else:
-                 self.log_error("Could not create container on host: %s"
--                               % res['stdout'])
-+                               % res['output'])
-                 raise Exception
- 
-     def get_container_auth(self):
-@@ -204,18 +205,11 @@ class SosNode():
- 
-     def file_exists(self, fname, need_root=False):
-         """Checks for the presence of fname on the remote node"""
--        if not self.local:
--            try:
--                res = self.run_command("stat %s" % fname, need_root=need_root)
--                return res['status'] == 0
--            except Exception:
--                return False
--        else:
--            try:
--                os.stat(fname)
--                return True
--            except Exception:
--                return False
-+        try:
-+            res = self.run_command("stat %s" % fname, need_root=need_root)
-+            return res['status'] == 0
-+        except Exception:
-+            return False
- 
-     @property
-     def _hostname(self):
-@@ -223,18 +217,6 @@ class SosNode():
-             return self.hostname
-         return self.address
- 
--    @property
--    def control_socket_exists(self):
--        """Check if the SSH control socket exists
--
--        The control socket is automatically removed by the SSH daemon in the
--        event that the last connection to the node was greater than the timeout
--        set by the ControlPersist option. This can happen for us if we are
--        collecting from a large number of nodes, and the timeout expires before
--        we start collection.
--        """
--        return os.path.exists(self.control_path)
--
-     def _sanitize_log_msg(self, msg):
-         """Attempts to obfuscate sensitive information in log messages such as
-         passwords"""
-@@ -264,12 +246,6 @@ class SosNode():
-         msg = '[%s:%s] %s' % (self._hostname, caller, msg)
-         self.soslog.debug(msg)
- 
--    def get_hostname(self):
--        """Get the node's hostname"""
--        sout = self.run_command('hostname')
--        self.hostname = sout['stdout'].strip()
--        self.log_info('Hostname set to %s' % self.hostname)
--
-     def _format_cmd(self, cmd):
-         """If we need to provide a sudo or root password to a command, then
-         here we prefix the command with the correct bits
-@@ -280,19 +256,6 @@ class SosNode():
-             return "sudo -S %s" % cmd
-         return cmd
- 
--    def _fmt_output(self, output=None, rc=0):
--        """Formats the returned output from a command into a dict"""
--        if rc == 0:
--            stdout = output
--            stderr = ''
--        else:
--            stdout = ''
--            stderr = output
--        res = {'status': rc,
--               'stdout': stdout,
--               'stderr': stderr}
--        return res
--
-     def _load_sos_info(self):
-         """Queries the node for information about the installed version of sos
-         """
-@@ -306,7 +269,7 @@ class SosNode():
-             pkgs = self.run_command(self.host.container_version_command,
-                                     use_container=True, need_root=True)
-             if pkgs['status'] == 0:
--                ver = pkgs['stdout'].strip().split('-')[1]
-+                ver = pkgs['output'].strip().split('-')[1]
-                 if ver:
-                     self.sos_info['version'] = ver
-             else:
-@@ -321,18 +284,21 @@ class SosNode():
-                 self.log_error('sos is not installed on this node')
-             self.connected = False
-             return False
--        cmd = 'sosreport -l'
-+        # sos-4.0 changes the binary
-+        if self.check_sos_version('4.0'):
-+            self.sos_bin = 'sos report'
-+        cmd = "%s -l" % self.sos_bin
-         sosinfo = self.run_command(cmd, use_container=True, need_root=True)
-         if sosinfo['status'] == 0:
--            self._load_sos_plugins(sosinfo['stdout'])
-+            self._load_sos_plugins(sosinfo['output'])
-         if self.check_sos_version('3.6'):
-             self._load_sos_presets()
- 
-     def _load_sos_presets(self):
--        cmd = 'sosreport --list-presets'
-+        cmd = '%s --list-presets' % self.sos_bin
-         res = self.run_command(cmd, use_container=True, need_root=True)
-         if res['status'] == 0:
--            for line in res['stdout'].splitlines():
-+            for line in res['output'].splitlines():
-                 if line.strip().startswith('name:'):
-                     pname = line.split('name:')[1].strip()
-                     self.sos_info['presets'].append(pname)
-@@ -372,21 +338,7 @@ class SosNode():
-         """Reads the specified file and returns the contents"""
-         try:
-             self.log_info("Reading file %s" % to_read)
--            if not self.local:
--                res = self.run_command("cat %s" % to_read, timeout=5)
--                if res['status'] == 0:
--                    return res['stdout']
--                else:
--                    if 'No such file' in res['stdout']:
--                        self.log_debug("File %s does not exist on node"
--                                       % to_read)
--                    else:
--                        self.log_error("Error reading %s: %s" %
--                                       (to_read, res['stdout'].split(':')[1:]))
--                    return ''
--            else:
--                with open(to_read, 'r') as rfile:
--                    return rfile.read()
-+            return self._transport.read_file(to_read)
-         except Exception as err:
-             self.log_error("Exception while reading %s: %s" % (to_read, err))
-             return ''
-@@ -400,7 +352,8 @@ class SosNode():
-                           % self.commons['policy'].distro)
-             return self.commons['policy']
-         host = load(cache={}, sysroot=self.opts.sysroot, init=InitSystem(),
--                    probe_runtime=True, remote_exec=self.ssh_cmd,
-+                    probe_runtime=True,
-+                    remote_exec=self._transport.remote_exec,
-                     remote_check=self.read_file('/etc/os-release'))
-         if host:
-             self.log_info("loaded policy %s for host" % host.distro)
-@@ -422,7 +375,7 @@ class SosNode():
-         return self.host.package_manager.pkg_by_name(pkg) is not None
- 
-     def run_command(self, cmd, timeout=180, get_pty=False, need_root=False,
--                    force_local=False, use_container=False, env=None):
-+                    use_container=False, env=None):
-         """Runs a given cmd, either via the SSH session or locally
- 
-         Arguments:
-@@ -433,58 +386,37 @@ class SosNode():
-             need_root - if a command requires root privileges, setting this to
-                         True tells sos-collector to format the command with
-                         sudo or su - as appropriate and to input the password
--            force_local - force a command to run locally. Mainly used for scp.
-             use_container - Run this command in a container *IF* the host is
-                             containerized
-         """
--        if not self.control_socket_exists and not self.local:
--            self.log_debug('Control socket does not exist, attempting to '
--                           're-create')
-+        if not self.connected and not self.local:
-+            self.log_debug('Node is disconnected, attempting to reconnect')
-             try:
--                _sock = self._create_ssh_session()
--                if not _sock:
--                    self.log_debug('Failed to re-create control socket')
--                    raise ControlSocketMissingException
-+                reconnected = self._transport.reconnect(self._password)
-+                if not reconnected:
-+                    self.log_debug('Failed to reconnect to node')
-+                    raise ConnectionException
-             except Exception as err:
--                self.log_error('Cannot run command: control socket does not '
--                               'exist')
--                self.log_debug("Error while trying to create new SSH control "
--                               "socket: %s" % err)
-+                self.log_debug("Error while trying to reconnect: %s" % err)
-                 raise
-         if use_container and self.host.containerized:
-             cmd = self.host.format_container_command(cmd)
-         if need_root:
--            get_pty = True
-             cmd = self._format_cmd(cmd)
--        self.log_debug('Running command %s' % cmd)
-+
-         if 'atomic' in cmd:
-             get_pty = True
--        if not self.local and not force_local:
--            cmd = "%s %s" % (self.ssh_cmd, quote(cmd))
--        else:
--            if get_pty:
--                cmd = "/bin/bash -c %s" % quote(cmd)
-+
-+        if get_pty:
-+            cmd = "/bin/bash -c %s" % quote(cmd)
-+
-         if env:
-             _cmd_env = self.env_vars
-             env = _cmd_env.update(env)
--        res = pexpect.spawn(cmd, encoding='utf-8', env=env)
--        if need_root:
--            if self.need_sudo:
--                res.sendline(self.opts.sudo_pw)
--            if self.opts.become_root:
--                res.sendline(self.opts.root_password)
--        output = res.expect([pexpect.EOF, pexpect.TIMEOUT],
--                            timeout=timeout)
--        if output == 0:
--            out = res.before
--            res.close()
--            rc = res.exitstatus
--            return {'status': rc, 'stdout': out}
--        elif output == 1:
--            raise CommandTimeoutException(cmd)
-+        return self._transport.run_command(cmd, timeout, need_root, env)
- 
-     def sosreport(self):
--        """Run a sosreport on the node, then collect it"""
-+        """Run an sos report on the node, then collect it"""
-         try:
-             path = self.execute_sos_command()
-             if path:
-@@ -497,109 +429,6 @@ class SosNode():
-             pass
-         self.cleanup()
- 
--    def _create_ssh_session(self):
--        """
--        Using ControlPersist, create the initial connection to the node.
--
--        This will generate an OpenSSH ControlPersist socket within the tmp
--        directory created or specified for sos-collector to use.
--
--        At most, we will wait 30 seconds for a connection. This involves a 15
--        second wait for the initial connection attempt, and a subsequent 15
--        second wait for a response when we supply a password.
--
--        Since we connect to nodes in parallel (using the --threads value), this
--        means that the time between 'Connecting to nodes...' and 'Beginning
--        collection of sosreports' that users see can be up to an amount of time
--        equal to 30*(num_nodes/threads) seconds.
--
--        Returns
--            True if session is successfully opened, else raise Exception
--        """
--        # Don't use self.ssh_cmd here as we need to add a few additional
--        # parameters to establish the initial connection
--        self.log_info('Opening SSH session to create control socket')
--        connected = False
--        ssh_key = ''
--        ssh_port = ''
--        if self.opts.ssh_port != 22:
--            ssh_port = "-p%s " % self.opts.ssh_port
--        if self.opts.ssh_key:
--            ssh_key = "-i%s" % self.opts.ssh_key
--        cmd = ("ssh %s %s -oControlPersist=600 -oControlMaster=auto "
--               "-oStrictHostKeyChecking=no -oControlPath=%s %s@%s "
--               "\"echo Connected\"" % (ssh_key,
--                                       ssh_port,
--                                       self.control_path,
--                                       self.opts.ssh_user,
--                                       self.address))
--        res = pexpect.spawn(cmd, encoding='utf-8')
--
--        connect_expects = [
--            u'Connected',
--            u'password:',
--            u'.*Permission denied.*',
--            u'.* port .*: No route to host',
--            u'.*Could not resolve hostname.*',
--            pexpect.TIMEOUT
--        ]
--
--        index = res.expect(connect_expects, timeout=15)
--
--        if index == 0:
--            connected = True
--        elif index == 1:
--            if self._password:
--                pass_expects = [
--                    u'Connected',
--                    u'Permission denied, please try again.',
--                    pexpect.TIMEOUT
--                ]
--                res.sendline(self._password)
--                pass_index = res.expect(pass_expects, timeout=15)
--                if pass_index == 0:
--                    connected = True
--                elif pass_index == 1:
--                    # Note that we do not get an exitstatus here, so matching
--                    # this line means an invalid password will be reported for
--                    # both invalid passwords and invalid user names
--                    raise InvalidPasswordException
--                elif pass_index == 2:
--                    raise TimeoutPasswordAuthException
--            else:
--                raise PasswordRequestException
--        elif index == 2:
--            raise AuthPermissionDeniedException
--        elif index == 3:
--            raise ConnectionException(self.address, self.opts.ssh_port)
--        elif index == 4:
--            raise ConnectionException(self.address)
--        elif index == 5:
--            raise ConnectionTimeoutException
--        else:
--            raise Exception("Unknown error, client returned %s" % res.before)
--        if connected:
--            self.log_debug("Successfully created control socket at %s"
--                           % self.control_path)
--            return True
--        return False
--
--    def close_ssh_session(self):
--        """Remove the control socket to effectively terminate the session"""
--        if self.local:
--            return True
--        try:
--            res = self.run_command("rm -f %s" % self.control_path,
--                                   force_local=True)
--            if res['status'] == 0:
--                return True
--            self.log_error("Could not remove ControlPath %s: %s"
--                           % (self.control_path, res['stdout']))
--            return False
--        except Exception as e:
--            self.log_error('Error closing SSH session: %s' % e)
--            return False
--
-     def _preset_exists(self, preset):
-         """Verifies if the given preset exists on the node"""
-         return preset in self.sos_info['presets']
-@@ -646,8 +475,8 @@ class SosNode():
-         self.cluster = cluster
- 
-     def update_cmd_from_cluster(self):
--        """This is used to modify the sosreport command run on the nodes.
--        By default, sosreport is run without any options, using this will
-+        """This is used to modify the sos report command run on the nodes.
-+        By default, sos report is run without any options, using this will
-         allow the profile to specify what plugins to run or not and what
-         options to use.
- 
-@@ -727,10 +556,6 @@ class SosNode():
-             if self.opts.since:
-                 sos_opts.append('--since=%s' % quote(self.opts.since))
- 
--        # sos-4.0 changes the binary
--        if self.check_sos_version('4.0'):
--            self.sos_bin = 'sos report'
--
-         if self.check_sos_version('4.1'):
-             if self.opts.skip_commands:
-                 sos_opts.append(
-@@ -811,7 +636,7 @@ class SosNode():
-         self.manifest.add_field('final_sos_command', self.sos_cmd)
- 
-     def determine_sos_label(self):
--        """Determine what, if any, label should be added to the sosreport"""
-+        """Determine what, if any, label should be added to the sos report"""
-         label = ''
-         label += self.cluster.get_node_label(self)
- 
-@@ -822,7 +647,7 @@ class SosNode():
-         if not label:
-             return None
- 
--        self.log_debug('Label for sosreport set to %s' % label)
-+        self.log_debug('Label for sos report set to %s' % label)
-         if self.check_sos_version('3.6'):
-             lcmd = '--label'
-         else:
-@@ -844,20 +669,20 @@ class SosNode():
- 
-     def determine_sos_error(self, rc, stdout):
-         if rc == -1:
--            return 'sosreport process received SIGKILL on node'
-+            return 'sos report process received SIGKILL on node'
-         if rc == 1:
-             if 'sudo' in stdout:
-                 return 'sudo attempt failed'
-         if rc == 127:
--            return 'sosreport terminated unexpectedly. Check disk space'
-+            return 'sos report terminated unexpectedly. Check disk space'
-         if len(stdout) > 0:
-             return stdout.split('\n')[0:1]
-         else:
-             return 'sos exited with code %s' % rc
- 
-     def execute_sos_command(self):
--        """Run sosreport and capture the resulting file path"""
--        self.ui_msg('Generating sosreport...')
-+        """Run sos report and capture the resulting file path"""
-+        self.ui_msg('Generating sos report...')
-         try:
-             path = False
-             checksum = False
-@@ -867,7 +692,7 @@ class SosNode():
-                                    use_container=True,
-                                    env=self.sos_env_vars)
-             if res['status'] == 0:
--                for line in res['stdout'].splitlines():
-+                for line in res['output'].splitlines():
-                     if fnmatch.fnmatch(line, '*sosreport-*tar*'):
-                         path = line.strip()
-                     if line.startswith((" sha256\t", " md5\t")):
-@@ -884,44 +709,31 @@ class SosNode():
-                     else:
-                         self.manifest.add_field('checksum_type', 'unknown')
-             else:
--                err = self.determine_sos_error(res['status'], res['stdout'])
--                self.log_debug("Error running sosreport. rc = %s msg = %s"
--                               % (res['status'], res['stdout'] or
--                                  res['stderr']))
-+                err = self.determine_sos_error(res['status'], res['output'])
-+                self.log_debug("Error running sos report. rc = %s msg = %s"
-+                               % (res['status'], res['output']))
-                 raise Exception(err)
-             return path
-         except CommandTimeoutException:
-             self.log_error('Timeout exceeded')
-             raise
-         except Exception as e:
--            self.log_error('Error running sosreport: %s' % e)
-+            self.log_error('Error running sos report: %s' % e)
-             raise
- 
-     def retrieve_file(self, path):
-         """Copies the specified file from the host to our temp dir"""
-         destdir = self.tmpdir + '/'
--        dest = destdir + path.split('/')[-1]
-+        dest = os.path.join(destdir, path.split('/')[-1])
-         try:
--            if not self.local:
--                if self.file_exists(path):
--                    self.log_info("Copying remote %s to local %s" %
--                                  (path, destdir))
--                    cmd = "/usr/bin/scp -oControlPath=%s %s@%s:%s %s" % (
--                        self.control_path,
--                        self.opts.ssh_user,
--                        self.address,
--                        path,
--                        destdir
--                    )
--                    res = self.run_command(cmd, force_local=True)
--                    return res['status'] == 0
--                else:
--                    self.log_debug("Attempting to copy remote file %s, but it "
--                                   "does not exist on filesystem" % path)
--                    return False
-+            if self.file_exists(path):
-+                self.log_info("Copying remote %s to local %s" %
-+                              (path, destdir))
-+                self._transport.retrieve_file(path, dest)
-             else:
--                self.log_debug("Moving %s to %s" % (path, destdir))
--                shutil.copy(path, dest)
-+                self.log_debug("Attempting to copy remote file %s, but it "
-+                               "does not exist on filesystem" % path)
-+                return False
-             return True
-         except Exception as err:
-             self.log_debug("Failed to retrieve %s: %s" % (path, err))
-@@ -933,7 +745,7 @@ class SosNode():
-         """
-         path = ''.join(path.split())
-         try:
--            if len(path) <= 2:  # ensure we have a non '/' path
-+            if len(path.split('/')) <= 2:  # ensure we have a non '/' path
-                 self.log_debug("Refusing to remove path %s: appears to be "
-                                "incorrect and possibly dangerous" % path)
-                 return False
-@@ -959,14 +771,14 @@ class SosNode():
-                 except Exception:
-                     self.log_error('Failed to make archive readable')
-                     return False
--            self.soslog.info('Retrieving sosreport from %s' % self.address)
--            self.ui_msg('Retrieving sosreport...')
-+            self.soslog.info('Retrieving sos report from %s' % self.address)
-+            self.ui_msg('Retrieving sos report...')
-             ret = self.retrieve_file(self.sos_path)
-             if ret:
--                self.ui_msg('Successfully collected sosreport')
-+                self.ui_msg('Successfully collected sos report')
-                 self.file_list.append(self.sos_path.split('/')[-1])
-             else:
--                self.log_error('Failed to retrieve sosreport')
-+                self.log_error('Failed to retrieve sos report')
-                 raise SystemExit
-             return True
-         else:
-@@ -976,8 +788,8 @@ class SosNode():
-             else:
-             e = [x.strip() for x in self.stdout.readlines() if x.strip()][-1]
-             self.soslog.error(
--                'Failed to run sosreport on %s: %s' % (self.address, e))
--            self.log_error('Failed to run sosreport. %s' % e)
-+                'Failed to run sos report on %s: %s' % (self.address, e))
-+            self.log_error('Failed to run sos report. %s' % e)
-             return False
- 
-     def remove_sos_archive(self):
-@@ -986,20 +798,20 @@ class SosNode():
-         if self.sos_path is None:
-             return
-         if 'sosreport' not in self.sos_path:
--            self.log_debug("Node sosreport path %s looks incorrect. Not "
-+            self.log_debug("Node sos report path %s looks incorrect. Not "
-                            "attempting to remove path" % self.sos_path)
-             return
-         removed = self.remove_file(self.sos_path)
-         if not removed:
--            self.log_error('Failed to remove sosreport')
-+            self.log_error('Failed to remove sos report')
- 
-     def cleanup(self):
-         """Remove the sos archive from the node once we have it locally"""
-         self.remove_sos_archive()
-         if self.sos_path:
-             for ext in ['.sha256', '.md5']:
--                if os.path.isfile(self.sos_path + ext):
--                    self.remove_file(self.sos_path + ext)
-+                if self.remove_file(self.sos_path + ext):
-+                    break
-         cleanup = self.host.set_cleanup_cmd()
-         if cleanup:
-             self.run_command(cleanup, need_root=True)
-@@ -1040,3 +852,5 @@ class SosNode():
-             msg = "Exception while making %s readable. Return code was %s"
-             self.log_error(msg % (filepath, res['status']))
-             raise Exception
-+
-+# vim: set et ts=4 sw=4 :
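The `len(path.split('/')) <= 2` change in the remove_file() hunk above is easy to misread, so here is a quick illustration of which paths the new guard refuses (plain Python, an editorial sketch rather than part of the patch):

```python
# Sketch of the remove_file() path guard introduced above.
def refused(path):
    path = ''.join(path.split())      # strip all whitespace, as the code does
    return len(path.split('/')) <= 2  # True -> refuse to remove

assert refused('/')                                    # ['', ''] -> refused
assert refused('/tmp')                                 # ['', 'tmp'] -> refused
assert not refused('/var/tmp/sosreport-node1.tar.xz')  # deep path -> allowed
```

The old `len(path) <= 2` test only rejected literal one- or two-character strings such as `/`, so a path like `/tmp` would have passed; counting path components is the stricter and safer check.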
-diff --git a/sos/collector/transports/__init__.py b/sos/collector/transports/__init__.py
-new file mode 100644
-index 00000000..5be7dc6d
---- /dev/null
-+++ b/sos/collector/transports/__init__.py
-@@ -0,0 +1,317 @@
-+# Copyright Red Hat 2021, Jake Hunsaker <jhunsake@redhat.com>
-+
-+# This file is part of the sos project: https://github.com/sosreport/sos
-+#
-+# This copyrighted material is made available to anyone wishing to use,
-+# modify, copy, or redistribute it subject to the terms and conditions of
-+# version 2 of the GNU General Public License.
-+#
-+# See the LICENSE file in the source distribution for further information.
-+
-+import inspect
-+import logging
-+import pexpect
-+import re
-+
-+from pipes import quote
-+from sos.collector.exceptions import (ConnectionException,
-+                                      CommandTimeoutException)
-+
-+
-+class RemoteTransport():
-+    """The base class used for defining supported remote transports to connect
-+    to remote nodes in conjunction with `sos collect`.
-+
-+    This abstraction is used to manage the backend connections to nodes so that
-+    SoSNode() objects can be leveraged generically to connect to nodes, inspect
-+    those nodes, and run commands on them.
-+    """
-+
-+    name = 'undefined'
-+
-+    def __init__(self, address, commons):
-+        self.address = address
-+        self.opts = commons['cmdlineopts']
-+        self.tmpdir = commons['tmpdir']
-+        self.need_sudo = commons['need_sudo']
-+        self._hostname = None
-+        self.soslog = logging.getLogger('sos')
-+        self.ui_log = logging.getLogger('sos_ui')
-+
-+    def _sanitize_log_msg(self, msg):
-+        """Attempts to obfuscate sensitive information in log messages such as
-+        passwords"""
-+        reg = r'(?P<var>(pass|key|secret|PASS|KEY|SECRET).*?=)(?P<value>.*?\s)'
-+        return re.sub(reg, r'\g<var>****** ', msg)
-+
-+    def log_info(self, msg):
-+        """Used to print and log info messages"""
-+        caller = inspect.stack()[1][3]
-+        lmsg = '[%s:%s] %s' % (self.hostname, caller, msg)
-+        self.soslog.info(lmsg)
-+
-+    def log_error(self, msg):
-+        """Used to print and log error messages"""
-+        caller = inspect.stack()[1][3]
-+        lmsg = '[%s:%s] %s' % (self.hostname, caller, msg)
-+        self.soslog.error(lmsg)
-+
-+    def log_debug(self, msg):
-+        """Used to print and log debug messages"""
-+        msg = self._sanitize_log_msg(msg)
-+        caller = inspect.stack()[1][3]
-+        msg = '[%s:%s] %s' % (self.hostname, caller, msg)
-+        self.soslog.debug(msg)
-+
-+    @property
-+    def hostname(self):
-+        if self._hostname and 'localhost' not in self._hostname:
-+            return self._hostname
-+        return self.address
-+
-+    @property
-+    def connected(self):
-+        """Is the transport __currently__ connected to the node, or otherwise
-+        capable of seamlessly running a command or similar on the node?
-+        """
-+        return False
-+
-+    @property
-+    def remote_exec(self):
-+        """This is the command string needed to leverage the remote transport
-+        when executing commands. For example, for an SSH transport this would
-+        be the `ssh <options>` string prepended to any command so that the
-+        command is executed by the ssh binary.
-+
-+        This is also referenced by the `remote_exec` parameter for policies
-+        when loading a policy for a remote node
-+        """
-+        return None
-+
-+    def connect(self, password):
-+        """Perform the connection steps in order to ensure that we are able to
-+        connect to the node for all future operations. Note that this should
-+        not provide an interactive shell at this time.
-+        """
-+        if self._connect(password):
-+            if not self._hostname:
-+                self._get_hostname()
-+            return True
-+        return False
-+
-+    def _connect(self, password):
-+        """Actually perform the connection requirements. Should be overridden
-+        by specific transports that subclass RemoteTransport
-+        """
-+        raise NotImplementedError("Transport %s does not define connect"
-+                                  % self.name)
-+
-+    def reconnect(self, password):
-+        """Attempts to reconnect to the node using the standard connect()
-+        but does not do so indefinitely. This imposes a strict number of retry
-+        attempts before failing out
-+        """
-+        attempts = 1
-+        last_err = 'unknown'
-+        while attempts <= 5:
-+            self.log_debug("Attempting reconnect (#%s) to node" % attempts)
-+            try:
-+                if self.connect(password):
-+                    return True
-+            except Exception as err:
-+                self.log_debug("Attempt #%s exception: %s" % (attempts, err))
-+                last_err = err
-+            attempts += 1
-+        self.log_error("Unable to reconnect to node after 5 attempts, "
-+                       "aborting.")
-+        raise ConnectionException("last exception from transport: %s"
-+                                  % last_err)
-+
-+    def disconnect(self):
-+        """Perform whatever steps are necessary, if any, to terminate any
-+        connection to the node
-+        """
-+        try:
-+            if self._disconnect():
-+                self.log_debug("Successfully disconnected from node")
-+            else:
-+                self.log_error("Unable to successfully disconnect, see log for"
-+                               " more details")
-+        except Exception as err:
-+            self.log_error("Failed to disconnect: %s" % err)
-+
-+    def _disconnect(self):
-+        raise NotImplementedError("Transport %s does not define disconnect"
-+                                  % self.name)
-+
-+    def run_command(self, cmd, timeout=180, need_root=False, env=None):
-+        """Run a command on the node, returning its output and exit code.
-+        This should return the exit code of the command being executed, not the
-+        exit code of whatever mechanism the transport uses to execute that
-+        command
-+
-+        :param cmd:         The command to run
-+        :type cmd:          ``str``
-+
-+        :param timeout:     The maximum time in seconds to allow the cmd to run
-+        :type timeout:      ``int``
-+
-+        :param need_root:   Does ``cmd`` require root privileges?
-+        :type need_root:    ``bool``
-+
-+        :param env:         Specify env vars to be passed to the ``cmd``
-+        :type env:          ``dict``
-+
-+        :returns:           Output of ``cmd`` and the exit code
-+        :rtype:             ``dict`` with keys ``output`` and ``status``
-+        """
-+        self.log_debug('Running command %s' % cmd)
-+        # currently we only use/support the use of pexpect for handling the
-+        # execution of these commands, as opposed to directly invoking
-+        # subprocess.Popen() in conjunction with tools like sshpass.
-+        # If that changes in the future, we'll add decision making logic here
-+        # to route to the appropriate handler, but for now we just go straight
-+        # to using pexpect
-+        return self._run_command_with_pexpect(cmd, timeout, need_root, env)
-+
-+    def _format_cmd_for_exec(self, cmd):
-+        """Format the command in the way needed for the remote transport to
-+        successfully execute it as one would when manually executing it
-+
-+        :param cmd:     The command being executed, as formatted by SoSNode
-+        :type cmd:      ``str``
-+
-+        :returns:       The command further formatted as needed by this
-+                        transport
-+        :rtype:         ``str``
-+        """
-+        cmd = "%s %s" % (self.remote_exec, quote(cmd))
-+        cmd = cmd.lstrip()
-+        return cmd
-+
-+    def _run_command_with_pexpect(self, cmd, timeout, need_root, env):
-+        """Execute the command using pexpect, which allows us to more easily
-+        handle prompts and timeouts compared to directly leveraging the
-+        subprocess.Popen() method.
-+
-+        :param cmd:     The command to execute. This will be automatically
-+                        formatted to use the transport.
-+        :type cmd:      ``str``
-+
-+        :param timeout: The maximum time in seconds to run ``cmd``
-+        :type timeout:  ``int``
-+
-+        :param need_root:   Does ``cmd`` need to run as root or with sudo?
-+        :type need_root:    ``bool``
-+
-+        :param env:     Any env vars that ``cmd`` should be run with
-+        :type env:      ``dict``
-+        """
-+        cmd = self._format_cmd_for_exec(cmd)
-+        result = pexpect.spawn(cmd, encoding='utf-8', env=env)
-+
-+        _expects = [pexpect.EOF, pexpect.TIMEOUT]
-+        if need_root and self.opts.ssh_user != 'root':
-+            _expects.extend([
-+                '\\[sudo\\] password for .*:',
-+                'Password:'
-+            ])
-+
-+        index = result.expect(_expects, timeout=timeout)
-+
-+        if index in [2, 3]:
-+            self._send_pexpect_password(index, result)
-+            index = result.expect(_expects, timeout=timeout)
-+
-+        if index == 0:
-+            out = result.before
-+            result.close()
-+            return {'status': result.exitstatus, 'output': out}
-+        elif index == 1:
-+            raise CommandTimeoutException(cmd)
-+
-+    def _send_pexpect_password(self, index, result):
-+        """Handle password prompts for sudo and su usage for non-root SSH users
-+
-+        :param index:       The index pexpect.spawn returned to match against
-+                            either a sudo or su prompt
-+        :type index:        ``int``
-+
-+        :param result:      The spawn running the command
-+        :type result:       ``pexpect.spawn``
-+        """
-+        if index == 2:
-+            if not self.opts.sudo_pw and not self.opts.nopasswd_sudo:
-+                msg = ("Unable to run command: sudo password "
-+                       "required but not provided")
-+                self.log_error(msg)
-+                raise Exception(msg)
-+            result.sendline(self.opts.sudo_pw)
-+        elif index == 3:
-+            if not self.opts.root_password:
-+                msg = ("Unable to run command as root: no root password given")
-+                self.log_error(msg)
-+                raise Exception(msg)
-+            result.sendline(self.opts.root_password)
-+
-+    def _get_hostname(self):
-+        """Determine the hostname of the node and set that for future reference
-+        and logging
-+
-+        :returns:   The hostname of the system, per the `hostname` command
-+        :rtype:     ``str``
-+        """
-+        _out = self.run_command('hostname')
-+        if _out['status'] == 0:
-+            self._hostname = _out['output'].strip()
-+        self.log_info("Hostname set to %s" % self._hostname)
-+        return self._hostname
-+
-+    def retrieve_file(self, fname, dest):
-+        """Copy a remote file, fname, to dest on the local node
-+
-+        :param fname:   The name of the file to retrieve
-+        :type fname:    ``str``
-+
-+        :param dest:    Where to save the file to locally
-+        :type dest:     ``str``
-+
-+        :returns:   True if file was successfully copied from remote, or False
-+        :rtype:     ``bool``
-+        """
-+        return self._retrieve_file(fname, dest)
-+
-+    def _retrieve_file(self, fname, dest):
-+        raise NotImplementedError("Transport %s does not support file copying"
-+                                  % self.name)
-+
-+    def read_file(self, fname):
-+        """Read the given file fname and return its contents
-+
-+        :param fname:   The name of the file to read
-+        :type fname:    ``str``
-+
-+        :returns:   The content of the file
-+        :rtype:     ``str``
-+        """
-+        self.log_debug("Reading file %s" % fname)
-+        return self._read_file(fname)
-+
-+    def _read_file(self, fname):
-+        res = self.run_command("cat %s" % fname, timeout=5)
-+        if res['status'] == 0:
-+            return res['output']
-+        else:
-+            if 'No such file' in res['output']:
-+                self.log_debug("File %s does not exist on node"
-+                               % fname)
-+            else:
-+                self.log_error("Error reading %s: %s" %
-+                               (fname, res['output'].split(':')[1:]))
-+            return ''
-+
-+# vim: set et ts=4 sw=4 :
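The base class above leaves `_connect()`, `_disconnect()` and `_retrieve_file()` to concrete transports. As a rough illustration of that contract, here is a hypothetical transport that drives a container through `docker exec`; the class name, the `docker` commands, and the reuse of `self.address` as a container name are all invented for this sketch and are not part of the patch:

```python
# Hypothetical transport sketch built on the RemoteTransport contract above.
from sos.collector.transports import RemoteTransport
from sos.utilities import sos_get_command_output


class DockerExecTransport(RemoteTransport):
    """Toy transport that treats self.address as a container name."""

    name = 'docker_exec'

    def _connect(self, password):
        # No persistent session to build; just confirm the container answers.
        return self.run_command('true')['status'] == 0

    def _disconnect(self):
        return True

    @property
    def connected(self):
        return self.run_command('true')['status'] == 0

    @property
    def remote_exec(self):
        # Prepended to every command by _format_cmd_for_exec().
        return 'docker exec %s sh -c' % self.address

    def _retrieve_file(self, fname, dest):
        res = sos_get_command_output('docker cp %s:%s %s'
                                     % (self.address, fname, dest))
        return res['status'] == 0
```

Note that `run_command()` itself is not overridden: the base class formats the command via `remote_exec` and executes it with pexpect, which is the whole point of the abstraction.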
-diff --git a/sos/collector/transports/control_persist.py b/sos/collector/transports/control_persist.py
-new file mode 100644
-index 00000000..3e848b41
---- /dev/null
-+++ b/sos/collector/transports/control_persist.py
-@@ -0,0 +1,199 @@
-+# Copyright Red Hat 2021, Jake Hunsaker <jhunsake@redhat.com>
-+
-+# This file is part of the sos project: https://github.com/sosreport/sos
-+#
-+# This copyrighted material is made available to anyone wishing to use,
-+# modify, copy, or redistribute it subject to the terms and conditions of
-+# version 2 of the GNU General Public License.
-+#
-+# See the LICENSE file in the source distribution for further information.
-+
-+
-+import os
-+import pexpect
-+import subprocess
-+
-+from sos.collector.transports import RemoteTransport
-+from sos.collector.exceptions import (InvalidPasswordException,
-+                                      TimeoutPasswordAuthException,
-+                                      PasswordRequestException,
-+                                      AuthPermissionDeniedException,
-+                                      ConnectionException,
-+                                      ConnectionTimeoutException,
-+                                      ControlSocketMissingException,
-+                                      ControlPersistUnsupportedException)
-+from sos.utilities import sos_get_command_output
-+
-+
-+class SSHControlPersist(RemoteTransport):
-+    """A transport for collect that leverages OpenSSH's Control Persist
-+    functionality which uses control sockets to transparently keep a connection
-+    open to the remote host without needing to rebuild the SSH connection for
-+    each and every command executed on the node
-+    """
-+
-+    name = 'control_persist'
-+
-+    def _check_for_control_persist(self):
-+        """Checks to see if the local system supported SSH ControlPersist.
-+
-+        ControlPersist allows OpenSSH to keep a single open connection to a
-+        remote host rather than building a new session each time. This is the
-+        same feature that Ansible uses in place of paramiko, a dependency we
-+        need to drop from sos-collector.
-+
-+        This check relies on feedback from the ssh binary. The command being
-+        run should always generate stderr output, but depending on what that
-+        output reads we can determine if ControlPersist is supported or not.
-+
-+        For our purposes, a host that does not support ControlPersist is not
-+        able to run sos-collector.
-+
-+        Returns
-+            True if ControlPersist is supported, else raise Exception.
-+        """
-+        ssh_cmd = ['ssh', '-o', 'ControlPersist']
-+        cmd = subprocess.Popen(ssh_cmd, stdout=subprocess.PIPE,
-+                               stderr=subprocess.PIPE)
-+        out, err = cmd.communicate()
-+        err = err.decode('utf-8')
-+        if 'Bad configuration option' in err or 'Usage:' in err:
-+            raise ControlPersistUnsupportedException
-+        return True
-+
-+    def _connect(self, password=''):
-+        """
-+        Using ControlPersist, create the initial connection to the node.
-+
-+        This will generate an OpenSSH ControlPersist socket within the tmp
-+        directory created or specified for sos-collector to use.
-+
-+        At most, we will wait 30 seconds for a connection. This involves a 15
-+        second wait for the initial connection attempt, and a subsequent 15
-+        second wait for a response when we supply a password.
-+
-+        Since we connect to nodes in parallel (using the --threads value),
-+        the time users see between 'Connecting to nodes...' and 'Beginning
-+        collection of sosreports' can be up to 30*(num_nodes/threads)
-+        seconds.
-+
-+        Returns
-+            True if session is successfully opened, else raise Exception
-+        """
-+        try:
-+            self._check_for_control_persist()
-+        except ControlPersistUnsupportedException:
-+            self.log_error("OpenSSH ControlPersist is not locally supported. "
-+                           "Please update your OpenSSH installation.")
-+            raise
-+        self.log_info('Opening SSH session to create control socket')
-+        self.control_path = ("%s/.sos-collector-%s" % (self.tmpdir,
-+                                                       self.address))
-+        self.ssh_cmd = ''
-+        connected = False
-+        ssh_key = ''
-+        ssh_port = ''
-+        if self.opts.ssh_port != 22:
-+            ssh_port = "-p%s " % self.opts.ssh_port
-+        if self.opts.ssh_key:
-+            ssh_key = "-i%s" % self.opts.ssh_key
-+
-+        cmd = ("ssh %s %s -oControlPersist=600 -oControlMaster=auto "
-+               "-oStrictHostKeyChecking=no -oControlPath=%s %s@%s "
-+               "\"echo Connected\"" % (ssh_key,
-+                                       ssh_port,
-+                                       self.control_path,
-+                                       self.opts.ssh_user,
-+                                       self.address))
-+        res = pexpect.spawn(cmd, encoding='utf-8')
-+
-+        connect_expects = [
-+            u'Connected',
-+            u'password:',
-+            u'.*Permission denied.*',
-+            u'.* port .*: No route to host',
-+            u'.*Could not resolve hostname.*',
-+            pexpect.TIMEOUT
-+        ]
-+
-+        index = res.expect(connect_expects, timeout=15)
-+
-+        if index == 0:
-+            connected = True
-+        elif index == 1:
-+            if password:
-+                pass_expects = [
-+                    u'Connected',
-+                    u'Permission denied, please try again.',
-+                    pexpect.TIMEOUT
-+                ]
-+                res.sendline(password)
-+                pass_index = res.expect(pass_expects, timeout=15)
-+                if pass_index == 0:
-+                    connected = True
-+                elif pass_index == 1:
-+                    # Note that we do not get an exitstatus here, so matching
-+                    # this line means an invalid password will be reported for
-+                    # both invalid passwords and invalid user names
-+                    raise InvalidPasswordException
-+                elif pass_index == 2:
-+                    raise TimeoutPasswordAuthException
-+            else:
-+                raise PasswordRequestException
-+        elif index == 2:
-+            raise AuthPermissionDeniedException
-+        elif index == 3:
-+            raise ConnectionException(self.address, self.opts.ssh_port)
-+        elif index == 4:
-+            raise ConnectionException(self.address)
-+        elif index == 5:
-+            raise ConnectionTimeoutException
-+        else:
-+            raise Exception("Unknown error, client returned %s" % res.before)
-+        if connected:
-+            if not os.path.exists(self.control_path):
-+                raise ControlSocketMissingException
-+            self.log_debug("Successfully created control socket at %s"
-+                           % self.control_path)
-+            return True
-+        return False
-+
-+    def _disconnect(self):
-+        if os.path.exists(self.control_path):
-+            try:
-+                os.remove(self.control_path)
-+                return True
-+            except Exception as err:
-+                self.log_debug("Could not disconnect properly: %s" % err)
-+                return False
-+        self.log_debug("Control socket not present when attempting to "
-+                       "terminate session")
-+
-+    @property
-+    def connected(self):
-+        """Check if the SSH control socket exists
-+
-+        The control socket is automatically removed by the SSH daemon in the
-+        event that the last connection to the node was greater than the timeout
-+        set by the ControlPersist option. This can happen for us if we are
-+        collecting from a large number of nodes, and the timeout expires before
-+        we start collection.
-+        """
-+        return os.path.exists(self.control_path)
-+
-+    @property
-+    def remote_exec(self):
-+        if not self.ssh_cmd:
-+            self.ssh_cmd = "ssh -oControlPath=%s %s@%s" % (
-+                self.control_path, self.opts.ssh_user, self.address
-+            )
-+        return self.ssh_cmd
-+
-+    def _retrieve_file(self, fname, dest):
-+        cmd = "/usr/bin/scp -oControlPath=%s %s@%s:%s %s" % (
-+            self.control_path, self.opts.ssh_user, self.address, fname, dest
-+        )
-+        res = sos_get_command_output(cmd)
-+        return res['status'] == 0
-+
-+# vim: set et ts=4 sw=4 :
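What ControlPersist buys us is that only the first invocation pays for authentication; every later command rides the existing socket. Roughly, the two command shapes the transport generates look like this (the control path, user, and host below are placeholders, not real defaults):

```python
# Illustration of the ssh invocations built by SSHControlPersist above.
control_path = '/var/tmp/sos.abc123/.sos-collector-node1.example.com'

# _connect(): open the master connection, leaving the socket behind.
master_cmd = ('ssh -oControlPersist=600 -oControlMaster=auto '
              '-oStrictHostKeyChecking=no -oControlPath=%s '
              'root@node1.example.com "echo Connected"' % control_path)

# remote_exec: prefix for every subsequent command; no re-authentication
# happens as long as the socket file is alive.
reuse_cmd = 'ssh -oControlPath=%s root@node1.example.com' % control_path

print(master_cmd)
print('%s %s' % (reuse_cmd, 'sos report --batch'))
```

This is also why `connected` is defined as a simple `os.path.exists()` check: the SSH daemon removes the socket once the ControlPersist timeout lapses, so the file's presence is the connection state.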
-diff --git a/sos/collector/transports/local.py b/sos/collector/transports/local.py
-new file mode 100644
-index 00000000..a4897f19
---- /dev/null
-+++ b/sos/collector/transports/local.py
-@@ -0,0 +1,49 @@
-+# Copyright Red Hat 2021, Jake Hunsaker <jhunsake@redhat.com>
-+
-+# This file is part of the sos project: https://github.com/sosreport/sos
-+#
-+# This copyrighted material is made available to anyone wishing to use,
-+# modify, copy, or redistribute it subject to the terms and conditions of
-+# version 2 of the GNU General Public License.
-+#
-+# See the LICENSE file in the source distribution for further information.
-+
-+import os
-+import shutil
-+
-+from sos.collector.transports import RemoteTransport
-+
-+
-+class LocalTransport(RemoteTransport):
-+    """A 'transport' to represent a local node. This allows us to more easily
-+    extend SoSNode() without having a ton of 'if local' or similar checks in
-+    more places than we actually need them
-+    """
-+
-+    name = 'local_node'
-+
-+    def _connect(self, password):
-+        return True
-+
-+    def _disconnect(self):
-+        return True
-+
-+    @property
-+    def connected(self):
-+        return True
-+
-+    def _retrieve_file(self, fname, dest):
-+        self.log_debug("Moving %s to %s" % (fname, dest))
-+        shutil.copy(fname, dest)
-+        return True
-+
-+    def _format_cmd_for_exec(self, cmd):
-+        return cmd
-+
-+    def _read_file(self, fname):
-+        if os.path.exists(fname):
-+            with open(fname, 'r') as rfile:
-+                return rfile.read()
-+        self.log_debug("No such file: %s" % fname)
-+        return ''
-+
-+# vim: set et ts=4 sw=4 :
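Taken together, the two concrete transports let SosNode stay transport-agnostic. A hypothetical selection helper (the real wiring lives in SosNode and is not shown in this patch) might look like:

```python
# Hypothetical glue code; the function name and is_local flag are illustrative.
from sos.collector.transports.control_persist import SSHControlPersist
from sos.collector.transports.local import LocalTransport


def pick_transport(address, commons, is_local=False):
    """Return a transport for the node at 'address'.

    commons is the dict SosNode already passes around, e.g.
    {'cmdlineopts': opts, 'tmpdir': '/var/tmp/sos.abc123',
     'need_sudo': False}.
    """
    klass = LocalTransport if is_local else SSHControlPersist
    return klass(address, commons)
```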
--- 
-2.31.1
-
-From 07d96d52ef69b9f8fe1ef32a1b88089d31c33fe8 Mon Sep 17 00:00:00 2001
-From: Jake Hunsaker <jhunsake@redhat.com>
-Date: Mon, 27 Sep 2021 12:28:27 -0400
-Subject: [PATCH 2/2] [plugins] Update plugins to use new os.path.join wrapper
-
-Updates plugins to use the new `self.path_join()` wrapper for
-`os.path.join()` so that these plugins now account for non-/ sysroots
-for their collections.
-
-Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
----
- sos/report/plugins/__init__.py            |  2 +-
- sos/report/plugins/azure.py               |  4 +--
- sos/report/plugins/collectd.py            |  2 +-
- sos/report/plugins/container_log.py       |  2 +-
- sos/report/plugins/corosync.py            |  2 +-
- sos/report/plugins/docker_distribution.py |  5 ++--
- sos/report/plugins/ds.py                  |  3 +--
- sos/report/plugins/elastic.py             |  4 ++-
- sos/report/plugins/etcd.py                |  2 +-
- sos/report/plugins/gluster.py             |  3 ++-
- sos/report/plugins/jars.py                |  2 +-
- sos/report/plugins/kdump.py               |  4 +--
- sos/report/plugins/libvirt.py             |  2 +-
- sos/report/plugins/logs.py                |  8 +++---
- sos/report/plugins/manageiq.py            | 12 ++++-----
- sos/report/plugins/numa.py                |  9 +++----
- sos/report/plugins/openstack_instack.py   |  2 +-
- sos/report/plugins/openstack_nova.py      |  2 +-
- sos/report/plugins/openvswitch.py         | 13 ++++-----
- sos/report/plugins/origin.py              | 28 +++++++++++---------
- sos/report/plugins/ovirt.py               |  2 +-
- sos/report/plugins/ovirt_engine_backup.py |  5 ++--
- sos/report/plugins/ovn_central.py         | 26 +++++++++---------
- sos/report/plugins/ovn_host.py            |  4 +--
- sos/report/plugins/pacemaker.py           |  4 +--
- sos/report/plugins/pcp.py                 | 32 +++++++++++------------
- sos/report/plugins/postfix.py             |  2 +-
- sos/report/plugins/postgresql.py          |  2 +-
- sos/report/plugins/powerpc.py             |  2 +-
- sos/report/plugins/processor.py           |  3 +--
- sos/report/plugins/python.py              |  4 +--
- sos/report/plugins/sar.py                 |  5 ++--
- sos/report/plugins/sos_extras.py          |  2 +-
- sos/report/plugins/ssh.py                 |  7 +++--
- sos/report/plugins/unpackaged.py          |  4 +--
- sos/report/plugins/watchdog.py            | 13 +++++----
- sos/report/plugins/yum.py                 |  2 +-
- 37 files changed, 115 insertions(+), 115 deletions(-)
-
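The commit message leans on `self.path_join()` without showing it; conceptually it is an `os.path.join()` that anchors absolute paths at the policy sysroot instead of `/`. A rough approximation of the idea (the real implementation lives in sos/report/plugins/__init__.py and handles more cases than this):

```python
import os


def path_join(sysroot, path, *parts):
    """Sketch of a sysroot-aware join: '/etc/fstab' under sysroot
    '/host' resolves to '/host/etc/fstab' instead of the collector's
    own /etc/fstab."""
    path = os.path.join(path, *parts)
    if not path.startswith(sysroot):
        path = os.path.join(sysroot, path.lstrip('/'))
    return path


assert path_join('/', '/etc', 'fstab') == '/etc/fstab'
assert path_join('/host', '/etc/fstab') == '/host/etc/fstab'
```

That is why the hunks below swap `os.path.join()` for the wrapper in copy specs and config reads: a plain join would silently collect from the collector's own filesystem when sos is pointed at a mounted image or container root.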
-diff --git a/sos/report/plugins/__init__.py b/sos/report/plugins/__init__.py
-index 1f84bca4..ec138f83 100644
---- a/sos/report/plugins/__init__.py
-+++ b/sos/report/plugins/__init__.py
-@@ -2897,7 +2897,7 @@ class Plugin():
-         try:
-             cmd_line_paths = glob.glob(cmd_line_glob)
-             for path in cmd_line_paths:
--                f = open(path, 'r')
-+                f = open(self.path_join(path), 'r')
-                 cmd_line = f.read().strip()
-                 if process in cmd_line:
-                     status = True
-diff --git a/sos/report/plugins/azure.py b/sos/report/plugins/azure.py
-index 45971a61..90999b3f 100644
---- a/sos/report/plugins/azure.py
-+++ b/sos/report/plugins/azure.py
-@@ -8,8 +8,8 @@
- #
- # See the LICENSE file in the source distribution for further information.
- 
--import os
- from sos.report.plugins import Plugin, UbuntuPlugin, RedHatPlugin
-+import os
- 
- 
- class Azure(Plugin, UbuntuPlugin):
-@@ -38,7 +38,7 @@ class Azure(Plugin, UbuntuPlugin):
- 
-         for path, subdirs, files in os.walk("/var/log/azure"):
-             for name in files:
--                self.add_copy_spec(os.path.join(path, name), sizelimit=limit)
-+                self.add_copy_spec(self.path_join(path, name), sizelimit=limit)
- 
-         self.add_cmd_output((
-             'curl -s -H Metadata:true '
-diff --git a/sos/report/plugins/collectd.py b/sos/report/plugins/collectd.py
-index 80d4b00a..8584adf9 100644
---- a/sos/report/plugins/collectd.py
-+++ b/sos/report/plugins/collectd.py
-@@ -33,7 +33,7 @@ class Collectd(Plugin, IndependentPlugin):
- 
-         p = re.compile('^LoadPlugin.*')
-         try:
--            with open("/etc/collectd.conf") as f:
-+            with open(self.path_join("/etc/collectd.conf"), 'r') as f:
-                 for line in f:
-                     if p.match(line):
-                         self.add_alert("Active Plugin found: %s" %
-diff --git a/sos/report/plugins/container_log.py b/sos/report/plugins/container_log.py
-index 14e0b7d8..e8dedad2 100644
---- a/sos/report/plugins/container_log.py
-+++ b/sos/report/plugins/container_log.py
-@@ -29,6 +29,6 @@ class ContainerLog(Plugin, IndependentPlugin):
-         """Collect *.log files from subdirs of passed root path
-         """
-         for dirName, _, _ in os.walk(root):
--            self.add_copy_spec(os.path.join(dirName, '*.log'))
-+            self.add_copy_spec(self.path_join(dirName, '*.log'))
- 
- # vim: set et ts=4 sw=4 :
-diff --git a/sos/report/plugins/corosync.py b/sos/report/plugins/corosync.py
-index d74086e3..10e096c6 100644
---- a/sos/report/plugins/corosync.py
-+++ b/sos/report/plugins/corosync.py
-@@ -47,7 +47,7 @@ class Corosync(Plugin):
-         # (it isn't precise but sufficient)
-         pattern = r'^\s*(logging.)?logfile:\s*(\S+)$'
-         try:
--            with open("/etc/corosync/corosync.conf") as f:
-+            with open(self.path_join("/etc/corosync/corosync.conf"), 'r') as f:
-                 for line in f:
-                     if re.match(pattern, line):
-                         self.add_copy_spec(re.search(pattern, line).group(2))
-diff --git a/sos/report/plugins/docker_distribution.py b/sos/report/plugins/docker_distribution.py
-index 84222ff7..e760f252 100644
---- a/sos/report/plugins/docker_distribution.py
-+++ b/sos/report/plugins/docker_distribution.py
-@@ -19,8 +19,9 @@ class DockerDistribution(Plugin):
-     def setup(self):
-         self.add_copy_spec('/etc/docker-distribution/')
-         self.add_journal('docker-distribution')
--        if self.path_exists('/etc/docker-distribution/registry/config.yml'):
--            with open('/etc/docker-distribution/registry/config.yml') as f:
-+        conf = self.path_join('/etc/docker-distribution/registry/config.yml')
-+        if self.path_exists(conf):
-+            with open(conf) as f:
-                 for line in f:
-                     if 'rootdirectory' in line:
-                         loc = line.split()[1]
-diff --git a/sos/report/plugins/ds.py b/sos/report/plugins/ds.py
-index addf49e1..43feb21e 100644
---- a/sos/report/plugins/ds.py
-+++ b/sos/report/plugins/ds.py
-@@ -11,7 +11,6 @@
- # See the LICENSE file in the source distribution for further information.
- 
- from sos.report.plugins import Plugin, RedHatPlugin
--import os
- 
- 
- class DirectoryServer(Plugin, RedHatPlugin):
-@@ -47,7 +46,7 @@ class DirectoryServer(Plugin, RedHatPlugin):
-         try:
-             for d in self.listdir("/etc/dirsrv"):
-                 if d[0:5] == 'slapd':
--                    certpath = os.path.join("/etc/dirsrv", d)
-+                    certpath = self.path_join("/etc/dirsrv", d)
-                     self.add_cmd_output("certutil -L -d %s" % certpath)
-                     self.add_cmd_output("dsctl %s healthcheck" % d)
-         except OSError:
-diff --git a/sos/report/plugins/elastic.py b/sos/report/plugins/elastic.py
-index ad9a06ff..da2662bc 100644
---- a/sos/report/plugins/elastic.py
-+++ b/sos/report/plugins/elastic.py
-@@ -39,7 +39,9 @@ class Elastic(Plugin, IndependentPlugin):
-         return hostname, port
- 
-     def setup(self):
--        els_config_file = "/etc/elasticsearch/elasticsearch.yml"
-+        els_config_file = self.path_join(
-+            "/etc/elasticsearch/elasticsearch.yml"
-+        )
-         self.add_copy_spec(els_config_file)
- 
-         if self.get_option("all_logs"):
-diff --git a/sos/report/plugins/etcd.py b/sos/report/plugins/etcd.py
-index fd4f67eb..fe017e9f 100644
---- a/sos/report/plugins/etcd.py
-+++ b/sos/report/plugins/etcd.py
-@@ -62,7 +62,7 @@ class etcd(Plugin, RedHatPlugin):
- 
-     def get_etcd_url(self):
-         try:
--            with open('/etc/etcd/etcd.conf', 'r') as ef:
-+            with open(self.path_join('/etc/etcd/etcd.conf'), 'r') as ef:
-                 for line in ef:
-                     if line.startswith('ETCD_LISTEN_CLIENT_URLS'):
-                         return line.split('=')[1].replace('"', '').strip()
-diff --git a/sos/report/plugins/gluster.py b/sos/report/plugins/gluster.py
-index a44ffeb7..e518e3d3 100644
---- a/sos/report/plugins/gluster.py
-+++ b/sos/report/plugins/gluster.py
-@@ -35,9 +35,10 @@ class Gluster(Plugin, RedHatPlugin):
-         ]
-         for statedump_file in statedump_entries:
-             statedumps_present = statedumps_present+1
-+            _spath = self.path_join(name_dir, statedump_file)
-             ret = -1
-             while ret == -1:
--                with open(name_dir + '/' + statedump_file, 'r') as sfile:
-+                with open(_spath, 'r') as sfile:
-                     last_line = sfile.readlines()[-1]
-                     ret = string.count(last_line, 'DUMP_END_TIME')
- 
-diff --git a/sos/report/plugins/jars.py b/sos/report/plugins/jars.py
-index 0d3cf37e..4b98684e 100644
---- a/sos/report/plugins/jars.py
-+++ b/sos/report/plugins/jars.py
-@@ -63,7 +63,7 @@ class Jars(Plugin, RedHatPlugin):
-         for location in locations:
-             for dirpath, _, filenames in os.walk(location):
-                 for filename in filenames:
--                    path = os.path.join(dirpath, filename)
-+                    path = self.path_join(dirpath, filename)
-                     if Jars.is_jar(path):
-                         jar_paths.append(path)
- 
-diff --git a/sos/report/plugins/kdump.py b/sos/report/plugins/kdump.py
-index 757c2736..66565664 100644
---- a/sos/report/plugins/kdump.py
-+++ b/sos/report/plugins/kdump.py
-@@ -40,7 +40,7 @@ class RedHatKDump(KDump, RedHatPlugin):
-     packages = ('kexec-tools',)
- 
-     def fstab_parse_fs(self, device):
--        with open('/etc/fstab', 'r') as fp:
-+        with open(self.path_join('/etc/fstab'), 'r') as fp:
-             for line in fp:
-                 if line.startswith((device)):
-                     return line.split()[1].rstrip('/')
-@@ -50,7 +50,7 @@ class RedHatKDump(KDump, RedHatPlugin):
-         fs = ""
-         path = "/var/crash"
- 
--        with open('/etc/kdump.conf', 'r') as fp:
-+        with open(self.path_join('/etc/kdump.conf'), 'r') as fp:
-             for line in fp:
-                 if line.startswith("path"):
-                     path = line.split()[1]
-diff --git a/sos/report/plugins/libvirt.py b/sos/report/plugins/libvirt.py
-index be8120ff..5caa5802 100644
---- a/sos/report/plugins/libvirt.py
-+++ b/sos/report/plugins/libvirt.py
-@@ -55,7 +55,7 @@ class Libvirt(Plugin, IndependentPlugin):
-         else:
-             self.add_copy_spec("/var/log/libvirt")
- 
--        if self.path_exists(self.join_sysroot(libvirt_keytab)):
-+        if self.path_exists(self.path_join(libvirt_keytab)):
-             self.add_cmd_output("klist -ket %s" % libvirt_keytab)
- 
-         self.add_cmd_output("ls -lR /var/lib/libvirt/qemu")
-diff --git a/sos/report/plugins/logs.py b/sos/report/plugins/logs.py
-index ee6bb98d..606e574a 100644
---- a/sos/report/plugins/logs.py
-+++ b/sos/report/plugins/logs.py
-@@ -24,15 +24,15 @@ class Logs(Plugin, IndependentPlugin):
-         since = self.get_option("since")
- 
-         if self.path_exists('/etc/rsyslog.conf'):
--            with open('/etc/rsyslog.conf', 'r') as conf:
-+            with open(self.path_join('/etc/rsyslog.conf'), 'r') as conf:
-                 for line in conf.readlines():
-                     if line.startswith('$IncludeConfig'):
-                         confs += glob.glob(line.split()[1])
- 
-         for conf in confs:
--            if not self.path_exists(conf):
-+            if not self.path_exists(self.path_join(conf)):
-                 continue
--            config = self.join_sysroot(conf)
-+            config = self.path_join(conf)
-             logs += self.do_regex_find_all(r"^\S+\s+(-?\/.*$)\s+", config)
- 
-         for i in logs:
-@@ -60,7 +60,7 @@ class Logs(Plugin, IndependentPlugin):
-         # - there is some data present, either persistent or runtime only
-         # - systemd-journald service exists
-         # otherwise fallback to collecting few well known logfiles directly
--        journal = any([self.path_exists(p + "/log/journal/")
-+        journal = any([self.path_exists(self.path_join(p, "log/journal/"))
-                       for p in ["/var", "/run"]])
-         if journal and self.is_service("systemd-journald"):
-             self.add_journal(since=since, tags='journal_full', priority=100)
-diff --git a/sos/report/plugins/manageiq.py b/sos/report/plugins/manageiq.py
-index 27ad6ef4..e20c4a2a 100644
---- a/sos/report/plugins/manageiq.py
-+++ b/sos/report/plugins/manageiq.py
-@@ -58,7 +58,7 @@ class ManageIQ(Plugin, RedHatPlugin):
-     # Log files to collect from miq_dir/log/
-     miq_log_dir = os.path.join(miq_dir, "log")
- 
--    miq_main_log_files = [
-+    miq_main_logs = [
-         'ansible_tower.log',
-         'top_output.log',
-         'evm.log',
-@@ -81,16 +81,16 @@ class ManageIQ(Plugin, RedHatPlugin):
-         self.add_copy_spec(list(self.files))
- 
-         self.add_copy_spec([
--            os.path.join(self.miq_conf_dir, x) for x in self.miq_conf_files
-+            self.path_join(self.miq_conf_dir, x) for x in self.miq_conf_files
-         ])
- 
-         # Collect main log files without size limit.
-         self.add_copy_spec([
--            os.path.join(self.miq_log_dir, x) for x in self.miq_main_log_files
-+            self.path_join(self.miq_log_dir, x) for x in self.miq_main_logs
-         ], sizelimit=0)
- 
-         self.add_copy_spec([
--            os.path.join(self.miq_log_dir, x) for x in self.miq_log_files
-+            self.path_join(self.miq_log_dir, x) for x in self.miq_log_files
-         ])
- 
-         self.add_copy_spec([
-@@ -101,8 +101,8 @@ class ManageIQ(Plugin, RedHatPlugin):
-         if environ.get("APPLIANCE_PG_DATA"):
-             pg_dir = environ.get("APPLIANCE_PG_DATA")
-             self.add_copy_spec([
--                    os.path.join(pg_dir, 'pg_log'),
--                    os.path.join(pg_dir, 'postgresql.conf')
-+                    self.path_join(pg_dir, 'pg_log'),
-+                    self.path_join(pg_dir, 'postgresql.conf')
-             ])
- 
- # vim: set et ts=4 sw=4 :
-diff --git a/sos/report/plugins/numa.py b/sos/report/plugins/numa.py
-index 0faef8d2..9094baef 100644
---- a/sos/report/plugins/numa.py
-+++ b/sos/report/plugins/numa.py
-@@ -9,7 +9,6 @@
- # See the LICENSE file in the source distribution for further information.
- 
- from sos.report.plugins import Plugin, IndependentPlugin
--import os.path
- 
- 
- class Numa(Plugin, IndependentPlugin):
-@@ -42,10 +41,10 @@ class Numa(Plugin, IndependentPlugin):
-         ])
- 
-         self.add_copy_spec([
--            os.path.join(numa_path, "node*/meminfo"),
--            os.path.join(numa_path, "node*/cpulist"),
--            os.path.join(numa_path, "node*/distance"),
--            os.path.join(numa_path, "node*/hugepages/hugepages-*/*")
-+            self.path_join(numa_path, "node*/meminfo"),
-+            self.path_join(numa_path, "node*/cpulist"),
-+            self.path_join(numa_path, "node*/distance"),
-+            self.path_join(numa_path, "node*/hugepages/hugepages-*/*")
-         ])
- 
- # vim: set et ts=4 sw=4 :
-diff --git a/sos/report/plugins/openstack_instack.py b/sos/report/plugins/openstack_instack.py
-index 7c56c162..5b4f7d41 100644
---- a/sos/report/plugins/openstack_instack.py
-+++ b/sos/report/plugins/openstack_instack.py
-@@ -68,7 +68,7 @@ class OpenStackInstack(Plugin):
-                 p = uc_config.get(opt)
-                 if p:
-                     if not os.path.isabs(p):
--                        p = os.path.join('/home/stack', p)
-+                        p = self.path_join('/home/stack', p)
-                     self.add_copy_spec(p)
-         except Exception:
-             pass
-diff --git a/sos/report/plugins/openstack_nova.py b/sos/report/plugins/openstack_nova.py
-index 53210c48..f840081e 100644
---- a/sos/report/plugins/openstack_nova.py
-+++ b/sos/report/plugins/openstack_nova.py
-@@ -103,7 +103,7 @@ class OpenStackNova(Plugin):
-                 "nova-scheduler.log*"
-             ]
-             for novalog in novalogs:
--                self.add_copy_spec(os.path.join(novadir, novalog))
-+                self.add_copy_spec(self.path_join(novadir, novalog))
- 
-         self.add_copy_spec([
-             "/etc/nova/",
-diff --git a/sos/report/plugins/openvswitch.py b/sos/report/plugins/openvswitch.py
-index 003596c6..179d1532 100644
---- a/sos/report/plugins/openvswitch.py
-+++ b/sos/report/plugins/openvswitch.py
-@@ -10,7 +10,6 @@
- 
- from sos.report.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin
- 
--from os.path import join as path_join
- from os import environ
- 
- import re
-@@ -65,7 +64,9 @@ class OpenVSwitch(Plugin):
-             log_dirs.append(environ.get('OVS_LOGDIR'))
- 
-         if not all_logs:
--            self.add_copy_spec([path_join(ld, '*.log') for ld in log_dirs])
-+            self.add_copy_spec([
-+                self.path_join(ld, '*.log') for ld in log_dirs
-+            ])
-         else:
-             self.add_copy_spec(log_dirs)
- 
-@@ -76,13 +77,13 @@ class OpenVSwitch(Plugin):
-         ])
- 
-         self.add_copy_spec([
--            path_join('/usr/local/etc/openvswitch', 'conf.db'),
--            path_join('/etc/openvswitch', 'conf.db'),
--            path_join('/var/lib/openvswitch', 'conf.db'),
-+            self.path_join('/usr/local/etc/openvswitch', 'conf.db'),
-+            self.path_join('/etc/openvswitch', 'conf.db'),
-+            self.path_join('/var/lib/openvswitch', 'conf.db'),
-         ])
-         ovs_dbdir = environ.get('OVS_DBDIR')
-         if ovs_dbdir:
--            self.add_copy_spec(path_join(ovs_dbdir, 'conf.db'))
-+            self.add_copy_spec(self.path_join(ovs_dbdir, 'conf.db'))
- 
-         self.add_cmd_output([
-             # The '-t 5' adds an upper bound on how long to wait to connect
-diff --git a/sos/report/plugins/origin.py b/sos/report/plugins/origin.py
-index f9cc32c1..7df9c019 100644
---- a/sos/report/plugins/origin.py
-+++ b/sos/report/plugins/origin.py
-@@ -69,20 +69,21 @@ class OpenShiftOrigin(Plugin):
- 
-     def is_static_etcd(self):
-         """Determine if we are on a node running etcd"""
--        return self.path_exists(os.path.join(self.static_pod_dir, "etcd.yaml"))
-+        return self.path_exists(self.path_join(self.static_pod_dir,
-+                                               "etcd.yaml"))
- 
-     def is_static_pod_compatible(self):
-         """Determine if a node is running static pods"""
-         return self.path_exists(self.static_pod_dir)
- 
-     def setup(self):
--        bstrap_node_cfg = os.path.join(self.node_base_dir,
--                                       "bootstrap-" + self.node_cfg_file)
--        bstrap_kubeconfig = os.path.join(self.node_base_dir,
--                                         "bootstrap.kubeconfig")
--        node_certs = os.path.join(self.node_base_dir, "certs", "*")
--        node_client_ca = os.path.join(self.node_base_dir, "client-ca.crt")
--        admin_cfg = os.path.join(self.master_base_dir, "admin.kubeconfig")
-+        bstrap_node_cfg = self.path_join(self.node_base_dir,
-+                                         "bootstrap-" + self.node_cfg_file)
-+        bstrap_kubeconfig = self.path_join(self.node_base_dir,
-+                                           "bootstrap.kubeconfig")
-+        node_certs = self.path_join(self.node_base_dir, "certs", "*")
-+        node_client_ca = self.path_join(self.node_base_dir, "client-ca.crt")
-+        admin_cfg = self.path_join(self.master_base_dir, "admin.kubeconfig")
-         oc_cmd_admin = "%s --config=%s" % ("oc", admin_cfg)
-         static_pod_logs_cmd = "master-logs"
- 
-@@ -92,11 +93,12 @@ class OpenShiftOrigin(Plugin):
-             self.add_copy_spec([
-                 self.master_cfg,
-                 self.master_env,
--                os.path.join(self.master_base_dir, "*.crt"),
-+                self.path_join(self.master_base_dir, "*.crt"),
-             ])
- 
-             if self.is_static_pod_compatible():
--                self.add_copy_spec(os.path.join(self.static_pod_dir, "*.yaml"))
-+                self.add_copy_spec(self.path_join(self.static_pod_dir,
-+                                                  "*.yaml"))
-                 self.add_cmd_output([
-                     "%s api api" % static_pod_logs_cmd,
-                     "%s controllers controllers" % static_pod_logs_cmd,
-@@ -177,9 +179,9 @@ class OpenShiftOrigin(Plugin):
-                 node_client_ca,
-                 bstrap_node_cfg,
-                 bstrap_kubeconfig,
--                os.path.join(self.node_base_dir, "*.crt"),
--                os.path.join(self.node_base_dir, "resolv.conf"),
--                os.path.join(self.node_base_dir, "node-dnsmasq.conf"),
-+                self.path_join(self.node_base_dir, "*.crt"),
-+                self.path_join(self.node_base_dir, "resolv.conf"),
-+                self.path_join(self.node_base_dir, "node-dnsmasq.conf"),
-             ])
- 
-             self.add_journal(units="atomic-openshift-node")
-diff --git a/sos/report/plugins/ovirt.py b/sos/report/plugins/ovirt.py
-index 1de606be..09647bf1 100644
---- a/sos/report/plugins/ovirt.py
-+++ b/sos/report/plugins/ovirt.py
-@@ -216,7 +216,7 @@ class Ovirt(Plugin, RedHatPlugin):
-             "isouploader.conf"
-         ]
-         for conf_file in passwd_files:
--            conf_path = os.path.join("/etc/ovirt-engine", conf_file)
-+            conf_path = self.path_join("/etc/ovirt-engine", conf_file)
-             self.do_file_sub(
-                 conf_path,
-                 r"passwd=(.*)",
-diff --git a/sos/report/plugins/ovirt_engine_backup.py b/sos/report/plugins/ovirt_engine_backup.py
-index 676e419e..7fb6a5c7 100644
---- a/sos/report/plugins/ovirt_engine_backup.py
-+++ b/sos/report/plugins/ovirt_engine_backup.py
-@@ -8,7 +8,6 @@
- #
- # See the LICENSE file in the source distribution for further information.
- 
--import os
- from sos.report.plugins import (Plugin, RedHatPlugin)
- from datetime import datetime
- 
-@@ -29,11 +28,11 @@ class oVirtEngineBackup(Plugin, RedHatPlugin):
- 
-     def setup(self):
-         now = datetime.now().strftime("%Y%m%d%H%M%S")
--        backup_filename = os.path.join(
-+        backup_filename = self.path_join(
-             self.get_option("backupdir"),
-             "engine-db-backup-%s.tar.gz" % (now)
-         )
--        log_filename = os.path.join(
-+        log_filename = self.path_join(
-             self.get_option("backupdir"),
-             "engine-db-backup-%s.log" % (now)
-         )
-diff --git a/sos/report/plugins/ovn_central.py b/sos/report/plugins/ovn_central.py
-index d6647aad..914eda60 100644
---- a/sos/report/plugins/ovn_central.py
-+++ b/sos/report/plugins/ovn_central.py
-@@ -42,7 +42,7 @@ class OVNCentral(Plugin):
-                 return
-         else:
-             try:
--                with open(filename, 'r') as f:
-+                with open(self.path_join(filename), 'r') as f:
-                     try:
-                         db = json.load(f)
-                     except Exception:
-@@ -71,13 +71,13 @@ class OVNCentral(Plugin):
-         ovs_rundir = os.environ.get('OVS_RUNDIR')
-         for pidfile in ['ovnnb_db.pid', 'ovnsb_db.pid', 'ovn-northd.pid']:
-             self.add_copy_spec([
--                os.path.join('/var/lib/openvswitch/ovn', pidfile),
--                os.path.join('/usr/local/var/run/openvswitch', pidfile),
--                os.path.join('/run/openvswitch/', pidfile),
-+                self.path_join('/var/lib/openvswitch/ovn', pidfile),
-+                self.path_join('/usr/local/var/run/openvswitch', pidfile),
-+                self.path_join('/run/openvswitch/', pidfile),
-             ])
- 
-             if ovs_rundir:
--                self.add_copy_spec(os.path.join(ovs_rundir, pidfile))
-+                self.add_copy_spec(self.path_join(ovs_rundir, pidfile))
- 
-         if self.get_option("all_logs"):
-             self.add_copy_spec("/var/log/ovn/")
-@@ -104,7 +104,7 @@ class OVNCentral(Plugin):
- 
-         schema_dir = '/usr/share/openvswitch'
- 
--        nb_tables = self.get_tables_from_schema(os.path.join(
-+        nb_tables = self.get_tables_from_schema(self.path_join(
-             schema_dir, 'ovn-nb.ovsschema'))
- 
-         self.add_database_output(nb_tables, nbctl_cmds, 'ovn-nbctl')
-@@ -116,7 +116,7 @@ class OVNCentral(Plugin):
-               format(self.ovn_sbdb_sock_path),
-               "output": "Leader: self"}
-         if self.test_predicate(self, pred=SoSPredicate(self, cmd_outputs=co)):
--            sb_tables = self.get_tables_from_schema(os.path.join(
-+            sb_tables = self.get_tables_from_schema(self.path_join(
-                 schema_dir, 'ovn-sb.ovsschema'), ['Logical_Flow'])
-             self.add_database_output(sb_tables, sbctl_cmds, 'ovn-sbctl')
-             cmds += sbctl_cmds
-@@ -134,14 +134,14 @@ class OVNCentral(Plugin):
-         ovs_dbdir = os.environ.get('OVS_DBDIR')
-         for dbfile in ['ovnnb_db.db', 'ovnsb_db.db']:
-             self.add_copy_spec([
--                os.path.join('/var/lib/openvswitch/ovn', dbfile),
--                os.path.join('/usr/local/etc/openvswitch', dbfile),
--                os.path.join('/etc/openvswitch', dbfile),
--                os.path.join('/var/lib/openvswitch', dbfile),
--                os.path.join('/var/lib/ovn/etc', dbfile),
-+                self.path_join('/var/lib/openvswitch/ovn', dbfile),
-+                self.path_join('/usr/local/etc/openvswitch', dbfile),
-+                self.path_join('/etc/openvswitch', dbfile),
-+                self.path_join('/var/lib/openvswitch', dbfile),
-+                self.path_join('/var/lib/ovn/etc', dbfile)
-             ])
-             if ovs_dbdir:
--                self.add_copy_spec(os.path.join(ovs_dbdir, dbfile))
-+                self.add_copy_spec(self.path_join(ovs_dbdir, dbfile))
- 
-         self.add_journal(units="ovn-northd")
- 
-diff --git a/sos/report/plugins/ovn_host.py b/sos/report/plugins/ovn_host.py
-index 3742c49f..78604a15 100644
---- a/sos/report/plugins/ovn_host.py
-+++ b/sos/report/plugins/ovn_host.py
-@@ -35,7 +35,7 @@ class OVNHost(Plugin):
-         else:
-             self.add_copy_spec("/var/log/ovn/*.log")
- 
--        self.add_copy_spec([os.path.join(pp, pidfile) for pp in pid_paths])
-+        self.add_copy_spec([self.path_join(pp, pidfile) for pp in pid_paths])
- 
-         self.add_copy_spec('/etc/sysconfig/ovn-controller')
- 
-@@ -49,7 +49,7 @@ class OVNHost(Plugin):
- 
-     def check_enabled(self):
-         return (any([self.path_isfile(
--            os.path.join(pp, pidfile)) for pp in pid_paths]) or
-+            self.path_join(pp, pidfile)) for pp in pid_paths]) or
-             super(OVNHost, self).check_enabled())
- 
- 
-diff --git a/sos/report/plugins/pacemaker.py b/sos/report/plugins/pacemaker.py
-index 497807ff..6ce80881 100644
---- a/sos/report/plugins/pacemaker.py
-+++ b/sos/report/plugins/pacemaker.py
-@@ -129,7 +129,7 @@ class Pacemaker(Plugin):
- 
- class DebianPacemaker(Pacemaker, DebianPlugin, UbuntuPlugin):
-     def setup(self):
--        self.envfile = "/etc/default/pacemaker"
-+        self.envfile = self.path_join("/etc/default/pacemaker")
-         self.setup_crm_shell()
-         self.setup_pcs()
-         super(DebianPacemaker, self).setup()
-@@ -141,7 +141,7 @@ class DebianPacemaker(Pacemaker, DebianPlugin, UbuntuPlugin):
- 
- class RedHatPacemaker(Pacemaker, RedHatPlugin):
-     def setup(self):
--        self.envfile = "/etc/sysconfig/pacemaker"
-+        self.envfile = self.path_join("/etc/sysconfig/pacemaker")
-         self.setup_pcs()
-         self.add_copy_spec("/etc/sysconfig/sbd")
-         super(RedHatPacemaker, self).setup()
-diff --git a/sos/report/plugins/pcp.py b/sos/report/plugins/pcp.py
-index 9707d7a9..ad902332 100644
---- a/sos/report/plugins/pcp.py
-+++ b/sos/report/plugins/pcp.py
-@@ -41,7 +41,7 @@ class Pcp(Plugin, RedHatPlugin, DebianPlugin):
-         total_size = 0
-         for dirpath, dirnames, filenames in os.walk(path):
-             for f in filenames:
--                fp = os.path.join(dirpath, f)
-+                fp = self.path_join(dirpath, f)
-                 total_size += os.path.getsize(fp)
-         return total_size
- 
-@@ -86,7 +86,7 @@ class Pcp(Plugin, RedHatPlugin, DebianPlugin):
-         # unconditionally. Obviously if someone messes up their /etc/pcp.conf
-         # in a ridiculous way (i.e. setting PCP_SYSCONF_DIR to '/') this will
-         # break badly.
--        var_conf_dir = os.path.join(self.pcp_var_dir, 'config')
-+        var_conf_dir = self.path_join(self.pcp_var_dir, 'config')
-         self.add_copy_spec([
-             self.pcp_sysconf_dir,
-             self.pcp_conffile,
-@@ -98,10 +98,10 @@ class Pcp(Plugin, RedHatPlugin, DebianPlugin):
-         # rpms. It does not make up for a lot of size but it contains many
-         # files
-         self.add_forbidden_path([
--            os.path.join(var_conf_dir, 'pmchart'),
--            os.path.join(var_conf_dir, 'pmlogconf'),
--            os.path.join(var_conf_dir, 'pmieconf'),
--            os.path.join(var_conf_dir, 'pmlogrewrite')
-+            self.path_join(var_conf_dir, 'pmchart'),
-+            self.path_join(var_conf_dir, 'pmlogconf'),
-+            self.path_join(var_conf_dir, 'pmieconf'),
-+            self.path_join(var_conf_dir, 'pmlogrewrite')
-         ])
- 
-         # Take PCP_LOG_DIR/pmlogger/`hostname` + PCP_LOG_DIR/pmmgr/`hostname`
-@@ -121,13 +121,13 @@ class Pcp(Plugin, RedHatPlugin, DebianPlugin):
-         # we would collect everything
-         if self.pcp_hostname != '':
-             # collect pmmgr logs up to 'pmmgrlogs' size limit
--            path = os.path.join(self.pcp_log_dir, 'pmmgr',
--                                self.pcp_hostname, '*')
-+            path = self.path_join(self.pcp_log_dir, 'pmmgr',
-+                                  self.pcp_hostname, '*')
-             self.add_copy_spec(path, sizelimit=self.sizelimit, tailit=False)
-             # collect newest pmlogger logs up to 'pmloggerfiles' count
-             files_collected = 0
--            path = os.path.join(self.pcp_log_dir, 'pmlogger',
--                                self.pcp_hostname, '*')
-+            path = self.path_join(self.pcp_log_dir, 'pmlogger',
-+                                  self.pcp_hostname, '*')
-             pmlogger_ls = self.exec_cmd("ls -t1 %s" % path)
-             if pmlogger_ls['status'] == 0:
-                 for line in pmlogger_ls['output'].splitlines():
-@@ -138,15 +138,15 @@ class Pcp(Plugin, RedHatPlugin, DebianPlugin):
- 
-         self.add_copy_spec([
-             # Collect PCP_LOG_DIR/pmcd and PCP_LOG_DIR/NOTICES
--            os.path.join(self.pcp_log_dir, 'pmcd'),
--            os.path.join(self.pcp_log_dir, 'NOTICES*'),
-+            self.path_join(self.pcp_log_dir, 'pmcd'),
-+            self.path_join(self.pcp_log_dir, 'NOTICES*'),
-             # Collect PCP_VAR_DIR/pmns
--            os.path.join(self.pcp_var_dir, 'pmns'),
-+            self.path_join(self.pcp_var_dir, 'pmns'),
-             # Also collect any other log and config files
-             # (as suggested by fche)
--            os.path.join(self.pcp_log_dir, '*/*.log*'),
--            os.path.join(self.pcp_log_dir, '*/*/*.log*'),
--            os.path.join(self.pcp_log_dir, '*/*/config*')
-+            self.path_join(self.pcp_log_dir, '*/*.log*'),
-+            self.path_join(self.pcp_log_dir, '*/*/*.log*'),
-+            self.path_join(self.pcp_log_dir, '*/*/config*')
-         ])
- 
-         # Collect a summary for the current day
-diff --git a/sos/report/plugins/postfix.py b/sos/report/plugins/postfix.py
-index 8f584430..3ca0c4ad 100644
---- a/sos/report/plugins/postfix.py
-+++ b/sos/report/plugins/postfix.py
-@@ -41,7 +41,7 @@ class Postfix(Plugin):
-         ]
-         fp = []
-         try:
--            with open('/etc/postfix/main.cf', 'r') as cffile:
-+            with open(self.path_join('/etc/postfix/main.cf'), 'r') as cffile:
-                 for line in cffile.readlines():
-                     # ignore comments and take the first word after '='
-                     if line.startswith('#'):
-diff --git a/sos/report/plugins/postgresql.py b/sos/report/plugins/postgresql.py
-index bec0b019..00824db7 100644
---- a/sos/report/plugins/postgresql.py
-+++ b/sos/report/plugins/postgresql.py
-@@ -124,7 +124,7 @@ class RedHatPostgreSQL(PostgreSQL, SCLPlugin):
- 
-             # copy PG_VERSION and postmaster.opts
-             for f in ["PG_VERSION", "postmaster.opts"]:
--                self.add_copy_spec(os.path.join(_dir, "data", f))
-+                self.add_copy_spec(self.path_join(_dir, "data", f))
- 
- 
- class DebianPostgreSQL(PostgreSQL, DebianPlugin, UbuntuPlugin):
-diff --git a/sos/report/plugins/powerpc.py b/sos/report/plugins/powerpc.py
-index 4fb4f87c..50f88650 100644
---- a/sos/report/plugins/powerpc.py
-+++ b/sos/report/plugins/powerpc.py
-@@ -22,7 +22,7 @@ class PowerPC(Plugin, IndependentPlugin):
- 
-     def setup(self):
-         try:
--            with open('/proc/cpuinfo', 'r') as fp:
-+            with open(self.path_join('/proc/cpuinfo'), 'r') as fp:
-                 contents = fp.read()
-                 ispSeries = "pSeries" in contents
-                 isPowerNV = "PowerNV" in contents
-diff --git a/sos/report/plugins/processor.py b/sos/report/plugins/processor.py
-index 2df2dc9a..c3d8930c 100644
---- a/sos/report/plugins/processor.py
-+++ b/sos/report/plugins/processor.py
-@@ -7,7 +7,6 @@
- # See the LICENSE file in the source distribution for further information.
- 
- from sos.report.plugins import Plugin, IndependentPlugin
--import os
- 
- 
- class Processor(Plugin, IndependentPlugin):
-@@ -41,7 +40,7 @@ class Processor(Plugin, IndependentPlugin):
-         # cumulative directory size exceeds 25MB or even 100MB.
-         cdirs = self.listdir('/sys/devices/system/cpu')
-         self.add_copy_spec([
--            os.path.join('/sys/devices/system/cpu', cdir) for cdir in cdirs
-+            self.path_join('/sys/devices/system/cpu', cdir) for cdir in cdirs
-         ])
- 
-         self.add_cmd_output([
-diff --git a/sos/report/plugins/python.py b/sos/report/plugins/python.py
-index e2ab39ab..a8ec0cd8 100644
---- a/sos/report/plugins/python.py
-+++ b/sos/report/plugins/python.py
-@@ -68,9 +68,9 @@ class RedHatPython(Python, RedHatPlugin):
-             ]
- 
-             for py_path in py_paths:
--                for root, _, files in os.walk(py_path):
-+                for root, _, files in os.walk(self.path_join(py_path)):
-                     for file_ in files:
--                        filepath = os.path.join(root, file_)
-+                        filepath = self.path_join(root, file_)
-                         if filepath.endswith('.py'):
-                             try:
-                                 with open(filepath, 'rb') as f:
-diff --git a/sos/report/plugins/sar.py b/sos/report/plugins/sar.py
-index 669f5d7b..b60005b1 100644
---- a/sos/report/plugins/sar.py
-+++ b/sos/report/plugins/sar.py
-@@ -7,7 +7,6 @@
- # See the LICENSE file in the source distribution for further information.
- 
- from sos.report.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin
--import os
- import re
- 
- 
-@@ -24,7 +23,7 @@ class Sar(Plugin,):
-                     "", False)]
- 
-     def setup(self):
--        self.add_copy_spec(os.path.join(self.sa_path, '*'),
-+        self.add_copy_spec(self.path_join(self.sa_path, '*'),
-                            sizelimit=0 if self.get_option("all_sar") else None,
-                            tailit=False)
- 
-@@ -44,7 +43,7 @@ class Sar(Plugin,):
-         # as option for sadc
-         for fname in dir_list:
-             if sa_regex.match(fname):
--                sa_data_path = os.path.join(self.sa_path, fname)
-+                sa_data_path = self.path_join(self.sa_path, fname)
-                 sar_filename = 'sar' + fname[2:]
-                 if sar_filename not in dir_list:
-                     sar_cmd = 'sh -c "sar -A -f %s"' % sa_data_path
-diff --git a/sos/report/plugins/sos_extras.py b/sos/report/plugins/sos_extras.py
-index ffde4138..55bc4dc0 100644
---- a/sos/report/plugins/sos_extras.py
-+++ b/sos/report/plugins/sos_extras.py
-@@ -58,7 +58,7 @@ class SosExtras(Plugin, IndependentPlugin):
- 
-         for path, dirlist, filelist in os.walk(self.extras_dir):
-             for f in filelist:
--                _file = os.path.join(path, f)
-+                _file = self.path_join(path, f)
-                 self._log_warn("Collecting data from extras file %s" % _file)
-                 try:
-                     for line in open(_file).read().splitlines():
-diff --git a/sos/report/plugins/ssh.py b/sos/report/plugins/ssh.py
-index 971cda8b..9ac9dec0 100644
---- a/sos/report/plugins/ssh.py
-+++ b/sos/report/plugins/ssh.py
-@@ -42,7 +41,7 @@ class Ssh(Plugin, IndependentPlugin):
-         try:
-             for sshcfg in sshcfgs:
-                 tag = sshcfg.split('/')[-1]
--                with open(sshcfg, 'r') as cfgfile:
-+                with open(self.path_join(sshcfg), 'r') as cfgfile:
-                     for line in cfgfile:
-                         # skip empty lines and comments
-                         if len(line.split()) == 0 or line.startswith('#'):
-diff --git a/sos/report/plugins/unpackaged.py b/sos/report/plugins/unpackaged.py
-index 9205e53f..772b1d1f 100644
---- a/sos/report/plugins/unpackaged.py
-+++ b/sos/report/plugins/unpackaged.py
-@@ -40,7 +40,7 @@ class Unpackaged(Plugin, RedHatPlugin):
-                     for e in exclude:
-                         dirs[:] = [d for d in dirs if d not in e]
-                 for name in files:
--                    path = os.path.join(root, name)
-+                    path = self.path_join(root, name)
-                     try:
-                         if stat.S_ISLNK(os.lstat(path).st_mode):
-                             path = Path(path).resolve()
-@@ -49,7 +49,7 @@ class Unpackaged(Plugin, RedHatPlugin):
-                     file_list.append(os.path.realpath(path))
-                 for name in dirs:
-                     file_list.append(os.path.realpath(
--                                     os.path.join(root, name)))
-+                                     self.path_join(root, name)))
- 
-             return file_list
- 
-diff --git a/sos/report/plugins/watchdog.py b/sos/report/plugins/watchdog.py
-index 1bf3f4cb..bf2dc9cb 100644
---- a/sos/report/plugins/watchdog.py
-+++ b/sos/report/plugins/watchdog.py
-@@ -11,7 +11,6 @@
- from sos.report.plugins import Plugin, RedHatPlugin
- 
- from glob import glob
--import os
- 
- 
- class Watchdog(Plugin, RedHatPlugin):
-@@ -56,8 +55,8 @@ class Watchdog(Plugin, RedHatPlugin):
-             Collect configuration files, custom executables for test-binary
-             and repair-binary, and stdout/stderr logs.
-         """
--        conf_file = self.get_option('conf_file')
--        log_dir = '/var/log/watchdog'
-+        conf_file = self.path_join(self.get_option('conf_file'))
-+        log_dir = self.path_join('/var/log/watchdog')
- 
-         # Get service configuration and sysconfig files
-         self.add_copy_spec([
-@@ -80,15 +79,15 @@ class Watchdog(Plugin, RedHatPlugin):
-             self._log_warn("Could not read %s: %s" % (conf_file, ex))
- 
-         if self.get_option('all_logs'):
--            log_files = glob(os.path.join(log_dir, '*'))
-+            log_files = glob(self.path_join(log_dir, '*'))
-         else:
--            log_files = (glob(os.path.join(log_dir, '*.stdout')) +
--                         glob(os.path.join(log_dir, '*.stderr')))
-+            log_files = (glob(self.path_join(log_dir, '*.stdout')) +
-+                         glob(self.path_join(log_dir, '*.stderr')))
- 
-         self.add_copy_spec(log_files)
- 
-         # Get output of "wdctl <device>" for each /dev/watchdog*
--        for dev in glob('/dev/watchdog*'):
-+        for dev in glob(self.path_join('/dev/watchdog*')):
-             self.add_cmd_output("wdctl %s" % dev)
- 
- # vim: set et ts=4 sw=4 :
-diff --git a/sos/report/plugins/yum.py b/sos/report/plugins/yum.py
-index 148464cb..e5256642 100644
---- a/sos/report/plugins/yum.py
-+++ b/sos/report/plugins/yum.py
-@@ -61,7 +61,7 @@ class Yum(Plugin, RedHatPlugin):
-                 if not p.endswith(".py"):
-                     continue
-                 plugins = plugins + " " if len(plugins) else ""
--                plugins = plugins + os.path.join(YUM_PLUGIN_PATH, p)
-+                plugins = plugins + self.path_join(YUM_PLUGIN_PATH, p)
-             if len(plugins):
-                 self.add_cmd_output("rpm -qf %s" % plugins,
-                                     suggest_filename="plugin-packages")
--- 
-2.31.1
-
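The os.path.join to self.path_join sweep above is about sysroot awareness: when sos collects against an alternate root (for example, a host filesystem mounted into a container), plugin paths must be re-anchored under that root rather than the collector's own /. A minimal sketch of the idea, using a hypothetical stripped-down Plugin rather than sos's real class:

    import os

    class Plugin:
        # hypothetical stand-in; sos's real Plugin derives its sysroot
        # from policy and command line, not a constructor argument
        def __init__(self, sysroot='/'):
            self.sysroot = sysroot

        def path_join(self, path, *parts):
            # re-anchor absolute paths under the sysroot so reads hit
            # the target filesystem, not the collector's own
            if os.path.isabs(path):
                path = os.path.join(self.sysroot, path.lstrip(os.sep))
            return os.path.join(path, *parts)

    p = Plugin(sysroot='/host')
    print(p.path_join('/etc/openvswitch', 'ovnnb_db.db'))
    # /host/etc/openvswitch/ovnnb_db.db

This is also why single-argument literals like '/etc/default/pacemaker' get wrapped: the wrapping itself is what applies the sysroot.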
-From f4af5efdc79aefe1aa685c36d095925bae14dc4a Mon Sep 17 00:00:00 2001
-From: Jake Hunsaker <jhunsake@redhat.com>
-Date: Tue, 28 Sep 2021 13:00:17 -0400
-Subject: [PATCH 1/4] [collect] Add --transport option and allow clusters to
- set transport type
-
-Adds a new `--transport` option for users to be able to specify the type
-of transport to use when connecting to nodes. The default value of
-`auto` will defer to the cluster profile to set the transport type,
-which will continue to default to use OpenSSH's ControlPersist feature.
-
-Clusters may override the new `set_transport_type()` method to change
-the default transport used.
-
-If `--transport` is anything besides `auto`, then the cluster profile
-will not be deferred to when choosing a transport for each remote node.
-
-Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
----
- man/en/sos-collect.1               | 15 +++++++++++++++
- sos/collector/__init__.py          |  6 ++++++
- sos/collector/clusters/__init__.py | 10 ++++++++++
- sos/collector/exceptions.py        | 13 ++++++++++++-
- sos/collector/sosnode.py           | 16 +++++++++++++++-
- 5 files changed, 58 insertions(+), 2 deletions(-)
-
-diff --git a/man/en/sos-collect.1 b/man/en/sos-collect.1
-index e930023e..8ad4fe5e 100644
---- a/man/en/sos-collect.1
-+++ b/man/en/sos-collect.1
-@@ -43,6 +43,7 @@ sos collect \- Collect sosreports from multiple (cluster) nodes
-     [\-\-sos-cmd SOS_CMD]
-     [\-t|\-\-threads THREADS]
-     [\-\-timeout TIMEOUT]
-+    [\-\-transport TRANSPORT]
-     [\-\-tmp\-dir TMP_DIR]
-     [\-v|\-\-verbose]
-     [\-\-verify]
-@@ -350,6 +351,20 @@ Note that sosreports are collected in parallel, so you can approximate the total
- runtime of sos collect via timeout*(number of nodes/jobs).
- 
- Default is 180 seconds.
-+.TP
-+\fB\-\-transport\fR TRANSPORT
-+Specify the type of remote transport to use to manage connections to remote nodes.
-+
-+\fBsos collect\fR uses locally installed binaries to connect to and interact with remote
-+nodes, instead of directly establishing those connections. By default, OpenSSH's ControlPersist
-+feature is preferred; however, certain cluster types may have preferences of their own for how
-+remote sessions should be established.
-+
-+The types of transports supported are currently as follows:
-+
-+    \fBauto\fR                  Allow the cluster type to determine the transport used
-+    \fBcontrol_persist\fR       Use OpenSSH's ControlPersist feature. This is the default behavior
-+
- .TP
- \fB\-\-tmp\-dir\fR TMP_DIR
- Specify a temporary directory to save sos archives to. By default one will be created in
-diff --git a/sos/collector/__init__.py b/sos/collector/__init__.py
-index da912655..fecfe6aa 100644
---- a/sos/collector/__init__.py
-+++ b/sos/collector/__init__.py
-@@ -98,6 +98,7 @@ class SoSCollector(SoSComponent):
-         'ssh_port': 22,
-         'ssh_user': 'root',
-         'timeout': 600,
-+        'transport': 'auto',
-         'verify': False,
-         'usernames': [],
-         'upload': False,
-@@ -378,6 +379,8 @@ class SoSCollector(SoSComponent):
-                                  help='Specify an SSH user. Default root')
-         collect_grp.add_argument('--timeout', type=int, required=False,
-                                  help='Timeout for sosreport on each node.')
-+        collect_grp.add_argument('--transport', default='auto', type=str,
-+                                 help='Remote connection transport to use')
-         collect_grp.add_argument("--upload", action="store_true",
-                                  default=False,
-                                  help="Upload archive to a policy-default "
-@@ -813,6 +813,8 @@ class SoSCollector(SoSComponent):
-         self.collect_md.add_field('cluster_type', self.cluster_type)
-         if self.cluster:
-             self.master.cluster = self.cluster
-+            if self.opts.transport == 'auto':
-+                self.opts.transport = self.cluster.set_transport_type()
-             self.cluster.setup()
-             if self.cluster.cluster_ssh_key:
-                 if not self.opts.ssh_key:
-@@ -1041,6 +1046,7 @@ class SoSCollector(SoSComponent):
-             else:
-                 client.disconnect()
-         except Exception:
-+            # all exception logging is handled within SoSNode
-             pass
- 
-     def intro(self):
-diff --git a/sos/collector/clusters/__init__.py b/sos/collector/clusters/__init__.py
-index 64ac2a44..cf1e7a0b 100644
---- a/sos/collector/clusters/__init__.py
-+++ b/sos/collector/clusters/__init__.py
-@@ -149,6 +149,16 @@ class Cluster():
-         """
-         pass
- 
-+    def set_transport_type(self):
-+        """The default connection type used by sos collect is to leverage the
-+        local system's SSH installation using ControlPersist; however, certain
-+        cluster types may want to use something else.
-+
-+        Override this in a specific cluster profile to set the ``transport``
-+        option according to what type of transport should be used.
-+        """
-+        return 'control_persist'
-+
-     def set_master_options(self, node):
-         """If there is a need to set specific options in the sos command being
-         run on the cluster's master nodes, override this method in the cluster
-diff --git a/sos/collector/exceptions.py b/sos/collector/exceptions.py
-index 1e44768b..2bb07e7b 100644
---- a/sos/collector/exceptions.py
-+++ b/sos/collector/exceptions.py
-@@ -94,6 +94,16 @@ class UnsupportedHostException(Exception):
-         super(UnsupportedHostException, self).__init__(message)
- 
- 
-+class InvalidTransportException(Exception):
-+    """Raised when a transport is requested but it does not exist or is
-+    not supported locally"""
-+
-+    def __init__(self, transport=None):
-+        message = ("Connection failed: unknown or unsupported transport %s"
-+                   % transport if transport else '')
-+        super(InvalidTransportException, self).__init__(message)
-+
-+
- __all__ = [
-     'AuthPermissionDeniedException',
-     'CommandTimeoutException',
-@@ -104,5 +114,6 @@ __all__ = [
-     'InvalidPasswordException',
-     'PasswordRequestException',
-     'TimeoutPasswordAuthException',
--    'UnsupportedHostException'
-+    'UnsupportedHostException',
-+    'InvalidTransportException'
- ]
-diff --git a/sos/collector/sosnode.py b/sos/collector/sosnode.py
-index f79bd5ff..5c5c7201 100644
---- a/sos/collector/sosnode.py
-+++ b/sos/collector/sosnode.py
-@@ -22,7 +22,13 @@ from sos.collector.transports.control_persist import SSHControlPersist
- from sos.collector.transports.local import LocalTransport
- from sos.collector.exceptions import (CommandTimeoutException,
-                                       ConnectionException,
--                                      UnsupportedHostException)
-+                                      UnsupportedHostException,
-+                                      InvalidTransportException)
-+
-+TRANSPORTS = {
-+    'local': LocalTransport,
-+    'control_persist': SSHControlPersist,
-+}
- 
- 
- class SosNode():
-@@ -107,6 +113,14 @@ class SosNode():
-         if self.address in ['localhost', '127.0.0.1']:
-             self.local = True
-             return LocalTransport(self.address, commons)
-+        elif self.opts.transport in TRANSPORTS.keys():
-+            return TRANSPORTS[self.opts.transport](self.address, commons)
-+        elif self.opts.transport != 'auto':
-+            self.log_error(
-+                "Connection failed: unknown or unsupported transport %s"
-+                % self.opts.transport
-+            )
-+            raise InvalidTransportException(self.opts.transport)
-         return SSHControlPersist(self.address, commons)
- 
-     def _fmt_msg(self, msg):
--- 
-2.31.1
-
-
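The interplay between --transport, set_transport_type() and the TRANSPORTS map reduces to a small resolve-with-fallback: 'auto' defers to the cluster profile, and any other value must name a registered transport or the run aborts. A self-contained sketch with stand-in classes (not the real transports):

    class SSHControlPersist:
        pass

    class LocalTransport:
        pass

    TRANSPORTS = {
        'control_persist': SSHControlPersist,
        'local': LocalTransport,
    }

    def resolve_transport(requested, cluster_default='control_persist'):
        # 'auto' defers to the cluster profile's preference; anything
        # else must be a known transport, else fail loudly
        name = cluster_default if requested == 'auto' else requested
        if name not in TRANSPORTS:
            raise ValueError("unknown or unsupported transport %s" % name)
        return TRANSPORTS[name]

    assert resolve_transport('auto') is SSHControlPersist
    assert resolve_transport('local') is LocalTransport

Note the ordering in the SosNode hunk above: localhost short-circuits to LocalTransport before the user's choice is even consulted.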
-From dbc49345384404600f45d68b8d3c6541b2a26480 Mon Sep 17 00:00:00 2001
-From: Jake Hunsaker <jhunsake@redhat.com>
-Date: Thu, 30 Sep 2021 10:38:18 -0400
-Subject: [PATCH 2/4] [transports] Add 'oc' as a transport option for remote
- nodes
-
-This commit adds a new transport for `sos collect` by leveraging a
-locally available `oc` binary that has been properly configured for
-access to an OCP cluster.
-
-This transport will allow users to use `sos collect` to collect reports
-from an OCP cluster without directly connecting to any of the nodes
-involved. We do this by using the `oc` binary to first launch a pod on
-target node(s) and then exec our discovery commands and eventual `sos
-report` command to that pod. This in turn is dependent on a function API
-for the `oc` binary to communicate with. In the event that `oc` is not
-__locally__ available or is not properly configured, we will fallback to
-the current default of using SSH ControlPersist to directly connect to
-the nodes. Otherwise, the OCP cluster will attempt to automatically use
-this new transport.
----
- man/en/sos-collect.1                 |   1 +
- sos/collector/clusters/ocp.py        |  14 ++
- sos/collector/sosnode.py             |   8 +-
- sos/collector/transports/__init__.py |  20 ++-
- sos/collector/transports/oc.py       | 220 +++++++++++++++++++++++++++
- 5 files changed, 257 insertions(+), 6 deletions(-)
- create mode 100644 sos/collector/transports/oc.py
-
-diff --git a/man/en/sos-collect.1 b/man/en/sos-collect.1
-index 8ad4fe5e..a1f6c10e 100644
---- a/man/en/sos-collect.1
-+++ b/man/en/sos-collect.1
-@@ -364,6 +364,7 @@ The types of transports supported are currently as follows:
- 
-     \fBauto\fR                  Allow the cluster type to determine the transport used
-     \fBcontrol_persist\fR       Use OpenSSH's ControlPersist feature. This is the default behavior
-+    \fBoc\fR                    Use a \fBlocally\fR configured \fBoc\fR binary to deploy collection pods on OCP nodes
- 
- .TP
- \fB\-\-tmp\-dir\fR TMP_DIR
-diff --git a/sos/collector/clusters/ocp.py b/sos/collector/clusters/ocp.py
-index ad97587f..a9357dbf 100644
---- a/sos/collector/clusters/ocp.py
-+++ b/sos/collector/clusters/ocp.py
-@@ -12,6 +12,7 @@ import os
- 
- from pipes import quote
- from sos.collector.clusters import Cluster
-+from sos.utilities import is_executable
- 
- 
- class ocp(Cluster):
-@@ -83,6 +84,19 @@ class ocp(Cluster):
-                     nodes[_node[0]][column] = _node[idx[column]]
-         return nodes
- 
-+    def set_transport_type(self):
-+        if is_executable('oc'):
-+            return 'oc'
-+        self.log_info("Local installation of 'oc' not found or is not "
-+                      "correctly configured. Will use ControlPersist")
-+        self.ui_log.warn(
-+            "Preferred transport 'oc' not available, will fallback to SSH."
-+        )
-+        if not self.opts.batch:
-+            input("Press ENTER to continue connecting with SSH, or Ctrl+C to"
-+                  "abort.")
-+        return 'control_persist'
-+
-     def get_nodes(self):
-         nodes = []
-         self.node_dict = {}
-diff --git a/sos/collector/sosnode.py b/sos/collector/sosnode.py
-index 5c5c7201..8a9dbd7a 100644
---- a/sos/collector/sosnode.py
-+++ b/sos/collector/sosnode.py
-@@ -20,6 +20,7 @@ from sos.policies import load
- from sos.policies.init_systems import InitSystem
- from sos.collector.transports.control_persist import SSHControlPersist
- from sos.collector.transports.local import LocalTransport
-+from sos.collector.transports.oc import OCTransport
- from sos.collector.exceptions import (CommandTimeoutException,
-                                       ConnectionException,
-                                       UnsupportedHostException,
-@@ -28,6 +29,7 @@ from sos.collector.exceptions import (CommandTimeoutException,
- TRANSPORTS = {
-     'local': LocalTransport,
-     'control_persist': SSHControlPersist,
-+    'oc': OCTransport
- }
- 
- 
-@@ -421,13 +423,11 @@ class SosNode():
-         if 'atomic' in cmd:
-             get_pty = True
- 
--        if get_pty:
--            cmd = "/bin/bash -c %s" % quote(cmd)
--
-         if env:
-             _cmd_env = self.env_vars
-             env = _cmd_env.update(env)
--        return self._transport.run_command(cmd, timeout, need_root, env)
-+        return self._transport.run_command(cmd, timeout, need_root, env,
-+                                           get_pty)
- 
-     def sosreport(self):
-         """Run an sos report on the node, then collect it"""
-diff --git a/sos/collector/transports/__init__.py b/sos/collector/transports/__init__.py
-index 5be7dc6d..7bffee62 100644
---- a/sos/collector/transports/__init__.py
-+++ b/sos/collector/transports/__init__.py
-@@ -144,7 +144,8 @@ class RemoteTransport():
-         raise NotImplementedError("Transport %s does not define disconnect"
-                                   % self.name)
- 
--    def run_command(self, cmd, timeout=180, need_root=False, env=None):
-+    def run_command(self, cmd, timeout=180, need_root=False, env=None,
-+                    get_pty=False):
-         """Run a command on the node, returning its output and exit code.
-         This should return the exit code of the command being executed, not the
-         exit code of whatever mechanism the transport uses to execute that
-@@ -165,10 +166,15 @@ class RemoteTransport():
-         :param env:         Specify env vars to be passed to the ``cmd``
-         :type env:          ``dict``
- 
-+        :param get_pty:     Does ``cmd`` require execution with a pty?
-+        :type get_pty:      ``bool``
-+
-         :returns:           Output of ``cmd`` and the exit code
-         :rtype:             ``dict`` with keys ``output`` and ``status``
-         """
-         self.log_debug('Running command %s' % cmd)
-+        if get_pty:
-+            cmd = "/bin/bash -c %s" % quote(cmd)
-         # currently we only use/support the use of pexpect for handling the
-         # execution of these commands, as opposed to directly invoking
-         # subprocess.Popen() in conjunction with tools like sshpass.
-@@ -212,6 +218,13 @@ class RemoteTransport():
-         :type env:      ``dict``
-         """
-         cmd = self._format_cmd_for_exec(cmd)
-+
-+        # if for any reason env is empty, set it to None as otherwise
-+        # pexpect interprets this to mean "run this command with no env vars of
-+        # any kind"
-+        if not env:
-+            env = None
-+
-         result = pexpect.spawn(cmd, encoding='utf-8', env=env)
- 
-         _expects = [pexpect.EOF, pexpect.TIMEOUT]
-@@ -268,6 +281,9 @@ class RemoteTransport():
-         _out = self.run_command('hostname')
-         if _out['status'] == 0:
-             self._hostname = _out['output'].strip()
-+
-+        if not self._hostname:
-+            self._hostname = self.address
-         self.log_info("Hostname set to %s" % self._hostname)
-         return self._hostname
- 
-@@ -302,7 +318,7 @@ class RemoteTransport():
-         return self._read_file(fname)
- 
-     def _read_file(self, fname):
--        res = self.run_command("cat %s" % fname, timeout=5)
-+        res = self.run_command("cat %s" % fname, timeout=10)
-         if res['status'] == 0:
-             return res['output']
-         else:
-diff --git a/sos/collector/transports/oc.py b/sos/collector/transports/oc.py
-new file mode 100644
-index 00000000..649037b9
---- /dev/null
-+++ b/sos/collector/transports/oc.py
-@@ -0,0 +1,220 @@
-+# Copyright Red Hat 2021, Jake Hunsaker <jhunsake@redhat.com>
-+
-+# This file is part of the sos project: https://github.com/sosreport/sos
-+#
-+# This copyrighted material is made available to anyone wishing to use,
-+# modify, copy, or redistribute it subject to the terms and conditions of
-+# version 2 of the GNU General Public License.
-+#
-+# See the LICENSE file in the source distribution for further information.
-+
-+import json
-+import tempfile
-+import os
-+
-+from sos.collector.transports import RemoteTransport
-+from sos.utilities import (is_executable, sos_get_command_output,
-+                           SoSTimeoutError)
-+
-+
-+class OCTransport(RemoteTransport):
-+    """This transport leverages the execution of commands via a locally
-+    available and configured ``oc`` binary for OCPv4 environments.
-+
-+    OCPv4 clusters generally discourage the use of SSH, so this transport may
-+    be used to remove our use of SSH in favor of the environment-provided
-+    method of connecting to nodes and executing commands via debug pods.
-+
-+    Note that this approach will generate multiple debug pods over the course
-+    of our execution.
-+    """
-+
-+    name = 'oc'
-+    project = 'sos-collect-tmp'
-+
-+    def run_oc(self, cmd, **kwargs):
-+        """Format and run a command with `oc` in the project defined for our
-+        execution
-+        """
-+        return sos_get_command_output(
-+            "oc -n sos-collect-tmp %s" % cmd,
-+            **kwargs
-+        )
-+
-+    @property
-+    def connected(self):
-+        up = self.run_oc(
-+            "wait --timeout=0s --for=condition=ready pod/%s" % self.pod_name
-+        )
-+        return up['status'] == 0
-+
-+    def get_node_pod_config(self):
-+        """Based on our template for the debug container, add the node-specific
-+        items so that we can deploy one of these on each node we're collecting
-+        from
-+        """
-+        return {
-+            "kind": "Pod",
-+            "apiVersion": "v1",
-+            "metadata": {
-+                "name": "%s-sos-collector" % self.address.split('.')[0],
-+                "namespace": "sos-collect-tmp"
-+            },
-+            "priorityClassName": "system-cluster-critical",
-+            "spec": {
-+                "volumes": [
-+                    {
-+                        "name": "host",
-+                        "hostPath": {
-+                            "path": "/",
-+                            "type": "Directory"
-+                        }
-+                    },
-+                    {
-+                        "name": "run",
-+                        "hostPath": {
-+                            "path": "/run",
-+                            "type": "Directory"
-+                        }
-+                    },
-+                    {
-+                        "name": "varlog",
-+                        "hostPath": {
-+                            "path": "/var/log",
-+                            "type": "Directory"
-+                        }
-+                    },
-+                    {
-+                        "name": "machine-id",
-+                        "hostPath": {
-+                            "path": "/etc/machine-id",
-+                            "type": "File"
-+                        }
-+                    }
-+                ],
-+                "containers": [
-+                    {
-+                        "name": "sos-collector-tmp",
-+                        "image": "registry.redhat.io/rhel8/support-tools",
-+                        "command": [
-+                            "/bin/bash"
-+                        ],
-+                        "env": [
-+                            {
-+                                "name": "HOST",
-+                                "value": "/host"
-+                            }
-+                        ],
-+                        "resources": {},
-+                        "volumeMounts": [
-+                            {
-+                                "name": "host",
-+                                "mountPath": "/host"
-+                            },
-+                            {
-+                                "name": "run",
-+                                "mountPath": "/run"
-+                            },
-+                            {
-+                                "name": "varlog",
-+                                "mountPath": "/var/log"
-+                            },
-+                            {
-+                                "name": "machine-id",
-+                                "mountPath": "/etc/machine-id"
-+                            }
-+                        ],
-+                        "securityContext": {
-+                            "privileged": True,
-+                            "runAsUser": 0
-+                        },
-+                        "stdin": True,
-+                        "stdinOnce": True,
-+                        "tty": True
-+                    }
-+                ],
-+                "restartPolicy": "Never",
-+                "nodeName": self.address,
-+                "hostNetwork": True,
-+                "hostPID": True,
-+                "hostIPC": True
-+            }
-+        }
-+
-+    def _connect(self, password):
-+        # the oc binary must be _locally_ available for this to work
-+        if not is_executable('oc'):
-+            return False
-+
-+        # deploy the debug container we'll exec into
-+        podconf = self.get_node_pod_config()
-+        self.pod_name = podconf['metadata']['name']
-+        fd, self.pod_tmp_conf = tempfile.mkstemp(dir=self.tmpdir)
-+        with open(fd, 'w') as cfile:
-+            json.dump(podconf, cfile)
-+        self.log_debug("Starting sos collector container '%s'" % self.pod_name)
-+        # this specifically does not need to run with a project definition
-+        out = sos_get_command_output(
-+            "oc create -f %s" % self.pod_tmp_conf
-+        )
-+        if (out['status'] != 0 or "pod/%s created" % self.pod_name not in
-+                out['output']):
-+            self.log_error("Unable to deploy sos collect pod")
-+            self.log_debug("Debug pod deployment failed: %s" % out['output'])
-+            return False
-+        self.log_debug("Pod '%s' successfully deployed, waiting for pod to "
-+                       "enter ready state" % self.pod_name)
-+
-+        # wait for the pod to report as running
-+        try:
-+            up = self.run_oc("wait --for=condition=Ready pod/%s --timeout=30s"
-+                             % self.pod_name,
-+                             # timeout is for local safety, not oc
-+                             timeout=40)
-+            if not up['status'] == 0:
-+                self.log_error("Pod not available after 30 seconds")
-+                return False
-+        except SoSTimeoutError:
-+            self.log_error("Timeout while polling for pod readiness")
-+            return False
-+        except Exception as err:
-+            self.log_error("Error while waiting for pod to be ready: %s"
-+                           % err)
-+            return False
-+
-+        return True
-+
-+    def _format_cmd_for_exec(self, cmd):
-+        if cmd.startswith('oc'):
-+            return ("oc -n %s exec --request-timeout=0 %s -- chroot /host %s"
-+                    % (self.project, self.pod_name, cmd))
-+        return super(OCTransport, self)._format_cmd_for_exec(cmd)
-+
-+    def run_command(self, cmd, timeout=180, need_root=False, env=None,
-+                    get_pty=False):
-+        # debug pod setup is slow, extend all timeouts to account for this
-+        if timeout:
-+            timeout += 10
-+
-+        # since we always execute within a bash shell, force disable get_pty
-+        # to avoid double-quoting
-+        return super(OCTransport, self).run_command(cmd, timeout, need_root,
-+                                                    env, False)
-+
-+    def _disconnect(self):
-+        os.unlink(self.pod_tmp_conf)
-+        removed = self.run_oc("delete pod %s" % self.pod_name)
-+        if "deleted" not in removed['output']:
-+            self.log_debug("Calling delete on pod '%s' failed: %s"
-+                           % (self.pod_name, removed))
-+            return False
-+        return True
-+
-+    @property
-+    def remote_exec(self):
-+        return ("oc -n %s exec --request-timeout=0 %s -- /bin/bash -c"
-+                % (self.project, self.pod_name))
-+
-+    def _retrieve_file(self, fname, dest):
-+        cmd = self.run_oc("cp %s:%s %s" % (self.pod_name, fname, dest))
-+        return cmd['status'] == 0
--- 
-2.31.1
-
-
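All node-level work in the oc transport funnels through one rewrite, visible in _format_cmd_for_exec() and remote_exec above: wrap the command in an oc exec against the node's debug pod, then chroot into /host where the node's filesystem is bind-mounted. A standalone sketch of that wrapper (function name invented for illustration):

    def wrap_for_debug_pod(project, pod_name, cmd):
        # chroot /host makes the bind-mounted node filesystem look
        # like / to the command, as if it ran on the node directly
        return ("oc -n %s exec --request-timeout=0 %s -- chroot /host %s"
                % (project, pod_name, cmd))

    print(wrap_for_debug_pod('sos-collect-tmp', 'master-0-sos-collector',
                             'hostname'))

The pod name used here follows the '%s-sos-collector' pattern from get_node_pod_config().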
-From 460494c4296db1a7529b44fe8f6597544c917c02 Mon Sep 17 00:00:00 2001
-From: Jake Hunsaker <jhunsake@redhat.com>
-Date: Mon, 11 Oct 2021 11:50:44 -0400
-Subject: [PATCH 3/4] [ocp] Create temporary project and restrict default node
- list to masters
-
-Adds explicit setup of a new project to use in the `ocp` cluster and
-adds better handling of cluster setup generally, which the `ocp` cluster
-is the first to make use of.
-
-Included in this change is a correction to
-`Cluster.exec_primary_cmd()`'s use of `get_pty`, which is now determined
-by whether the primary node is the local node or not.
-
-Additionally, based on feedback from the OCP engineering team, by
-default restrict node lists to masters.
-
-Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
----
- sos/collector/__init__.py          |  5 ++++
- sos/collector/clusters/__init__.py | 13 +++++++-
- sos/collector/clusters/ocp.py      | 48 ++++++++++++++++++++++++++++--
- sos/collector/transports/oc.py     |  4 +--
- 4 files changed, 64 insertions(+), 6 deletions(-)
-
-diff --git a/sos/collector/__init__.py b/sos/collector/__init__.py
-index fecfe6aa..a76f8a79 100644
---- a/sos/collector/__init__.py
-+++ b/sos/collector/__init__.py
-@@ -850,6 +850,7 @@ class SoSCollector(SoSComponent):
-                       "CTRL-C to quit\n")
-                 self.ui_log.info("")
-             except KeyboardInterrupt:
-+                self.cluster.cleanup()
-                 self.exit("Exiting on user cancel", 130)
- 
-     def configure_sos_cmd(self):
-@@ -1098,6 +1099,7 @@ this utility or remote systems that it connects to.
-         self.archive.makedirs('sos_logs', 0o755)
- 
-         self.collect()
-+        self.cluster.cleanup()
-         self.cleanup()
- 
-     def collect(self):
-@@ -1156,9 +1158,11 @@ this utility or remote systems that it connects to.
-             pool.shutdown(wait=True)
-         except KeyboardInterrupt:
-             self.log_error('Exiting on user cancel\n')
-+            self.cluster.cleanup()
-             os._exit(130)
-         except Exception as err:
-             self.log_error('Could not connect to nodes: %s' % err)
-+            self.cluster.cleanup()
-             os._exit(1)
- 
-         if hasattr(self.cluster, 'run_extra_cmd'):
-@@ -1199,6 +1199,7 @@ this utility or remote systems that it c
-             arc_name = self.create_cluster_archive()
-         else:
-             msg = 'No sosreports were collected, nothing to archive...'
-+            self.cluster.cleanup()
-             self.exit(msg, 1)
- 
-         if self.opts.upload and self.policy.get_upload_url():
-diff --git a/sos/collector/clusters/__init__.py b/sos/collector/clusters/__init__.py
-index cf1e7a0b..2a4665a1 100644
---- a/sos/collector/clusters/__init__.py
-+++ b/sos/collector/clusters/__init__.py
-@@ -192,7 +192,8 @@ class Cluster():
-         :returns: The output and status of `cmd`
-         :rtype: ``dict``
-         """
--        res = self.master.run_command(cmd, get_pty=True, need_root=need_root)
-+        pty = self.master.local is False
-+        res = self.master.run_command(cmd, get_pty=pty, need_root=need_root)
-         if res['output']:
-             res['output'] = res['output'].replace('Password:', '')
-         return res
-@@ -223,6 +224,16 @@ class Cluster():
-                 return True
-         return False
- 
-+    def cleanup(self):
-+        """
-+        This may be overridden by clusters
-+
-+        Perform any necessary cleanup steps required by the cluster profile.
-+        This helps ensure that sos does not make lasting changes to the
-+        environment in which we are running.
-+        """
-+        pass
-+
-     def get_nodes(self):
-         """
-         This MUST be overridden by a cluster profile subclassing this class
-diff --git a/sos/collector/clusters/ocp.py b/sos/collector/clusters/ocp.py
-index a9357dbf..92da4e6e 100644
---- a/sos/collector/clusters/ocp.py
-+++ b/sos/collector/clusters/ocp.py
-@@ -23,10 +23,12 @@ class ocp(Cluster):
- 
-     api_collect_enabled = False
-     token = None
-+    project = 'sos-collect-tmp'
-+    oc_cluster_admin = None
- 
-     option_list = [
-         ('label', '', 'Colon delimited list of labels to select nodes with'),
--        ('role', '', 'Colon delimited list of roles to select nodes with'),
-+        ('role', 'master', 'Colon delimited list of roles to filter on'),
-         ('kubeconfig', '', 'Path to the kubeconfig file'),
-         ('token', '', 'Service account token to use for oc authorization')
-     ]
-@@ -58,6 +58,42 @@ class ocp(Cluster):
-         _who = self.fmt_oc_cmd('whoami')
-         return self.exec_master_cmd(_who)['status'] == 0
- 
-+    def setup(self):
-+        """Create the project that we will be executing in for any nodes'
-+        collection via a container image
-+        """
-+        if not self.set_transport_type() == 'oc':
-+            return
-+
-+        out = self.exec_master_cmd(self.fmt_oc_cmd("auth can-i '*' '*'"))
-+        self.oc_cluster_admin = out['status'] == 0
-+        if not self.oc_cluster_admin:
-+            self.log_debug("Check for cluster-admin privileges returned false,"
-+                           " cannot create project in OCP cluster")
-+            raise Exception("Insufficient permissions to create temporary "
-+                            "collection project.\nAborting...")
-+
-+        self.log_info("Creating new temporary project '%s'" % self.project)
-+        ret = self.exec_master_cmd("oc new-project %s" % self.project)
-+        if ret['status'] == 0:
-+            return True
-+
-+        self.log_debug("Failed to create project: %s" % ret['output'])
-+        raise Exception("Failed to create temporary project for collection. "
-+                        "\nAborting...")
-+
-+    def cleanup(self):
-+        """Remove the project we created to execute within
-+        """
-+        if self.project:
-+            ret = self.exec_master_cmd("oc delete project %s" % self.project)
-+            if not ret['status'] == 0:
-+                self.log_error("Error deleting temporary project: %s"
-+                               % ret['output'])
-+            # don't leave the config on a non-existing project
-+            self.exec_master_cmd("oc project default")
-+        return True
-+
-     def _build_dict(self, nodelist):
-         """From the output of get_nodes(), construct an easier-to-reference
-         dict of nodes that will be used in determining labels, master status,
-@@ -85,10 +123,10 @@ class ocp(Cluster):
-         return nodes
- 
-     def set_transport_type(self):
--        if is_executable('oc'):
-+        if is_executable('oc') or self.opts.transport == 'oc':
-             return 'oc'
-         self.log_info("Local installation of 'oc' not found or is not "
--                      "correctly configured. Will use ControlPersist")
-+                      "correctly configured. Will use ControlPersist.")
-         self.ui_log.warn(
-             "Preferred transport 'oc' not available, will fallback to SSH."
-         )
-@@ -106,6 +144,10 @@ class ocp(Cluster):
-             cmd += " -l %s" % quote(labels)
-         res = self.exec_master_cmd(self.fmt_oc_cmd(cmd))
-         if res['status'] == 0:
-+            if self.get_option('role') == 'master':
-+                self.log_warn("NOTE: By default, only master nodes are listed."
-+                              "\nTo collect from all/more nodes, override the "
-+                              "role option with '-c ocp.role=role1:role2'")
-             roles = [r for r in self.get_option('role').split(':')]
-             self.node_dict = self._build_dict(res['output'].splitlines())
-             for node in self.node_dict:
-diff --git a/sos/collector/transports/oc.py b/sos/collector/transports/oc.py
-index 649037b9..de044ccb 100644
---- a/sos/collector/transports/oc.py
-+++ b/sos/collector/transports/oc.py
-@@ -37,7 +37,7 @@ class OCTransport(RemoteTransport):
-         execution
-         """
-         return sos_get_command_output(
--            "oc -n sos-collect-tmp %s" % cmd,
-+            "oc -n %s %s" % (self.project, cmd),
-             **kwargs
-         )
- 
-@@ -58,7 +58,7 @@ class OCTransport(RemoteTransport):
-             "apiVersion": "v1",
-             "metadata": {
-                 "name": "%s-sos-collector" % self.address.split('.')[0],
--                "namespace": "sos-collect-tmp"
-+                "namespace": self.project
-             },
-             "priorityClassName": "system-cluster-critical",
-             "spec": {
--- 
-2.31.1
-
-
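The value of the new Cluster.cleanup() hook comes from the call sites added above: normal completion, user cancel, and both error paths in collect() all invoke it, so the temporary project cannot outlive the run. The contract, sketched as the usual acquire/release shape (a try/finally states in one place what the patch wires into each exit path):

    class Cluster:
        def setup(self):
            pass   # ocp: 'oc new-project sos-collect-tmp'

        def cleanup(self):
            pass   # ocp: 'oc delete project sos-collect-tmp'

    def run_collection(cluster):
        cluster.setup()
        try:
            pass   # connect to nodes, run sos report, fetch archives
        finally:
            # never leave the temporary project behind, even on error
            cluster.cleanup()

    run_collection(Cluster())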
-From 1bc0e9fe32491e764e622368bfe216f97bf32620 Mon Sep 17 00:00:00 2001
-From: Jake Hunsaker <jhunsake@redhat.com>
-Date: Mon, 4 Oct 2021 15:12:04 -0400
-Subject: [PATCH 4/4] [sosnode] Fix typo and small logic break
-
-Fixes a typo in setting the non-primary node options from the ocp
-profile against the sosnode object. Second, fixes a small break in
-checksum handling for the manifest discovered during `oc` transport
-testing for edge cases.
-
-Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
----
- sos/collector/clusters/ocp.py | 4 ++--
- sos/collector/sosnode.py      | 4 +++-
- 2 files changed, 5 insertions(+), 3 deletions(-)
-
-diff --git a/sos/collector/clusters/ocp.py b/sos/collector/clusters/ocp.py
-index 92da4e6e..22a7289a 100644
---- a/sos/collector/clusters/ocp.py
-+++ b/sos/collector/clusters/ocp.py
-@@ -183,7 +183,7 @@ class ocp(Cluster):
-         if self.api_collect_enabled:
-             # a primary has already been enabled for API collection, disable
-             # it among others
--            node.plugin_options.append('openshift.no-oc=on')
-+            node.plugopts.append('openshift.no-oc=on')
-         else:
-             _oc_cmd = 'oc'
-             if node.host.containerized:
-@@ -223,6 +223,6 @@ class ocp(Cluster):
- 
-     def set_node_options(self, node):
-         # don't attempt OC API collections on non-primary nodes
--        node.plugin_options.append('openshift.no-oc=on')
-+        node.plugopts.append('openshift.no-oc=on')
- 
- # vim: set et ts=4 sw=4 :
-diff --git a/sos/collector/sosnode.py b/sos/collector/sosnode.py
-index 8a9dbd7a..ab7f23cc 100644
---- a/sos/collector/sosnode.py
-+++ b/sos/collector/sosnode.py
-@@ -714,7 +714,7 @@ class SosNode():
-                     elif line.startswith("The checksum is: "):
-                         checksum = line.split()[3]
- 
--                if checksum is not None:
-+                if checksum:
-                     self.manifest.add_field('checksum', checksum)
-                     if len(checksum) == 32:
-                         self.manifest.add_field('checksum_type', 'md5')
-@@ -722,6 +722,8 @@ class SosNode():
-                         self.manifest.add_field('checksum_type', 'sha256')
-                     else:
-                         self.manifest.add_field('checksum_type', 'unknown')
-+                else:
-+                    self.manifest.add_field('checksum_type', 'unknown')
-             else:
-                 err = self.determine_sos_error(res['status'], res['output'])
-                 self.log_debug("Error running sos report. rc = %s msg = %s"
--- 
-2.31.1
-
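The checksum fix distills to two rules: an empty or missing checksum must still record checksum_type as 'unknown', and otherwise the digest type is inferred from the hex length. As a self-contained sketch:

    def checksum_type(checksum):
        # None and '' both mean nothing usable was parsed, which is
        # exactly the case the 'if checksum:' change above catches
        if not checksum:
            return 'unknown'
        return {32: 'md5', 64: 'sha256'}.get(len(checksum), 'unknown')

    assert checksum_type(None) == 'unknown'
    assert checksum_type('') == 'unknown'
    assert checksum_type('d41d8cd98f00b204e9800998ecf8427e') == 'md5'
    assert checksum_type('e3b0c44298fc1c149afbf4c8996fb924'
                         '27ae41e4649b934ca495991b7852b855') == 'sha256'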
-From 38a0533de3dd2613eefcc4865a2916e225e3ceed Mon Sep 17 00:00:00 2001
-From: Pavel Moravec <pmoravec@redhat.com>
-Date: Tue, 9 Nov 2021 19:34:25 +0100
-Subject: [PATCH] [presets] Optimise OCP preset for hundreds of network
- namespaces
-
-Sos report on OCP clusters having hundreds of namespaces times out in
-the networking plugin, as it collects >10 commands for each namespace.
-
-Let's use a balanced approach:
-- increasing networking.timeout
-- limiting the namespaces to traverse
-- disabling ethtool per namespace
-
-to ensure sos report successfully finishes in a reasonable time,
-collecting a reasonable amount of data.
-
-Resolves: #2754
-
-Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
----
- sos/presets/redhat/__init__.py | 10 +++++++---
- 1 file changed, 7 insertions(+), 3 deletions(-)
-
-diff --git a/sos/presets/redhat/__init__.py b/sos/presets/redhat/__init__.py
-index e6d63611..865c9b6b 100644
---- a/sos/presets/redhat/__init__.py
-+++ b/sos/presets/redhat/__init__.py
-@@ -29,11 +29,15 @@ RHEL_DESC = RHEL_RELEASE_STR
- 
- RHOSP = "rhosp"
- RHOSP_DESC = "Red Hat OpenStack Platform"
-+RHOSP_OPTS = SoSOptions(plugopts=[
-+                             'process.lsof=off',
-+                             'networking.ethtool_namespaces=False',
-+                             'networking.namespaces=200'])
- 
- RHOCP = "ocp"
- RHOCP_DESC = "OpenShift Container Platform by Red Hat"
--RHOSP_OPTS = SoSOptions(plugopts=[
--                             'process.lsof=off',
-+RHOCP_OPTS = SoSOptions(all_logs=True, verify=True, plugopts=[
-+                             'networking.timeout=600',
-                              'networking.ethtool_namespaces=False',
-                              'networking.namespaces=200'])
- 
-@@ -62,7 +66,7 @@ RHEL_PRESETS = {
-     RHEL: PresetDefaults(name=RHEL, desc=RHEL_DESC),
-     RHOSP: PresetDefaults(name=RHOSP, desc=RHOSP_DESC, opts=RHOSP_OPTS),
-     RHOCP: PresetDefaults(name=RHOCP, desc=RHOCP_DESC, note=NOTE_SIZE_TIME,
--                          opts=_opts_all_logs_verify),
-+                          opts=RHOCP_OPTS),
-     RH_CFME: PresetDefaults(name=RH_CFME, desc=RH_CFME_DESC, note=NOTE_TIME,
-                             opts=_opts_verify),
-     RH_SATELLITE: PresetDefaults(name=RH_SATELLITE, desc=RH_SATELLITE_DESC,
--- 
-2.31.1
-
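The subtlety fixed here: before the patch, the options block sitting under the RHOCP constants was bound to the name RHOSP_OPTS, while the ocp preset itself pointed at a generic all-logs/verify option set. After the patch each preset owns a distinct object. A sketch with a stand-in SoSOptions (not the real class):

    class SoSOptions:
        # stand-in holding only the fields used below
        def __init__(self, all_logs=False, verify=False, plugopts=None):
            self.all_logs = all_logs
            self.verify = verify
            self.plugopts = plugopts or []

    RHOSP_OPTS = SoSOptions(plugopts=[
        'process.lsof=off',
        'networking.ethtool_namespaces=False',
        'networking.namespaces=200'])

    RHOCP_OPTS = SoSOptions(all_logs=True, verify=True, plugopts=[
        'networking.timeout=600',
        'networking.ethtool_namespaces=False',
        'networking.namespaces=200'])

    assert RHOCP_OPTS.plugopts != RHOSP_OPTS.plugopts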
-From 97b93c7f8755d04bdeb4f93759c20dcb787f2046 Mon Sep 17 00:00:00 2001
-From: Jake Hunsaker <jhunsake@redhat.com>
-Date: Tue, 2 Nov 2021 11:34:13 -0400
-Subject: [PATCH] [Plugin] Rework get_container_logs to be more useful
-
-`get_container_logs()` is now `add_container_logs()` to align it better
-with our more common `add_*` methods for plugin collections.
-
-Additionally, it has been extended to accept either a single string or a
-list of strings like the other methods, and plugin authors may now
-specify either specific container names or regexes.
-
-Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
----
- sos/report/plugins/__init__.py | 22 +++++++++++++++++-----
- sos/report/plugins/rabbitmq.py |  2 +-
- 2 files changed, 18 insertions(+), 6 deletions(-)
-
-diff --git a/sos/report/plugins/__init__.py b/sos/report/plugins/__init__.py
-index 08eee118..4b0e4fd5 100644
---- a/sos/report/plugins/__init__.py
-+++ b/sos/report/plugins/__init__.py
-@@ -2366,20 +2366,32 @@ class Plugin():
-             return _runtime.volumes
-         return []
- 
--    def get_container_logs(self, container, **kwargs):
--        """Helper to get the ``logs`` output for a given container
-+    def add_container_logs(self, containers, get_all=False, **kwargs):
-+        """Helper to get the ``logs`` output for a given container or list
-+        of container names and/or regexes.
- 
-         Supports passthru of add_cmd_output() options
- 
--        :param container:   The name of the container to retrieve logs from
--        :type container: ``str``
-+        :param containers:   The name of the container(s) to retrieve
-+                             logs from; may be single names or regexes
-+        :type containers:    ``str`` or ``list`` of ``str``
-+
-+        :param get_all:     Should non-running containers also be queried?
-+                            Default: False
-+        :type get_all:      ``bool``
- 
-         :param kwargs:      Any kwargs supported by ``add_cmd_output()`` are
-                             supported here
-         """
-         _runtime = self._get_container_runtime()
-         if _runtime is not None:
--            self.add_cmd_output(_runtime.get_logs_command(container), **kwargs)
-+            if isinstance(containers, str):
-+                containers = [containers]
-+            for container in containers:
-+                _cons = self.get_all_containers_by_regex(container, get_all)
-+                for _con in _cons:
-+                    cmd = _runtime.get_logs_command(_con[1])
-+                    self.add_cmd_output(cmd, **kwargs)
- 
-     def fmt_container_cmd(self, container, cmd, quotecmd=False):
-         """Format a command to be executed by the loaded ``ContainerRuntime``
-diff --git a/sos/report/plugins/rabbitmq.py b/sos/report/plugins/rabbitmq.py
-index e84b52da..1bfa741f 100644
---- a/sos/report/plugins/rabbitmq.py
-+++ b/sos/report/plugins/rabbitmq.py
-@@ -32,7 +32,7 @@ class RabbitMQ(Plugin, IndependentPlugin):
- 
-         if in_container:
-             for container in container_names:
--                self.get_container_logs(container)
-+                self.add_container_logs(container)
-                 self.add_cmd_output(
-                     self.fmt_container_cmd(container, 'rabbitmqctl report'),
-                     foreground=True
--- 
-2.31.1
-
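add_container_logs() accepts either one string or a list, and treats every entry as a regex over container names. Just that normalization and matching, sketched against a plain list of names (the real method resolves names through the loaded container runtime):

    import re

    def match_containers(containers, all_names):
        if isinstance(containers, str):
            containers = [containers]
        matched = []
        for pattern in containers:
            matched.extend(n for n in all_names if re.match(pattern, n))
        return matched

    names = ['rabbitmq-server-0', 'rabbitmq-server-1', 'nova_api']
    print(match_containers('rabbitmq', names))   # both rabbitmq containers
    print(match_containers(['nova.*'], names))   # ['nova_api']

This is also why the rabbitmq plugin change above is a drop-in rename: a bare container name is a valid regex that matches itself.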
-From 8bf602108f75db10e449eff5e2266c6466504086 Mon Sep 17 00:00:00 2001
-From: Nadia Pinaeva <npinaeva@redhat.com>
-Date: Thu, 2 Dec 2021 16:30:44 +0100
-Subject: [PATCH] [clusters:ocp] fix get_nodes function
-
-Signed-off-by: Nadia Pinaeva <npinaeva@redhat.com>
----
- sos/collector/clusters/ocp.py | 8 ++++----
- 1 file changed, 4 insertions(+), 4 deletions(-)
-
-diff --git a/sos/collector/clusters/ocp.py b/sos/collector/clusters/ocp.py
-index 22a7289a..2ce4e977 100644
---- a/sos/collector/clusters/ocp.py
-+++ b/sos/collector/clusters/ocp.py
-@@ -150,13 +150,13 @@ class ocp(Cluster):
-                               "role option with '-c ocp.role=role1:role2'")
-             roles = [r for r in self.get_option('role').split(':')]
-             self.node_dict = self._build_dict(res['output'].splitlines())
--            for node in self.node_dict:
-+            for node_name, node in self.node_dict.items():
-                 if roles:
-                     for role in roles:
--                        if role in node:
--                            nodes.append(node)
-+                        if role == node['roles']:
-+                            nodes.append(node_name)
-                 else:
--                    nodes.append(node)
-+                    nodes.append(node_name)
-         else:
-             msg = "'oc' command failed"
-             if 'Missing or incomplete' in res['output']:
--- 
-2.31.1
-
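For context, the shape of the data this fix relies on, sketched here as an
assumption (the real dict is built by ocp._build_dict() from `oc get nodes`
output; node names and roles are illustrative):

    # assumed shape: node name -> parsed attributes, including 'roles'
    node_dict = {
        'master-0.example.com': {'roles': 'master'},
        'worker-1.example.com': {'roles': 'worker'},
    }
    # iterating .items() lets the role filter compare against the parsed
    # 'roles' field rather than substring-matching the node name itself
    nodes = [name for name, node in node_dict.items()
             if node['roles'] == 'master']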
-From 5d80ac6dc67e12ef00903436c088a1694f9a7dd7 Mon Sep 17 00:00:00 2001
-From: Jake Hunsaker <jhunsake@redhat.com>
-Date: Wed, 1 Dec 2021 14:13:16 -0500
-Subject: [PATCH] [collect] Catch command not found exceptions from pexpect
-
-When running a command that does not exist on the system, catch the
-resulting pexpect exception and return the proper status code rather
-than allowing an untrapped exception.
-
-Closes: #2768
-
-Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
----
- sos/collector/transports/__init__.py | 6 +++++-
- 1 file changed, 5 insertions(+), 1 deletion(-)
-
-diff --git a/sos/collector/transports/__init__.py b/sos/collector/transports/__init__.py
-index 7bffee62..33f2f66d 100644
---- a/sos/collector/transports/__init__.py
-+++ b/sos/collector/transports/__init__.py
-@@ -225,7 +225,11 @@ class RemoteTransport():
-         if not env:
-             env = None
- 
--        result = pexpect.spawn(cmd, encoding='utf-8', env=env)
-+        try:
-+            result = pexpect.spawn(cmd, encoding='utf-8', env=env)
-+        except pexpect.exceptions.ExceptionPexpect as err:
-+            self.log_debug(err.value)
-+            return {'status': 127, 'output': ''}
- 
-         _expects = [pexpect.EOF, pexpect.TIMEOUT]
-         if need_root and self.opts.ssh_user != 'root':
--- 
-2.31.1
-
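The failure mode being trapped, as a standalone sketch (requires pexpect; the
command name is deliberately bogus):

    import pexpect

    try:
        child = pexpect.spawn('no-such-command --version', encoding='utf-8')
    except pexpect.exceptions.ExceptionPexpect as err:
        # raised before any child process runs when the executable cannot
        # be found; 127 mirrors the shell's "command not found" status
        print(err.value)
        result = {'status': 127, 'output': ''}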
-From decb5d26c165e664fa879a669f2d80165181f0e1 Mon Sep 17 00:00:00 2001
-From: Jake Hunsaker <jhunsake@redhat.com>
-Date: Thu, 2 Dec 2021 14:02:17 -0500
-Subject: [PATCH] [report,collect] Add option to control default container
- runtime
-
-Adds a new `--container-runtime` option that allows users to control
-what default container runtime is used by plugins for container based
-collections, effectively overriding policy defaults.
-
-If no runtimes are active, this option is effectively ignored. If,
-however, runtimes are active but the requested one is not, raise an
-exception to abort collection with an appropriate message to the user.
-
-Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
----
- man/en/sos-collect.1      |  6 ++++++
- man/en/sos-report.1       | 19 +++++++++++++++++++
- sos/collector/__init__.py |  4 ++++
- sos/collector/sosnode.py  |  6 ++++++
- sos/report/__init__.py    | 36 ++++++++++++++++++++++++++++++++++++
- 5 files changed, 71 insertions(+)
-
-diff --git a/man/en/sos-collect.1 b/man/en/sos-collect.1
-index a1f6c10e..9b0a5d7b 100644
---- a/man/en/sos-collect.1
-+++ b/man/en/sos-collect.1
-@@ -11,6 +11,7 @@ sos collect \- Collect sosreports from multiple (cluster) nodes
-     [\-\-chroot CHROOT]
-     [\-\-case\-id CASE_ID]
-     [\-\-cluster\-type CLUSTER_TYPE]
-+    [\-\-container\-runtime RUNTIME]
-     [\-e ENABLE_PLUGINS]
-     [--encrypt-key KEY]\fR
-     [--encrypt-pass PASS]\fR
-@@ -113,6 +114,11 @@ Example: \fBsos collect --cluster-type=kubernetes\fR will force the kubernetes p
- to be run, and thus set sosreport options and attempt to determine a list of nodes using
- that profile. 
- .TP
-+\fB\-\-container\-runtime\fR RUNTIME
-+\fBsos report\fR option. Using this with \fBcollect\fR will pass this option
-+through to nodes with sos version 4.3 or later. This option controls the default
-+container runtime that plugins will use for collections. See \fBman sos-report\fR.
-+.TP
- \fB\-e\fR ENABLE_PLUGINS, \fB\-\-enable\-plugins\fR ENABLE_PLUGINS
- Sosreport option. Use this to enable a plugin that would otherwise not be run.
- 
-diff --git a/man/en/sos-report.1 b/man/en/sos-report.1
-index e8efc8f8..464a77e5 100644
---- a/man/en/sos-report.1
-+++ b/man/en/sos-report.1
-@@ -19,6 +19,7 @@ sos report \- Collect and package diagnostic and support data
-           [--plugin-timeout TIMEOUT]\fR
-           [--cmd-timeout TIMEOUT]\fR
-           [--namespaces NAMESPACES]\fR
-+          [--container-runtime RUNTIME]\fR
-           [-s|--sysroot SYSROOT]\fR
-           [-c|--chroot {auto|always|never}\fR
-           [--tmp-dir directory]\fR
-@@ -299,6 +300,24 @@ Use '0' (default) for no limit - all namespaces will be used for collections.
- 
- Note that specific plugins may provide a similar `namespaces` plugin option. If
- the plugin option is used, it will override this option.
-+.TP
-+.B \--container-runtime RUNTIME
-+Force the use of the specified RUNTIME as the default runtime that plugins will
-+use to collect data from and about containers and container images. By default,
-+the setting of \fBauto\fR results in the local policy determining what runtime
-+will be the default runtime (in configurations where multiple runtimes are installed
-+and active).
-+
-+If no container runtimes are active, this option is ignored. If there are runtimes
-+active, but not one with a name matching RUNTIME, sos will abort.
-+
-+Setting this to \fBnone\fR, \fBoff\fR, or \fBdisabled\fR will cause plugins to
-+\fBNOT\fR leverage any active runtimes for collections. Note that if disabled, plugins
-+specifically for runtimes (e.g. the podman or docker plugins) will still collect
-+general data about the runtime, but will not inspect existing containers or images.
-+
-+Default: 'auto' (policy determined)
-+.TP
- .B \--case-id NUMBER
- Specify a case identifier to associate with the archive.
- Identifiers may include alphanumeric characters, commas and periods ('.').
-diff --git a/sos/collector/__init__.py b/sos/collector/__init__.py
-index 42a7731d..3ad703d3 100644
---- a/sos/collector/__init__.py
-+++ b/sos/collector/__init__.py
-@@ -55,6 +55,7 @@ class SoSCollector(SoSComponent):
-         'clean': False,
-         'cluster_options': [],
-         'cluster_type': None,
-+        'container_runtime': 'auto',
-         'domains': [],
-         'enable_plugins': [],
-         'encrypt_key': '',
-@@ -268,6 +269,9 @@ class SoSCollector(SoSComponent):
-         sos_grp.add_argument('--chroot', default='',
-                              choices=['auto', 'always', 'never'],
-                              help="chroot executed commands to SYSROOT")
-+        sos_grp.add_argument("--container-runtime", default="auto",
-+                             help="Default container runtime to use for "
-+                                  "collections. 'auto' for policy control.")
-         sos_grp.add_argument('-e', '--enable-plugins', action="extend",
-                              help='Enable specific plugins for sosreport')
-         sos_grp.add_argument('-k', '--plugin-option', '--plugopts',
-diff --git a/sos/collector/sosnode.py b/sos/collector/sosnode.py
-index ab7f23cc..f5957e17 100644
---- a/sos/collector/sosnode.py
-+++ b/sos/collector/sosnode.py
-@@ -586,6 +586,12 @@ class SosNode():
-                 sos_opts.append('--cmd-timeout=%s'
-                                 % quote(str(self.opts.cmd_timeout)))
- 
-+        if self.check_sos_version('4.3'):
-+            if self.opts.container_runtime != 'auto':
-+                sos_opts.append(
-+                    "--container-runtime=%s" % self.opts.container_runtime
-+                )
-+
-         self.update_cmd_from_cluster()
- 
-         sos_cmd = sos_cmd.replace(
-diff --git a/sos/report/__init__.py b/sos/report/__init__.py
-index a6c72778..0daad82f 100644
---- a/sos/report/__init__.py
-+++ b/sos/report/__init__.py
-@@ -82,6 +82,7 @@ class SoSReport(SoSComponent):
-         'case_id': '',
-         'chroot': 'auto',
-         'clean': False,
-+        'container_runtime': 'auto',
-         'keep_binary_files': False,
-         'desc': '',
-         'domains': [],
-@@ -187,6 +188,7 @@ class SoSReport(SoSComponent):
-             self.tempfile_util.clean()
-             self._exit(1)
- 
-+        self._check_container_runtime()
-         self._get_hardware_devices()
-         self._get_namespaces()
- 
-@@ -218,6 +220,9 @@ class SoSReport(SoSComponent):
-                                 dest="chroot", default='auto',
-                                 help="chroot executed commands to SYSROOT "
-                                      "[auto, always, never] (default=auto)")
-+        report_grp.add_argument("--container-runtime", default="auto",
-+                                help="Default container runtime to use for "
-+                                     "collections. 'auto' for policy control.")
-         report_grp.add_argument("--desc", "--description", type=str,
-                                 action="store", default="",
-                                 help="Description for a new preset",)
-@@ -373,6 +378,37 @@ class SoSReport(SoSComponent):
-         }
-         # TODO: enumerate network devices, preferably with devtype info
- 
-+    def _check_container_runtime(self):
-+        """Check the loaded container runtimes, and the policy default runtime
-+        (if set), against any requested --container-runtime value. This can be
-+        useful for systems that have multiple runtimes, such as RHCOS, but do
-+        not have a clearly defined 'default' (or one that is determined based
-+        entirely on configuration).
-+        """
-+        if self.opts.container_runtime != 'auto':
-+            crun = self.opts.container_runtime.lower()
-+            if crun in ['none', 'off', 'disabled']:
-+                self.policy.runtimes = {}
-+                self.soslog.info(
-+                    "Disabled all container runtimes per user option."
-+                )
-+            elif not self.policy.runtimes:
-+                msg = ("WARNING: No container runtimes are active, ignoring "
-+                       "option to set default runtime to '%s'\n" % crun)
-+                self.soslog.warn(msg)
-+            elif crun not in self.policy.runtimes.keys():
-+                valid = ', '.join(p for p in self.policy.runtimes.keys()
-+                                  if p != 'default')
-+                raise Exception("Cannot use container runtime '%s': no such "
-+                                "runtime detected. Available runtimes: %s"
-+                                % (crun, valid))
-+            else:
-+                self.policy.runtimes['default'] = self.policy.runtimes[crun]
-+                self.soslog.info(
-+                    "Set default container runtime to '%s'"
-+                    % self.policy.runtimes['default'].name
-+                )
-+
-     def get_fibre_devs(self):
-         """Enumerate a list of fibrechannel devices on this system so that
-         plugins can iterate over them
--- 
-2.31.1
-
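Reduced to a standalone sketch, the selection logic added in
_check_container_runtime() behaves like this (runtime names illustrative):

    runtimes = {'podman': 'podman-runtime', 'crio': 'crio-runtime'}
    runtimes['default'] = runtimes['podman']      # policy-chosen default

    requested = 'crio'                            # --container-runtime value
    if requested in ('none', 'off', 'disabled'):
        runtimes = {}                             # disable container collections
    elif not runtimes:
        print("WARNING: no runtimes active, option ignored")
    elif requested not in runtimes:
        raise Exception("Cannot use container runtime '%s'" % requested)
    else:
        runtimes['default'] = runtimes[requested]  # override the default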
-From 9d4b5af39d76ac99afa40d004fe9888633218356 Mon Sep 17 00:00:00 2001
-From: Jake Hunsaker <jhunsake@redhat.com>
-Date: Fri, 3 Dec 2021 13:37:09 -0500
-Subject: [PATCH 1/2] [Plugin] Add container parameter for add_cmd_output()
-
-Adds a new `container` parameter for `Plugin.add_cmd_output()`, which if
-set will format all commands passed to that call for execution in the
-specified container.
-
-`Plugin.fmt_container_cmd()` is called for this purpose, and has been
-modified so that if the given container does not exist, an empty string
-is returned instead, thus preventing execution on the host.
-
-Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
----
- sos/report/plugins/__init__.py | 16 ++++++++++++++--
- 1 file changed, 14 insertions(+), 2 deletions(-)
-
-diff --git a/sos/report/plugins/__init__.py b/sos/report/plugins/__init__.py
-index e180ae17..3ff7c191 100644
---- a/sos/report/plugins/__init__.py
-+++ b/sos/report/plugins/__init__.py
-@@ -1707,7 +1707,7 @@ class Plugin():
-                        chroot=True, runat=None, env=None, binary=False,
-                        sizelimit=None, pred=None, subdir=None,
-                        changes=False, foreground=False, tags=[],
--                       priority=10, cmd_as_tag=False):
-+                       priority=10, cmd_as_tag=False, container=None):
-         """Run a program or a list of programs and collect the output
- 
-         Output will be limited to `sizelimit`, collecting the last X amount
-@@ -1772,6 +1772,10 @@ class Plugin():
-         :param cmd_as_tag: Should the command string be automatically formatted
-                            to a tag?
-         :type cmd_as_tag: ``bool``
-+
-+        :param container: Run the specified `cmds` inside a container with this
-+                          ID or name
-+        :type container:  ``str``
-         """
-         if isinstance(cmds, str):
-             cmds = [cmds]
-@@ -1782,6 +1786,14 @@ class Plugin():
-         if pred is None:
-             pred = self.get_predicate(cmd=True)
-         for cmd in cmds:
-+            if container:
-+                ocmd = cmd
-+                cmd = self.fmt_container_cmd(container, cmd)
-+                if not cmd:
-+                    self._log_debug("Skipping command '%s' as the requested "
-+                                    "container '%s' does not exist."
-+                                    % (ocmd, container))
-+                    continue
-             self._add_cmd_output(cmd=cmd, suggest_filename=suggest_filename,
-                                  root_symlink=root_symlink, timeout=timeout,
-                                  stderr=stderr, chroot=chroot, runat=runat,
-@@ -2420,7 +2432,7 @@ class Plugin():
-         if self.container_exists(container):
-             _runtime = self._get_container_runtime()
-             return _runtime.fmt_container_cmd(container, cmd, quotecmd)
--        return cmd
-+        return ''
- 
-     def is_module_loaded(self, module_name):
-         """Determine whether specified module is loaded or not
--- 
-2.31.1
-
-
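A minimal usage sketch of the new parameter (hypothetical container name); the
follow-up patch converts the in-tree plugins in exactly this way:

    # inside a plugin's setup(): run the command in the named container
    # via the loaded ContainerRuntime; if the container does not exist,
    # fmt_container_cmd() now returns '' and the command is skipped
    self.add_cmd_output('rabbitmqctl report', container='rabbitmq')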
-From 874d2adfbff9e51dc902669af3c4a5083dbc19b1 Mon Sep 17 00:00:00 2001
-From: Jake Hunsaker <jhunsake@redhat.com>
-Date: Fri, 3 Dec 2021 14:49:43 -0500
-Subject: [PATCH 2/2] [plugins] Update existing plugins to use a_c_o container
- parameter
-
-Updates plugins currently calling `fmt_container_cmd()` in their
-`add_cmd_output()` calls to instead use the new `container` parameter
-and rely on the automatic formatting.
-
-Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
----
- sos/report/plugins/opencontrail.py        |  3 +--
- sos/report/plugins/openstack_database.py  | 20 ++++++--------------
- sos/report/plugins/openstack_designate.py |  6 ++----
- sos/report/plugins/openstack_ironic.py    |  3 +--
- sos/report/plugins/ovn_central.py         |  7 +++----
- sos/report/plugins/rabbitmq.py            | 11 ++++++-----
- 9 files changed, 47 insertions(+), 69 deletions(-)
-
-diff --git a/sos/report/plugins/opencontrail.py b/sos/report/plugins/opencontrail.py
-index b368bffe..76c03e21 100644
---- a/sos/report/plugins/opencontrail.py
-+++ b/sos/report/plugins/opencontrail.py
-@@ -25,8 +25,7 @@ class OpenContrail(Plugin, IndependentPlugin):
-             cnames = self.get_containers(get_all=True)
-             cnames = [c[1] for c in cnames if 'opencontrail' in c[1]]
-             for cntr in cnames:
--                _cmd = self.fmt_container_cmd(cntr, 'contrail-status')
--                self.add_cmd_output(_cmd)
-+                self.add_cmd_output('contrail-status', container=cntr)
-         else:
-             self.add_cmd_output("contrail-status")
- 
-diff --git a/sos/report/plugins/openstack_database.py b/sos/report/plugins/openstack_database.py
-index 1e98fabf..e9f84cf8 100644
---- a/sos/report/plugins/openstack_database.py
-+++ b/sos/report/plugins/openstack_database.py
-@@ -37,36 +37,28 @@ class OpenStackDatabase(Plugin):
-     ]
- 
-     def setup(self):
--
--        in_container = False
-         # determine if we're running databases on the host or in a container
-         _db_containers = [
-             'galera-bundle-.*',  # overcloud
-             'mysql'  # undercloud
-         ]
- 
-+        cname = None
-         for container in _db_containers:
-             cname = self.get_container_by_name(container)
--            if cname is not None:
--                in_container = True
-+            if cname:
-                 break
- 
--        if in_container:
--            fname = "clustercheck_%s" % cname
--            cmd = self.fmt_container_cmd(cname, 'clustercheck')
--            self.add_cmd_output(cmd, timeout=15, suggest_filename=fname)
--        else:
--            self.add_cmd_output('clustercheck', timeout=15)
-+        fname = "clustercheck_%s" % cname if cname else None
-+        self.add_cmd_output('clustercheck', container=cname, timeout=15,
-+                            suggest_filename=fname)
- 
-         if self.get_option('dump') or self.get_option('dumpall'):
-             db_dump = self.get_mysql_db_string(container=cname)
-             db_cmd = "mysqldump --opt %s" % db_dump
- 
--            if in_container:
--                db_cmd = self.fmt_container_cmd(cname, db_cmd)
--
-             self.add_cmd_output(db_cmd, suggest_filename='mysql_dump.sql',
--                                sizelimit=0)
-+                                sizelimit=0, container=cname)
- 
-     def get_mysql_db_string(self, container=None):
- 
-diff --git a/sos/report/plugins/openstack_designate.py b/sos/report/plugins/openstack_designate.py
-index 0ae991b0..a2ea37ab 100644
---- a/sos/report/plugins/openstack_designate.py
-+++ b/sos/report/plugins/openstack_designate.py
-@@ -20,12 +20,10 @@ class OpenStackDesignate(Plugin):
- 
-     def setup(self):
-         # collect current pool config
--        pools_cmd = self.fmt_container_cmd(
--            self.get_container_by_name(".*designate_central"),
--            "designate-manage pool generate_file --file /dev/stdout")
- 
-         self.add_cmd_output(
--            pools_cmd,
-+            "designate-manage pool generate_file --file /dev/stdout",
-+            container=self.get_container_by_name(".*designate_central"),
-             suggest_filename="openstack_designate_current_pools.yaml"
-         )
- 
-diff --git a/sos/report/plugins/openstack_ironic.py b/sos/report/plugins/openstack_ironic.py
-index c36fb6b6..49beb2d9 100644
---- a/sos/report/plugins/openstack_ironic.py
-+++ b/sos/report/plugins/openstack_ironic.py
-@@ -80,8 +80,7 @@ class OpenStackIronic(Plugin):
-                                    'ironic_pxe_tftp', 'ironic_neutron_agent',
-                                    'ironic_conductor', 'ironic_api']:
-                 if self.container_exists('.*' + container_name):
--                    self.add_cmd_output(self.fmt_container_cmd(container_name,
--                                                               'rpm -qa'))
-+                    self.add_cmd_output('rpm -qa', container=container_name)
- 
-         else:
-             self.conf_list = [
-diff --git a/sos/report/plugins/ovn_central.py b/sos/report/plugins/ovn_central.py
-index 914eda60..ddbf288d 100644
---- a/sos/report/plugins/ovn_central.py
-+++ b/sos/report/plugins/ovn_central.py
-@@ -123,11 +123,10 @@ class OVNCentral(Plugin):
- 
-         # If OVN is containerized, we need to run the above commands inside
-         # the container.
--        cmds = [
--            self.fmt_container_cmd(self._container_name, cmd) for cmd in cmds
--        ]
- 
--        self.add_cmd_output(cmds, foreground=True)
-+        self.add_cmd_output(
-+            cmds, foreground=True, container=self._container_name
-+        )
- 
-         self.add_copy_spec("/etc/sysconfig/ovn-northd")
- 
-diff --git a/sos/report/plugins/rabbitmq.py b/sos/report/plugins/rabbitmq.py
-index 1bfa741f..607802e4 100644
---- a/sos/report/plugins/rabbitmq.py
-+++ b/sos/report/plugins/rabbitmq.py
-@@ -34,14 +34,15 @@ class RabbitMQ(Plugin, IndependentPlugin):
-             for container in container_names:
-                 self.add_container_logs(container)
-                 self.add_cmd_output(
--                    self.fmt_container_cmd(container, 'rabbitmqctl report'),
-+                    'rabbitmqctl report',
-+                    container=container,
-                     foreground=True
-                 )
-                 self.add_cmd_output(
--                    self.fmt_container_cmd(
--                        container, "rabbitmqctl eval "
--                        "'rabbit_diagnostics:maybe_stuck().'"),
--                    foreground=True, timeout=10
-+                    "rabbitmqctl eval 'rabbit_diagnostics:maybe_stuck().'",
-+                    container=container,
-+                    foreground=True,
-+                    timeout=10
-                 )
-         else:
-             self.add_cmd_output("rabbitmqctl report")
--- 
-2.31.1
-
-From faa15754f82e9841cd624afe18dc2198644decdf Mon Sep 17 00:00:00 2001
-From: Jake Hunsaker <jhunsake@redhat.com>
-Date: Wed, 8 Dec 2021 13:51:20 -0500
-Subject: [PATCH] [Policy,collect] Prevent remote node policies from setting
- local PATH
-
-This commit fixes an issue where policies loaded for remote nodes when
-using `sos collect` would override the PATH setting for the local
-policy, which in turn could prevent successful execution of cluster
-profile operations.
-
-Related: #2777
-
-Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
----
- sos/policies/__init__.py         | 8 +++++---
- sos/policies/distros/__init__.py | 6 ++++--
- sos/policies/distros/debian.py   | 3 ++-
- sos/policies/distros/redhat.py   | 6 ++++--
- sos/policies/distros/suse.py     | 3 ++-
- 5 files changed, 17 insertions(+), 9 deletions(-)
-
-diff --git a/sos/policies/__init__.py b/sos/policies/__init__.py
-index ef9188de..826d03a1 100644
---- a/sos/policies/__init__.py
-+++ b/sos/policies/__init__.py
-@@ -45,7 +45,7 @@ def load(cache={}, sysroot=None, init=None, probe_runtime=True,
-     return cache['policy']
- 
- 
--class Policy(object):
-+class Policy():
-     """Policies represent distributions that sos supports, and define the way
-     in which sos behaves on those distributions. A policy should define at
-     minimum a way to identify the distribution, and a package manager to allow
-@@ -111,7 +111,7 @@ any third party.
-     presets_path = PRESETS_PATH
-     _in_container = False
- 
--    def __init__(self, sysroot=None, probe_runtime=True):
-+    def __init__(self, sysroot=None, probe_runtime=True, remote_exec=None):
-         """Subclasses that choose to override this initializer should call
-         super() to ensure that they get the required platform bits attached.
-         super(SubClass, self).__init__(). Policies that require runtime
-@@ -122,7 +122,9 @@ any third party.
-         self.probe_runtime = probe_runtime
-         self.package_manager = PackageManager()
-         self.valid_subclasses = [IndependentPlugin]
--        self.set_exec_path()
-+        self.remote_exec = remote_exec
-+        if not self.remote_exec:
-+            self.set_exec_path()
-         self.sysroot = sysroot
-         self.register_presets(GENERIC_PRESETS)
- 
-diff --git a/sos/policies/distros/__init__.py b/sos/policies/distros/__init__.py
-index c69fc1e7..9c91a918 100644
---- a/sos/policies/distros/__init__.py
-+++ b/sos/policies/distros/__init__.py
-@@ -68,9 +68,11 @@ class LinuxPolicy(Policy):
-     container_version_command = None
-     container_authfile = None
- 
--    def __init__(self, sysroot=None, init=None, probe_runtime=True):
-+    def __init__(self, sysroot=None, init=None, probe_runtime=True,
-+                 remote_exec=None):
-         super(LinuxPolicy, self).__init__(sysroot=sysroot,
--                                          probe_runtime=probe_runtime)
-+                                          probe_runtime=probe_runtime,
-+                                          remote_exec=remote_exec)
- 
-         if sysroot:
-             self.sysroot = sysroot
-diff --git a/sos/policies/distros/debian.py b/sos/policies/distros/debian.py
-index 639fd5eb..41f09428 100644
---- a/sos/policies/distros/debian.py
-+++ b/sos/policies/distros/debian.py
-@@ -26,7 +26,8 @@ class DebianPolicy(LinuxPolicy):
-     def __init__(self, sysroot=None, init=None, probe_runtime=True,
-                  remote_exec=None):
-         super(DebianPolicy, self).__init__(sysroot=sysroot, init=init,
--                                           probe_runtime=probe_runtime)
-+                                           probe_runtime=probe_runtime,
-+                                           remote_exec=remote_exec)
-         self.package_manager = DpkgPackageManager(chroot=self.sysroot,
-                                                   remote_exec=remote_exec)
-         self.valid_subclasses += [DebianPlugin]
-diff --git a/sos/policies/distros/redhat.py b/sos/policies/distros/redhat.py
-index 4b14abaf..eb75e15b 100644
---- a/sos/policies/distros/redhat.py
-+++ b/sos/policies/distros/redhat.py
-@@ -53,7 +53,8 @@ class RedHatPolicy(LinuxPolicy):
-     def __init__(self, sysroot=None, init=None, probe_runtime=True,
-                  remote_exec=None):
-         super(RedHatPolicy, self).__init__(sysroot=sysroot, init=init,
--                                           probe_runtime=probe_runtime)
-+                                           probe_runtime=probe_runtime,
-+                                           remote_exec=remote_exec)
-         self.usrmove = False
- 
-         self.package_manager = RpmPackageManager(chroot=self.sysroot,
-@@ -76,7 +77,8 @@ class RedHatPolicy(LinuxPolicy):
-             self.PATH = "/sbin:/bin:/usr/sbin:/usr/bin:/root/bin"
-         self.PATH += os.pathsep + "/usr/local/bin"
-         self.PATH += os.pathsep + "/usr/local/sbin"
--        self.set_exec_path()
-+        if not self.remote_exec:
-+            self.set_exec_path()
-         self.load_presets()
- 
-     @classmethod
-diff --git a/sos/policies/distros/suse.py b/sos/policies/distros/suse.py
-index 1c1feff5..b9d4a3b1 100644
---- a/sos/policies/distros/suse.py
-+++ b/sos/policies/distros/suse.py
-@@ -25,7 +25,8 @@ class SuSEPolicy(LinuxPolicy):
-     def __init__(self, sysroot=None, init=None, probe_runtime=True,
-                  remote_exec=None):
-         super(SuSEPolicy, self).__init__(sysroot=sysroot, init=init,
--                                         probe_runtime=probe_runtime)
-+                                         probe_runtime=probe_runtime,
-+                                         remote_exec=remote_exec)
-         self.valid_subclasses += [SuSEPlugin, RedHatPlugin]
- 
-         self.usrmove = False
--- 
-2.31.1
-
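Condensed, the gating this patch introduces (a sketch; assumes sos collect
instantiates policies for remote nodes with remote_exec set):

    class Policy():
        def __init__(self, sysroot=None, probe_runtime=True, remote_exec=None):
            self.remote_exec = remote_exec
            if not self.remote_exec:
                # only the locally-loaded policy may adjust the local PATH
                self.set_exec_path()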
-From d4383fec5f8a80121aa4f5a37575b37988c51663 Mon Sep 17 00:00:00 2001
-From: Nadia Pinaeva <npinaeva@redhat.com>
-Date: Wed, 1 Dec 2021 12:23:34 +0100
-Subject: [PATCH] Add crio runtime and openshift_ovn plugin
-
-The openshift_ovn plugin collects logs from crio containers. Also fix
-the get_container_by_name() function to return the container_id and
-not the name.
-
-Signed-off-by: Nadia Pinaeva <npinaeva@redhat.com>
----
- sos/policies/distros/__init__.py    |  4 +-
- sos/policies/runtimes/__init__.py   |  2 +-
- sos/policies/runtimes/crio.py       | 79 +++++++++++++++++++++++++++++
- sos/report/plugins/openshift_ovn.py | 41 +++++++++++++++
- 4 files changed, 124 insertions(+), 2 deletions(-)
- create mode 100644 sos/policies/runtimes/crio.py
- create mode 100644 sos/report/plugins/openshift_ovn.py
-
-diff --git a/sos/policies/distros/__init__.py b/sos/policies/distros/__init__.py
-index 9c91a918..7acc7e49 100644
---- a/sos/policies/distros/__init__.py
-+++ b/sos/policies/distros/__init__.py
-@@ -17,6 +17,7 @@ from sos import _sos as _
- from sos.policies import Policy
- from sos.policies.init_systems import InitSystem
- from sos.policies.init_systems.systemd import SystemdInit
-+from sos.policies.runtimes.crio import CrioContainerRuntime
- from sos.policies.runtimes.podman import PodmanContainerRuntime
- from sos.policies.runtimes.docker import DockerContainerRuntime
- 
-@@ -92,7 +93,8 @@ class LinuxPolicy(Policy):
-         if self.probe_runtime:
-             _crun = [
-                 PodmanContainerRuntime(policy=self),
--                DockerContainerRuntime(policy=self)
-+                DockerContainerRuntime(policy=self),
-+                CrioContainerRuntime(policy=self)
-             ]
-             for runtime in _crun:
-                 if runtime.check_is_active():
-diff --git a/sos/policies/runtimes/__init__.py b/sos/policies/runtimes/__init__.py
-index 2e60ad23..4e9a45c1 100644
---- a/sos/policies/runtimes/__init__.py
-+++ b/sos/policies/runtimes/__init__.py
-@@ -100,7 +100,7 @@ class ContainerRuntime():
-             return None
-         for c in self.containers:
-             if re.match(name, c[1]):
--                return c[1]
-+                return c[0]
-         return None
- 
-     def get_images(self):
-diff --git a/sos/policies/runtimes/crio.py b/sos/policies/runtimes/crio.py
-new file mode 100644
-index 00000000..980c3ea1
---- /dev/null
-+++ b/sos/policies/runtimes/crio.py
-@@ -0,0 +1,79 @@
-+# Copyright (C) 2021 Red Hat, Inc., Nadia Pinaeva <npinaeva@redhat.com>
-+
-+# This file is part of the sos project: https://github.com/sosreport/sos
-+#
-+# This copyrighted material is made available to anyone wishing to use,
-+# modify, copy, or redistribute it subject to the terms and conditions of
-+# version 2 of the GNU General Public License.
-+#
-+# See the LICENSE file in the source distribution for further information.
-+
-+from sos.policies.runtimes import ContainerRuntime
-+from sos.utilities import sos_get_command_output
-+from pipes import quote
-+
-+
-+class CrioContainerRuntime(ContainerRuntime):
-+    """Runtime class to use for systems running crio"""
-+
-+    name = 'crio'
-+    binary = 'crictl'
-+
-+    def get_containers(self, get_all=False):
-+        """Get a list of containers present on the system.
-+
-+        :param get_all: If set, include stopped containers as well
-+        :type get_all: ``bool``
-+        """
-+        containers = []
-+        _cmd = "%s ps %s" % (self.binary, '-a' if get_all else '')
-+        if self.active:
-+            out = sos_get_command_output(_cmd, chroot=self.policy.sysroot)
-+            if out['status'] == 0:
-+                for ent in out['output'].splitlines()[1:]:
-+                    ent = ent.split()
-+                    # takes the form (container_id, container_name)
-+                    containers.append((ent[0], ent[-3]))
-+        return containers
-+
-+    def get_images(self):
-+        """Get a list of images present on the system
-+
-+        :returns: A list of 2-tuples containing (image_name, image_id)
-+        :rtype: ``list``
-+        """
-+        images = []
-+        if self.active:
-+            out = sos_get_command_output("%s images" % self.binary,
-+                                         chroot=self.policy.sysroot)
-+            if out['status'] == 0:
-+                for ent in out['output'].splitlines():
-+                    ent = ent.split()
-+                    # takes the form (image_name, image_id)
-+                    images.append((ent[0] + ':' + ent[1], ent[2]))
-+        return images
-+
-+    def fmt_container_cmd(self, container, cmd, quotecmd):
-+        """Format a command to run inside a container using the runtime
-+
-+        :param container: The name or ID of the container in which to run
-+        :type container: ``str``
-+
-+        :param cmd: The command to run inside `container`
-+        :type cmd: ``str``
-+
-+        :param quotecmd: Whether the cmd should be quoted.
-+        :type quotecmd: ``bool``
-+
-+        :returns: Formatted string to run `cmd` inside `container`
-+        :rtype: ``str``
-+        """
-+        if quotecmd:
-+            quoted_cmd = quote(cmd)
-+        else:
-+            quoted_cmd = cmd
-+        container_id = self.get_container_by_name(container)
-+        return "%s %s %s" % (self.run_cmd, container_id,
-+                             quoted_cmd) if container_id is not None else ''
-+
-+# vim: set et ts=4 sw=4 :
-diff --git a/sos/report/plugins/openshift_ovn.py b/sos/report/plugins/openshift_ovn.py
-new file mode 100644
-index 00000000..168f1dd3
---- /dev/null
-+++ b/sos/report/plugins/openshift_ovn.py
-@@ -0,0 +1,41 @@
-+# Copyright (C) 2021 Nadia Pinaeva <npinaeva@redhat.com>
-+
-+# This file is part of the sos project: https://github.com/sosreport/sos
-+#
-+# This copyrighted material is made available to anyone wishing to use,
-+# modify, copy, or redistribute it subject to the terms and conditions of
-+# version 2 of the GNU General Public License.
-+#
-+# See the LICENSE file in the source distribution for further information.
-+
-+from sos.report.plugins import Plugin, RedHatPlugin
-+
-+
-+class OpenshiftOVN(Plugin, RedHatPlugin):
-+    """This plugin is used to collect OCP 4.x OVN logs.
-+    """
-+    short_desc = 'Openshift OVN'
-+    plugin_name = "openshift_ovn"
-+    containers = ('ovnkube-master', 'ovn-ipsec')
-+    profiles = ('openshift',)
-+
-+    def setup(self):
-+        self.add_copy_spec([
-+            "/var/lib/ovn/etc/ovnnb_db.db",
-+            "/var/lib/ovn/etc/ovnsb_db.db",
-+            "/var/lib/openvswitch/etc/keys",
-+            "/var/log/openvswitch/libreswan.log",
-+            "/var/log/openvswitch/ovs-monitor-ipsec.log"
-+        ])
-+
-+        self.add_cmd_output([
-+            'ovn-appctl -t /var/run/ovn/ovnnb_db.ctl ' +
-+            'cluster/status OVN_Northbound',
-+            'ovn-appctl -t /var/run/ovn/ovnsb_db.ctl ' +
-+            'cluster/status OVN_Southbound'],
-+            container='ovnkube-master')
-+        self.add_cmd_output([
-+            'ovs-appctl -t ovs-monitor-ipsec tunnels/show',
-+            'ipsec status',
-+            'certutil -L -d sql:/etc/ipsec.d'],
-+            container='ovn-ipsec')
--- 
-2.31.1
-
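Why get_containers() indexes `ent[0]` and `ent[-3]`: a sketch against sample
`crictl ps` output (column layout assumed from crictl; CREATED can span several
whitespace-separated tokens, so NAME is safest indexed from the end):

    # CONTAINER     IMAGE      CREATED     STATE   NAME    ATTEMPT  POD ID
    line = "3ab9f3e2c1d4 img:latest 2 hours ago Running ovnkube 0 9f2c1a0b7d3e"
    ent = line.split()
    container_id, container_name = ent[0], ent[-3]
    # -> ('3ab9f3e2c1d4', 'ovnkube')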
-From 17218ca17e49cb8491c688095b56503d041c1ae9 Mon Sep 17 00:00:00 2001
-From: Jake Hunsaker <jhunsake@redhat.com>
-Date: Thu, 9 Dec 2021 15:07:23 -0500
-Subject: [PATCH 1/3] [ocp] Skip project setup whenever oc transport is not
- used
-
-Fixes a corner case where we would still attempt to create a new project
-within the OCP cluster even if we weren't using the `oc` transport.
-
-Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
----
- sos/collector/clusters/ocp.py | 4 +++-
- 1 file changed, 3 insertions(+), 1 deletion(-)
-
-diff --git a/sos/collector/clusters/ocp.py b/sos/collector/clusters/ocp.py
-index 2ce4e977..56f8cc47 100644
---- a/sos/collector/clusters/ocp.py
-+++ b/sos/collector/clusters/ocp.py
-@@ -123,7 +123,9 @@ class ocp(Cluster):
-         return nodes
- 
-     def set_transport_type(self):
--        if is_executable('oc') or self.opts.transport == 'oc':
-+        if self.opts.transport != 'auto':
-+            return self.opts.transport
-+        if is_executable('oc'):
-             return 'oc'
-         self.log_info("Local installation of 'oc' not found or is not "
-                       "correctly configured. Will use ControlPersist.")
--- 
-2.31.1
-
-
-From 9faabdc3df08516a91c1adb3326bbf43db155f71 Mon Sep 17 00:00:00 2001
-From: Jake Hunsaker <jhunsake@redhat.com>
-Date: Thu, 9 Dec 2021 16:04:39 -0500
-Subject: [PATCH 2/3] [crio] Put inspect output in the containers subdir
-
-Given the environments where crio is run, having `crictl inspect` output
-in the main plugin directory can be a bit overwhelming. As such, put
-this output into a `containers` subdir, and nest container log output in
-a `containers/logs/` subdir.
-
-Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
----
- sos/report/plugins/crio.py | 5 +++--
- 1 file changed, 3 insertions(+), 2 deletions(-)
-
-diff --git a/sos/report/plugins/crio.py b/sos/report/plugins/crio.py
-index cb2c9796..56cf64a7 100644
---- a/sos/report/plugins/crio.py
-+++ b/sos/report/plugins/crio.py
-@@ -79,10 +79,11 @@ class CRIO(Plugin, RedHatPlugin, UbuntuPlugin):
-         pods = self._get_crio_list(pod_cmd)
- 
-         for container in containers:
--            self.add_cmd_output("crictl inspect %s" % container)
-+            self.add_cmd_output("crictl inspect %s" % container,
-+                                subdir="containers")
-             if self.get_option('logs'):
-                 self.add_cmd_output("crictl logs -t %s" % container,
--                                    subdir="containers", priority=100)
-+                                    subdir="containers/logs", priority=100)
- 
-         for image in images:
-             self.add_cmd_output("crictl inspecti %s" % image, subdir="images")
--- 
-2.31.1
-
-
-From 9118562c47fb521da3eeeed1a8746d45aaa769e7 Mon Sep 17 00:00:00 2001
-From: Jake Hunsaker <jhunsake@redhat.com>
-Date: Thu, 9 Dec 2021 16:06:06 -0500
-Subject: [PATCH 3/3] [networking] Put namespaced commands into subdirs
-
-Where network namespaces are used, there tend to be large numbers of
-them in play. This in turn results in sos running and collecting a very
-large number of namespaced commands.
-
-To aid in consumability, place these collections under a subdir for the
-namespace under another "namespaces" subdir within the plugin directory.
-
-Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
----
- sos/report/plugins/networking.py | 27 ++++++++++++---------------
- 1 file changed, 12 insertions(+), 15 deletions(-)
-
-diff --git a/sos/report/plugins/networking.py b/sos/report/plugins/networking.py
-index 80e24abb..bcb5e6ae 100644
---- a/sos/report/plugins/networking.py
-+++ b/sos/report/plugins/networking.py
-@@ -198,6 +198,7 @@ class Networking(Plugin):
-                                   pred=SoSPredicate(self, cmd_outputs=co6))
-                                   else None)
-         for namespace in namespaces:
-+            _subdir = "namespaces/%s" % namespace
-             ns_cmd_prefix = cmd_prefix + namespace + " "
-             self.add_cmd_output([
-                 ns_cmd_prefix + "ip address show",
-@@ -213,29 +214,27 @@ class Networking(Plugin):
-                 ns_cmd_prefix + "netstat -s",
-                 ns_cmd_prefix + "netstat %s -agn" % self.ns_wide,
-                 ns_cmd_prefix + "nstat -zas",
--            ], priority=50)
-+            ], priority=50, subdir=_subdir)
-             self.add_cmd_output([ns_cmd_prefix + "iptables-save"],
-                                 pred=iptables_with_nft,
-+                                subdir=_subdir,
-                                 priority=50)
-             self.add_cmd_output([ns_cmd_prefix + "ip6tables-save"],
-                                 pred=ip6tables_with_nft,
-+                                subdir=_subdir,
-                                 priority=50)
- 
-             ss_cmd = ns_cmd_prefix + "ss -peaonmi"
-             # --allow-system-changes is handled directly in predicate
-             # evaluation, so plugin code does not need to separately
-             # check for it
--            self.add_cmd_output(ss_cmd, pred=ss_pred)
--
--        # Collect ethtool commands only when ethtool_namespaces
--        # is set to true.
--        if self.get_option("ethtool_namespaces"):
--            # Devices that exist in a namespace use less ethtool
--            # parameters. Run this per namespace.
--            for namespace in self.get_network_namespaces(
--                                self.get_option("namespace_pattern"),
--                                self.get_option("namespaces")):
--                ns_cmd_prefix = cmd_prefix + namespace + " "
-+            self.add_cmd_output(ss_cmd, pred=ss_pred, subdir=_subdir)
-+
-+            # Collect ethtool commands only when ethtool_namespaces
-+            # is set to true.
-+            if self.get_option("ethtool_namespaces"):
-+                # Devices that exist in a namespace use less ethtool
-+                # parameters. Run this per namespace.
-                 netns_netdev_list = self.exec_cmd(
-                     ns_cmd_prefix + "ls -1 /sys/class/net/"
-                 )
-@@ -250,9 +249,7 @@ class Networking(Plugin):
-                         ns_cmd_prefix + "ethtool -i " + eth,
-                         ns_cmd_prefix + "ethtool -k " + eth,
-                         ns_cmd_prefix + "ethtool -S " + eth
--                    ], priority=50)
--
--        return
-+                    ], priority=50, subdir=_subdir)
- 
- 
- class RedHatNetworking(Networking, RedHatPlugin):
--- 
-2.31.1
-
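The resulting archive layout, sketched (namespace and file names are
illustrative and abbreviated; actual file names follow sos command-output
naming):

    sos_commands/networking/
        namespaces/
            ns1/
                ip_address_show
                iptables-save
                ethtool_-i_eth0
            ns2/
                ...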
-From 4bf5f9143c962c839c1d27217ba74127551a5c00 Mon Sep 17 00:00:00 2001
-From: Jake Hunsaker <jhunsake@redhat.com>
-Date: Fri, 17 Dec 2021 11:10:15 -0500
-Subject: [PATCH] [transport] Detect retrieval failures and automatically retry
-
-If a particular attempt to retrieve a remote file fails, we should
-automatically retry that collection up to a set limit. This gives
-`sos collect` more resiliency when collecting sos report archives.
-
-This necessitates a change in how we handle the SoSNode flow for failed
-sos report retrievals, and as such includes minor fixes to the
-transports to ensure that we do not hit exceptions in error handling
-paths that were not previously reachable with how we exited the SoSNode
-retrieval flow.
-
-Closes: #2777
-
-Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
----
- sos/collector/__init__.py            |  5 +++--
- sos/collector/clusters/ocp.py        |  1 +
- sos/collector/sosnode.py             | 17 ++++++++++-------
- sos/collector/transports/__init__.py | 15 ++++++++++++++-
- sos/collector/transports/local.py    |  1 +
- sos/collector/transports/oc.py       |  3 ++-
- 6 files changed, 31 insertions(+), 11 deletions(-)
-
-diff --git a/sos/collector/__init__.py b/sos/collector/__init__.py
-index b825d8fc..a25e794e 100644
---- a/sos/collector/__init__.py
-+++ b/sos/collector/__init__.py
-@@ -1221,8 +1221,9 @@ this utility or remote systems that it connects to.
-     def close_all_connections(self):
-         """Close all sessions for nodes"""
-         for client in self.client_list:
--            self.log_debug('Closing connection to %s' % client.address)
--            client.disconnect()
-+            if client.connected:
-+                self.log_debug('Closing connection to %s' % client.address)
-+                client.disconnect()
- 
-     def create_cluster_archive(self):
-         """Calls for creation of tar archive then cleans up the temporary
-diff --git a/sos/collector/clusters/ocp.py b/sos/collector/clusters/ocp.py
-index 56f8cc47..ae93ad58 100644
---- a/sos/collector/clusters/ocp.py
-+++ b/sos/collector/clusters/ocp.py
-@@ -92,6 +92,7 @@ class ocp(Cluster):
-                                % ret['output'])
-             # don't leave the config on a non-existing project
-             self.exec_master_cmd("oc project default")
-+            self.project = None
-         return True
- 
-     def _build_dict(self, nodelist):
-diff --git a/sos/collector/sosnode.py b/sos/collector/sosnode.py
-index 1341e39f..925f2790 100644
---- a/sos/collector/sosnode.py
-+++ b/sos/collector/sosnode.py
-@@ -751,12 +751,11 @@ class SosNode():
-             if self.file_exists(path):
-                 self.log_info("Copying remote %s to local %s" %
-                               (path, destdir))
--                self._transport.retrieve_file(path, dest)
-+                return self._transport.retrieve_file(path, dest)
-             else:
-                 self.log_debug("Attempting to copy remote file %s, but it "
-                                "does not exist on filesystem" % path)
-                 return False
--            return True
-         except Exception as err:
-             self.log_debug("Failed to retrieve %s: %s" % (path, err))
-             return False
-@@ -793,16 +792,20 @@ class SosNode():
-                 except Exception:
-                     self.log_error('Failed to make archive readable')
-                     return False
--            self.soslog.info('Retrieving sos report from %s' % self.address)
-+            self.log_info('Retrieving sos report from %s' % self.address)
-             self.ui_msg('Retrieving sos report...')
--            ret = self.retrieve_file(self.sos_path)
-+            try:
-+                ret = self.retrieve_file(self.sos_path)
-+            except Exception as err:
-+                self.log_error(err)
-+                return False
-             if ret:
-                 self.ui_msg('Successfully collected sos report')
-                 self.file_list.append(self.sos_path.split('/')[-1])
-+                return True
-             else:
--                self.log_error('Failed to retrieve sos report')
--                raise SystemExit
--            return True
-+                self.ui_msg('Failed to retrieve sos report')
-+                return False
-         else:
-             # sos sometimes fails but still returns a 0 exit code
-             if self.stderr.read():
-diff --git a/sos/collector/transports/__init__.py b/sos/collector/transports/__init__.py
-index 33f2f66d..dcdebdde 100644
---- a/sos/collector/transports/__init__.py
-+++ b/sos/collector/transports/__init__.py
-@@ -303,7 +303,20 @@ class RemoteTransport():
-         :returns:   True if file was successfully copied from remote, or False
-         :rtype:     ``bool``
-         """
--        return self._retrieve_file(fname, dest)
-+        attempts = 0
-+        try:
-+            while attempts < 5:
-+                attempts += 1
-+                ret = self._retrieve_file(fname, dest)
-+                if ret:
-+                    return True
-+                self.log_info("File retrieval attempt %s failed" % attempts)
-+            self.log_info("File retrieval failed after 5 attempts")
-+            return False
-+        except Exception as err:
-+            self.log_error("Exception encountered during retrieval attempt %s "
-+                           "for %s: %s" % (attempts, fname, err))
-+            raise err
- 
-     def _retrieve_file(self, fname, dest):
-         raise NotImplementedError("Transport %s does not support file copying"
-diff --git a/sos/collector/transports/local.py b/sos/collector/transports/local.py
-index a4897f19..2996d524 100644
---- a/sos/collector/transports/local.py
-+++ b/sos/collector/transports/local.py
-@@ -35,6 +35,7 @@ class LocalTransport(RemoteTransport):
-     def _retrieve_file(self, fname, dest):
-         self.log_debug("Moving %s to %s" % (fname, dest))
-         shutil.copy(fname, dest)
-+        return True
- 
-     def _format_cmd_for_exec(self, cmd):
-         return cmd
-diff --git a/sos/collector/transports/oc.py b/sos/collector/transports/oc.py
-index de044ccb..720dd61d 100644
---- a/sos/collector/transports/oc.py
-+++ b/sos/collector/transports/oc.py
-@@ -202,7 +202,8 @@ class OCTransport(RemoteTransport):
-                                                     env, False)
- 
-     def _disconnect(self):
--        os.unlink(self.pod_tmp_conf)
-+        if os.path.exists(self.pod_tmp_conf):
-+            os.unlink(self.pod_tmp_conf)
-         removed = self.run_oc("delete pod %s" % self.pod_name)
-         if "deleted" not in removed['output']:
-             self.log_debug("Calling delete on pod '%s' failed: %s"
--- 
-2.31.1
-
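Condensed, the new retrieval flow (method names from the patch; logging and
exception details elided):

    def retrieve_file(self, fname, dest):            # RemoteTransport
        for attempt in range(1, 6):                  # up to 5 tries
            if self._retrieve_file(fname, dest):     # transport-specific copy
                return True                          # success, stop retrying
        return False    # caller (SosNode) now reports failure and moves on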
-From 304c9ef6c1015f1ebe1a8d569c3e16bada4d23f1 Mon Sep 17 00:00:00 2001
-From: Nadia Pinaeva <npinaeva@redhat.com>
-Date: Tue, 4 Jan 2022 16:37:09 +0100
-Subject: [PATCH] Add cluster cleanup for all exit() calls
-
-Signed-off-by: Nadia Pinaeva <npinaeva@redhat.com>
----
- sos/collector/__init__.py | 3 +--
- 1 file changed, 1 insertion(+), 2 deletions(-)
-
-diff --git a/sos/collector/__init__.py b/sos/collector/__init__.py
-index a25e794e1..ffd63bc63 100644
---- a/sos/collector/__init__.py
-+++ b/sos/collector/__init__.py
-@@ -443,6 +443,7 @@ def add_parser_options(cls, parser):
- 
-     def exit(self, msg, error=1):
-         """Used to safely terminate if sos-collector encounters an error"""
-+        self.cluster.cleanup()
-         self.log_error(msg)
-         try:
-             self.close_all_connections()
-@@ -858,8 +858,9 @@ class SoSCollector(SoSComponent):
-                       "CTRL-C to quit\n")
-                 self.ui_log.info("")
-             except KeyboardInterrupt:
--                self.cluster.cleanup()
-                 self.exit("Exiting on user cancel", 130)
-+            except Exception as e:
-+                self.exit(repr(e), 1)
- 
-     def configure_sos_cmd(self):
-         """Configures the sosreport command that is run on the nodes"""
-@@ -1185,7 +1185,6 @@ def collect(self):
-             arc_name = self.create_cluster_archive()
-         else:
-             msg = 'No sosreports were collected, nothing to archive...'
--            self.cluster.cleanup()
-             self.exit(msg, 1)
- 
-         if self.opts.upload and self.policy.get_upload_url():
-From 2c3a647817dfbac36be3768acf6026e91d1a6e8f Mon Sep 17 00:00:00 2001
-From: Jake Hunsaker <jhunsake@redhat.com>
-Date: Tue, 21 Dec 2021 14:20:19 -0500
-Subject: [PATCH] [options] Allow spaces in --keywords values in sos.conf
-
-The `--keywords` option supports spaces to allow for obfuscated phrases,
-not just words. This, however, breaks if a phrase is added to the config
-file *before* a run that passes the phrase via the cmdline option, due to
-the safeguards we have for all other values that do not support spaces.
-
-Add a check in our flow for updating options from the config file to not
-replace illegal spaces if we're checking the `keywords` option, for
-which spaces are legal.
-
-Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
----
- sos/options.py | 5 ++++-
- 1 file changed, 4 insertions(+), 1 deletion(-)
-
-diff --git a/sos/options.py b/sos/options.py
-index 7bea3ffc1..4846a5096 100644
---- a/sos/options.py
-+++ b/sos/options.py
-@@ -200,7 +200,10 @@ def _update_from_section(section, config):
-                         odict[rename_opts[key]] = odict.pop(key)
-                 # set the values according to the config file
-                 for key, val in odict.items():
--                    if isinstance(val, str):
-+                    # most option values do not tolerate spaces, special
-+                    # exception however for --keywords which we do want to
-+                    # support phrases, and thus spaces, for
-+                    if isinstance(val, str) and key != 'keywords':
-                         val = val.replace(' ', '')
-                     if key not in self.arg_defaults:
-                         # read an option that is not loaded by the current
-From f912fc9e31b406a24b7a9c012e12cda920632051 Mon Sep 17 00:00:00 2001
-From: Pavel Moravec <pmoravec@redhat.com>
-Date: Mon, 10 Jan 2022 14:13:42 +0100
-Subject: [PATCH] [collect] Deal with a None sos_version properly
-
-In case the collector cluster hits an error during init, sos_version is
-None, which LooseVersion cannot compare properly, instead raising the
-exception:
-
-'LooseVersion' object has no attribute 'version'
-
-Related: #2822
-
-Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
----
- sos/collector/sosnode.py | 3 ++-
- 1 file changed, 2 insertions(+), 1 deletion(-)
-
-diff --git a/sos/collector/sosnode.py b/sos/collector/sosnode.py
-index 925f27909..7bbe0cd1f 100644
---- a/sos/collector/sosnode.py
-+++ b/sos/collector/sosnode.py
-@@ -382,7 +382,8 @@ def check_sos_version(self, ver):
-         given ver. This means that if the installed version is greater than
-         ver, this will still return True
-         """
--        return LooseVersion(self.sos_info['version']) >= ver
-+        return self.sos_info['version'] is not None and \
-+            LooseVersion(self.sos_info['version']) >= ver
- 
-     def is_installed(self, pkg):
-         """Checks if a given package is installed on the node"""
-From 0c67e8ebaeef17dac3b5b9e42a59b4e673e4403b Mon Sep 17 00:00:00 2001
-From: Pavel Moravec <pmoravec@redhat.com>
-Date: Mon, 10 Jan 2022 14:17:13 +0100
-Subject: [PATCH] [collector] Cleanup cluster only if defined
-
-In case cluster init fails, self.cluster is None and its cleanup
-must be skipped.
-
-Resolves: #2822
-
-Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
----
- sos/collector/__init__.py | 3 ++-
- 1 file changed, 2 insertions(+), 1 deletion(-)
-
-diff --git a/sos/collector/__init__.py b/sos/collector/__init__.py
-index ffd63bc63..3e22bca3e 100644
---- a/sos/collector/__init__.py
-+++ b/sos/collector/__init__.py
-@@ -443,7 +443,8 @@ def add_parser_options(cls, parser):
- 
-     def exit(self, msg, error=1):
-         """Used to safely terminate if sos-collector encounters an error"""
--        self.cluster.cleanup()
-+        if self.cluster:
-+            self.cluster.cleanup()
-         self.log_error(msg)
-         try:
-             self.close_all_connections()
-From ef27a6ee6737c23b3beda1437768a91679024697 Mon Sep 17 00:00:00 2001
-From: Nadia Pinaeva <npinaeva@redhat.com>
-Date: Fri, 3 Dec 2021 15:41:35 +0100
-Subject: [PATCH] Add journal logs for NetworkManager plugin
-
-Signed-off-by: Nadia Pinaeva <npinaeva@redhat.com>
----
- sos/report/plugins/networkmanager.py | 2 ++
- 1 file changed, 2 insertions(+)
-
-diff --git a/sos/report/plugins/networkmanager.py b/sos/report/plugins/networkmanager.py
-index 30f99a1140..3aca0c7460 100644
---- a/sos/report/plugins/networkmanager.py
-+++ b/sos/report/plugins/networkmanager.py
-@@ -25,6 +25,8 @@ def setup(self):
-             "/etc/NetworkManager/dispatcher.d"
-         ])
- 
-+        self.add_journal(units="NetworkManager")
-+
-         # There are some incompatible changes in nmcli since
-         # the release of NetworkManager >= 0.9.9. In addition,
-         # NetworkManager >= 0.9.9 will use the long names of
-From 9eb60f0bb6ea36f9c1cf099c1fd20cf3938b4b26 Mon Sep 17 00:00:00 2001
-From: Jake Hunsaker <jhunsake@redhat.com>
-Date: Mon, 17 Jan 2022 11:11:24 -0500
-Subject: [PATCH] [clean] Ignore empty items for obfuscation better
-
-This commit fixes a couple of edge cases where an empty item (e.g. an
-empty string '') was not being properly ignored, which in turn caused
-failures in writing both obfuscations and replacement files.
-
-This should no longer be possible.
-
-Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
----
- sos/cleaner/mappings/__init__.py       | 5 ++++-
- sos/cleaner/mappings/username_map.py   | 2 +-
- sos/cleaner/parsers/username_parser.py | 2 +-
- 3 files changed, 6 insertions(+), 3 deletions(-)
-
-diff --git a/sos/cleaner/mappings/__init__.py b/sos/cleaner/mappings/__init__.py
-index 5cf5c8b2d..48171a052 100644
---- a/sos/cleaner/mappings/__init__.py
-+++ b/sos/cleaner/mappings/__init__.py
-@@ -49,6 +49,8 @@ def add(self, item):
-             :param item:        The plaintext object to obfuscate
-         """
-         with self.lock:
-+            if not item:
-+                return item
-             self.dataset[item] = self.sanitize_item(item)
-             return self.dataset[item]
- 
-@@ -67,7 +69,8 @@ def get(self, item):
-         """Retrieve an item's obfuscated counterpart from the map. If the item
-         does not yet exist in the map, add it by generating one on the fly
-         """
--        if self.ignore_item(item) or self.item_in_dataset_values(item):
-+        if (not item or self.ignore_item(item) or
-+                self.item_in_dataset_values(item)):
-             return item
-         if item not in self.dataset:
-             return self.add(item)
-diff --git a/sos/cleaner/mappings/username_map.py b/sos/cleaner/mappings/username_map.py
-index 7ecccd7bc..ed6dc0912 100644
---- a/sos/cleaner/mappings/username_map.py
-+++ b/sos/cleaner/mappings/username_map.py
-@@ -24,7 +24,7 @@ class SoSUsernameMap(SoSMap):
- 
-     def load_names_from_options(self, opt_names):
-         for name in opt_names:
--            if name not in self.dataset.keys():
-+            if name and name not in self.dataset.keys():
-                 self.add(name)
- 
-     def sanitize_item(self, username):
-diff --git a/sos/cleaner/parsers/username_parser.py b/sos/cleaner/parsers/username_parser.py
-index 49640f7fd..2853c860f 100644
---- a/sos/cleaner/parsers/username_parser.py
-+++ b/sos/cleaner/parsers/username_parser.py
-@@ -55,7 +55,7 @@ def load_usernames_into_map(self, content):
-                 user = line.split()[0]
-             except Exception:
-                 continue
--            if user.lower() in self.skip_list:
-+            if not user or user.lower() in self.skip_list:
-                 continue
-             users.add(user)
-         for each in users:
-From ed618678fd3d07e68e1a430eb7d225a9701332e0 Mon Sep 17 00:00:00 2001
-From: Jake Hunsaker <jhunsake@redhat.com>
-Date: Thu, 13 Jan 2022 13:52:34 -0500
-Subject: [PATCH] [clean,parsers] Build regex lists for static items only once
-
-For parsers such as the username and keyword parsers, we don't discover
-new items through parsing archives - these parsers use static lists
-determined before we begin the actual obfuscation process.
-
-As such, we can build a list of regexes for these static items once, and
-then reference those regexes during execution, rather than rebuilding
-the regex for each of these items for every obfuscation.
-
-For use cases where hundreds of items, e.g. hundreds of usernames, are
-being obfuscated, this results in a significant performance increase.
-Individual per-file gains are minor - fractions of a second - however
-these gains build up over the course of the hundreds to thousands of
-files a typical archive can be expected to contain.
-
-Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
----
- sos/cleaner/__init__.py                |  9 +++++++++
- sos/cleaner/parsers/__init__.py        | 10 ++++++++++
- sos/cleaner/parsers/keyword_parser.py  | 15 ++++++++++-----
- sos/cleaner/parsers/username_parser.py | 14 ++++++++------
- tests/unittests/cleaner_tests.py       |  1 +
- 5 files changed, 38 insertions(+), 11 deletions(-)
-
-diff --git a/sos/cleaner/__init__.py b/sos/cleaner/__init__.py
-index 5686e2131..b76bef644 100644
---- a/sos/cleaner/__init__.py
-+++ b/sos/cleaner/__init__.py
-@@ -294,6 +294,7 @@ def execute(self):
-         # we have at least one valid target to obfuscate
-         self.completed_reports = []
-         self.preload_all_archives_into_maps()
-+        self.generate_parser_item_regexes()
-         self.obfuscate_report_paths()
- 
-         if not self.completed_reports:
-@@ -498,6 +499,14 @@ def _replace_obfuscated_archives(self):
-             shutil.move(archive.final_archive_path, dest)
-             archive.final_archive_path = dest_name
- 
-+    def generate_parser_item_regexes(self):
-+        """For the parsers that use prebuilt lists of items, generate those
-+        regexes now since all the parsers should be preloaded by the archive(s)
-+        as well as being handed cmdline options and mapping file configuration.
-+        """
-+        for parser in self.parsers:
-+            parser.generate_item_regexes()
-+
-     def preload_all_archives_into_maps(self):
-         """Before doing the actual obfuscation, if we have multiple archives
-         to obfuscate then we need to preload each of them into the mappings
-diff --git a/sos/cleaner/parsers/__init__.py b/sos/cleaner/parsers/__init__.py
-index e62fd9384..6def863a6 100644
---- a/sos/cleaner/parsers/__init__.py
-+++ b/sos/cleaner/parsers/__init__.py
-@@ -46,9 +46,19 @@ class SoSCleanerParser():
-     map_file_key = 'unset'
- 
-     def __init__(self, config={}):
-+        self.regexes = {}
-         if self.map_file_key in config:
-             self.mapping.conf_update(config[self.map_file_key])
- 
-+    def generate_item_regexes(self):
-+        """Generate regexes for items the parser will be searching for
-+        repeatedly without needing to generate them for every file and/or line
-+        we process
-+
-+        Not used by all parsers.
-+        """
-+        pass
-+
-     def parse_line(self, line):
-         """This will be called for every line in every file we process, so that
-         every parser has a chance to scrub everything.
-diff --git a/sos/cleaner/parsers/keyword_parser.py b/sos/cleaner/parsers/keyword_parser.py
-index 694c6073a..362a1929e 100644
---- a/sos/cleaner/parsers/keyword_parser.py
-+++ b/sos/cleaner/parsers/keyword_parser.py
-@@ -9,6 +9,7 @@
- # See the LICENSE file in the source distribution for further information.
- 
- import os
-+import re
- 
- from sos.cleaner.parsers import SoSCleanerParser
- from sos.cleaner.mappings.keyword_map import SoSKeywordMap
-@@ -33,16 +34,20 @@ def __init__(self, config, keywords=None, keyword_file=None):
-                     # pre-generate an obfuscation mapping for each keyword
-                     # this is necessary for cases where filenames are being
-                     # obfuscated before or instead of file content
--                    self.mapping.get(keyword)
-+                    self.mapping.get(keyword.lower())
-                     self.user_keywords.append(keyword)
-         if keyword_file and os.path.exists(keyword_file):
-             with open(keyword_file, 'r') as kwf:
-                 self.user_keywords.extend(kwf.read().splitlines())
- 
-+    def generate_item_regexes(self):
-+        for kw in self.user_keywords:
-+            self.regexes[kw] = re.compile(kw, re.I)
-+
-     def parse_line(self, line):
-         count = 0
--        for keyword in sorted(self.user_keywords, reverse=True):
--            if keyword in line:
--                line = line.replace(keyword, self.mapping.get(keyword))
--                count += 1
-+        for kwrd, reg in sorted(self.regexes.items(), key=lambda x: len(x[0]), reverse=True):
-+            if reg.search(line):
-+                line, _count = reg.subn(self.mapping.get(kwrd.lower()), line)
-+                count += _count
-         return line, count
-diff --git a/sos/cleaner/parsers/username_parser.py b/sos/cleaner/parsers/username_parser.py
-index 3208a6557..49640f7fd 100644
---- a/sos/cleaner/parsers/username_parser.py
-+++ b/sos/cleaner/parsers/username_parser.py
-@@ -61,12 +61,14 @@ def load_usernames_into_map(self, content):
-         for each in users:
-             self.mapping.get(each)
- 
-+    def generate_item_regexes(self):
-+        for user in self.mapping.dataset:
-+            self.regexes[user] = re.compile(user, re.I)
-+
-     def parse_line(self, line):
-         count = 0
--        for username in sorted(self.mapping.dataset.keys(), reverse=True):
--            _reg = re.compile(username, re.I)
--            if _reg.search(line):
--                line, count = _reg.subn(
--                    self.mapping.get(username.lower()), line
--                )
-+        for user, reg in sorted(self.regexes.items(), key=lambda x: len(x[0]), reverse=True):
-+            if reg.search(line):
-+                line, _count = reg.subn(self.mapping.get(user.lower()), line)
-+                count += _count
-         return line, count
-diff --git a/tests/unittests/cleaner_tests.py b/tests/unittests/cleaner_tests.py
-index cb20772fd..b59eade9a 100644
---- a/tests/unittests/cleaner_tests.py
-+++ b/tests/unittests/cleaner_tests.py
-@@ -105,6 +105,7 @@ def setUp(self):
-         self.host_parser = SoSHostnameParser(config={}, opt_domains='foobar.com')
-         self.kw_parser = SoSKeywordParser(config={}, keywords=['foobar'])
-         self.kw_parser_none = SoSKeywordParser(config={})
-+        self.kw_parser.generate_item_regexes()
- 
-     def test_ip_parser_valid_ipv4_line(self):
-         line = 'foobar foo 10.0.0.1/24 barfoo bar'
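The optimization above boils down to hoisting re.compile() out of the per-line loop. A self-contained sketch of the pattern (the keyword list and mapping are illustrative):

import re

keywords = ['foobar', 'secretproject']                      # illustrative
mapping = {'foobar': 'obfuscatedword0',
           'secretproject': 'obfuscatedword1'}

# build each regex exactly once, before any file is processed
regexes = {kw: re.compile(kw, re.I) for kw in keywords}

def obfuscate_line(line):
    count = 0
    # longest keywords first, so substrings cannot clobber a longer match
    for kw, reg in sorted(regexes.items(), key=lambda i: len(i[0]),
                          reverse=True):
        if reg.search(line):
            line, subs = reg.subn(mapping[kw], line)
            count += subs
    return line, count

print(obfuscate_line('FooBar logged in'))   # ('obfuscatedword0 logged in', 1)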
-From 2ae16e0245e1b01b8547e507abb69c11871a8467 Mon Sep 17 00:00:00 2001
-From: Jake Hunsaker <jhunsake@redhat.com>
-Date: Mon, 21 Feb 2022 14:37:09 -0500
-Subject: [PATCH] [sosnode] Handle downstream versioning for runtime option
- check
-
-First, adds parsing and formatting for an sos installation's release
-version according to the loaded package manager for that node.
-
-Adds a fallback version check for 4.2-13 for RHEL downstreams that
-backport the `container-runtime` option into sos-4.2.
-
-Carry this in upstream to account for use cases where the workstation
-used to run `collect` may be on a different stream than the one used by
-cluster nodes.
-
-Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
----
- sos/collector/sosnode.py | 60 ++++++++++++++++++++++++++++++++++------
- 1 file changed, 51 insertions(+), 9 deletions(-)
-
-diff --git a/sos/collector/sosnode.py b/sos/collector/sosnode.py
-index 7bbe0cd1..d9b998b0 100644
---- a/sos/collector/sosnode.py
-+++ b/sos/collector/sosnode.py
-@@ -275,21 +275,34 @@ class SosNode():
-     def _load_sos_info(self):
-         """Queries the node for information about the installed version of sos
-         """
-+        ver = None
-+        rel = None
-         if self.host.container_version_command is None:
-             pkg = self.host.package_manager.pkg_version(self.host.sos_pkg_name)
-             if pkg is not None:
-                 ver = '.'.join(pkg['version'])
--                self.sos_info['version'] = ver
-+                if pkg['release']:
-+                    rel = pkg['release']
-+
-         else:
-             # use the containerized policy's command
-             pkgs = self.run_command(self.host.container_version_command,
-                                     use_container=True, need_root=True)
-             if pkgs['status'] == 0:
--                ver = pkgs['output'].strip().split('-')[1]
--                if ver:
--                    self.sos_info['version'] = ver
--            else:
--                self.sos_info['version'] = None
-+                _, ver, rel = pkgs['output'].strip().split('-')
-+
-+        if ver:
-+            if len(ver.split('.')) == 2:
-+                # safeguard against maintenance releases throwing off the
-+                # comparison by LooseVersion
-+                ver += '.0'
-+            try:
-+                ver += '-%s' % rel.split('.')[0]
-+            except Exception as err:
-+                self.log_debug("Unable to fully parse sos release: %s" % err)
-+
-+        self.sos_info['version'] = ver
-+
-         if self.sos_info['version']:
-             self.log_info('sos version is %s' % self.sos_info['version'])
-         else:
-@@ -381,9 +394,37 @@ class SosNode():
-         """Checks to see if the sos installation on the node is AT LEAST the
-         given ver. This means that if the installed version is greater than
-         ver, this will still return True
-+
-+        :param ver: Version number we are trying to verify is installed
-+        :type ver:  ``str``
-+
-+        :returns:   True if installed version is at least ``ver``, else False
-+        :rtype:     ``bool``
-         """
--        return self.sos_info['version'] is not None and \
--            LooseVersion(self.sos_info['version']) >= ver
-+        def _format_version(ver):
-+            # format the version we're checking to a standard form of X.Y.Z-R
-+            try:
-+                _fver = ver.split('-')[0]
-+                _rel = ''
-+                if '-' in ver:
-+                    _rel = '-' + ver.split('-')[-1].split('.')[0]
-+                if len(_fver.split('.')) == 2:
-+                    _fver += '.0'
-+
-+                return _fver + _rel
-+            except Exception as err:
-+                self.log_debug("Unable to format '%s': %s" % (ver, err))
-+                return ver
-+
-+        _ver = _format_version(ver)
-+
-+        try:
-+            _node_ver = LooseVersion(self.sos_info['version'])
-+            _test_ver = LooseVersion(_ver)
-+            return _node_ver >= _test_ver
-+        except Exception as err:
-+            self.log_error("Error checking sos version: %s" % err)
-+            return False
- 
-     def is_installed(self, pkg):
-         """Checks if a given package is installed on the node"""
-@@ -587,7 +628,8 @@ class SosNode():
-                 sos_opts.append('--cmd-timeout=%s'
-                                 % quote(str(self.opts.cmd_timeout)))
- 
--        if self.check_sos_version('4.3'):
-+        # handle downstream versions that backported this option
-+        if self.check_sos_version('4.3') or self.check_sos_version('4.2-13'):
-             if self.opts.container_runtime != 'auto':
-                 sos_opts.append(
-                     "--container-runtime=%s" % self.opts.container_runtime
--- 
-2.34.1
-
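The version handling in this patch normalizes both sides of the comparison to an X.Y.Z-R form before handing them to LooseVersion. A standalone sketch of that normalization (LooseVersion comes from the deprecated distutils.version module, which is what the code above appears to rely on):

from distutils.version import LooseVersion


def format_version(ver):
    # turn '4.2-13.el8' into '4.2.0-13' and '4.3' into '4.3.0' so that
    # maintenance releases do not throw off the LooseVersion comparison
    fver = ver.split('-')[0]
    rel = ''
    if '-' in ver:
        rel = '-' + ver.split('-')[-1].split('.')[0]
    if len(fver.split('.')) == 2:
        fver += '.0'
    return fver + rel


assert format_version('4.2-13.el8') == '4.2.0-13'
assert LooseVersion(format_version('4.3')) >= LooseVersion(format_version('4.2-13'))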
diff --git a/SOURCES/sos-bz2041855-virsh-in-foreground.patch b/SOURCES/sos-bz2041855-virsh-in-foreground.patch
deleted file mode 100644
index 66bca13..0000000
--- a/SOURCES/sos-bz2041855-virsh-in-foreground.patch
+++ /dev/null
@@ -1,146 +0,0 @@
-From 137abd394f64a63b6633949b5c81159af12038b7 Mon Sep 17 00:00:00 2001
-From: Pavel Moravec <pmoravec@redhat.com>
-Date: Fri, 14 Jan 2022 20:07:17 +0100
-Subject: [PATCH] [report] pass foreground argument to collect_cmd_output
-
-Related to: #2825
-
-Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
----
- sos/report/plugins/__init__.py | 12 +++++++++---
- 1 file changed, 9 insertions(+), 3 deletions(-)
-
-diff --git a/sos/report/plugins/__init__.py b/sos/report/plugins/__init__.py
-index 98f163ab9..1bbdf28a4 100644
---- a/sos/report/plugins/__init__.py
-+++ b/sos/report/plugins/__init__.py
-@@ -1920,6 +1920,8 @@ class Plugin(object):
-             :param subdir:              Subdir in plugin directory to save to
-             :param changes:             Does this cmd potentially make a change
-                                         on the system?
-+            :param foreground:          Run the `cmd` in the foreground with a
-+                                        TTY
-             :param tags:                Add tags in the archive manifest
-             :param cmd_as_tag:          Format command string to tag
- 
-@@ -2145,7 +2147,8 @@ def collect_cmd_output(self, cmd, suggest_filename=None,
-                            root_symlink=False, timeout=None,
-                            stderr=True, chroot=True, runat=None, env=None,
-                            binary=False, sizelimit=None, pred=None,
--                           changes=False, subdir=None, tags=[]):
-+                           changes=False, foreground=False, subdir=None,
-+                           tags=[]):
-         """Execute a command and save the output to a file for inclusion in the
-         report, then return the results for further use by the plugin
- 
-@@ -2188,6 +2191,9 @@ def collect_cmd_output(self, cmd, suggest_filename=None,
-                                     on the system?
-         :type changes: ``bool``
- 
-+        :param foreground:          Run the `cmd` in the foreground with a TTY
-+        :type foreground: ``bool``
-+
-         :param tags:                Add tags in the archive manifest
-         :type tags: ``str`` or a ``list`` of strings
- 
-@@ -2206,8 +2212,8 @@ def collect_cmd_output(self, cmd, suggest_filename=None,
-         return self._collect_cmd_output(
-             cmd, suggest_filename=suggest_filename, root_symlink=root_symlink,
-             timeout=timeout, stderr=stderr, chroot=chroot, runat=runat,
--            env=env, binary=binary, sizelimit=sizelimit, subdir=subdir,
--            tags=tags
-+            env=env, binary=binary, sizelimit=sizelimit, foreground=foreground,
-+            subdir=subdir, tags=tags
-         )
- 
-     def exec_cmd(self, cmd, timeout=None, stderr=True, chroot=True,
-From 747fef695e4ff08f320c5f03090bdefa7154c761 Mon Sep 17 00:00:00 2001
-From: Pavel Moravec <pmoravec@redhat.com>
-Date: Fri, 14 Jan 2022 20:10:22 +0100
-Subject: [PATCH] [virsh] Call virsh commands in the foreground / with a TTY
-
-On some virsh errors (like being unable to connect to a hypervisor),
-the tool needs to communicate with a TTY, otherwise it can get stuck
-(when called via Popen with a timeout).
-
-Calling it in the foreground prevents it from getting stuck waiting on
-the command timeout.
-
-Resolves: #2825
-
-Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
----
- sos/report/plugins/virsh.py | 14 +++++++++-----
- 1 file changed, 9 insertions(+), 5 deletions(-)
-
-diff --git a/sos/report/plugins/virsh.py b/sos/report/plugins/virsh.py
-index d6b7c16761..08f9a8488c 100644
---- a/sos/report/plugins/virsh.py
-+++ b/sos/report/plugins/virsh.py
-@@ -39,26 +39,30 @@ def setup(self):
-         ]
- 
-         for subcmd in subcmds:
--            self.add_cmd_output('%s %s' % (cmd, subcmd))
-+            self.add_cmd_output('%s %s' % (cmd, subcmd), foreground=True)
- 
-         # get network, pool and nwfilter elements
-         for k in ['net', 'nwfilter', 'pool']:
--            k_list = self.collect_cmd_output('%s %s-list' % (cmd, k))
-+            k_list = self.collect_cmd_output('%s %s-list' % (cmd, k),
-+                                             foreground=True)
-             if k_list['status'] == 0:
-                 k_lines = k_list['output'].splitlines()
-                 # the 'Name' column position changes between virsh cmds
-                 pos = k_lines[0].split().index('Name')
-                 for j in filter(lambda x: x, k_lines[2:]):
-                     n = j.split()[pos]
--                    self.add_cmd_output('%s %s-dumpxml %s' % (cmd, k, n))
-+                    self.add_cmd_output('%s %s-dumpxml %s' % (cmd, k, n),
-+                                        foreground=True)
- 
-         # cycle through the VMs/domains list, ignore 2 header lines and latest
-         # empty line, and dumpxml domain name in 2nd column
--        domains_output = self.exec_cmd('%s list --all' % cmd)
-+        domains_output = self.exec_cmd('%s list --all' % cmd, foreground=True)
-         if domains_output['status'] == 0:
-             domains_lines = domains_output['output'].splitlines()[2:]
-             for domain in filter(lambda x: x, domains_lines):
-                 d = domain.split()[1]
-                 for x in ['dumpxml', 'dominfo', 'domblklist']:
--                    self.add_cmd_output('%s %s %s' % (cmd, x, d))
-+                    self.add_cmd_output('%s %s %s' % (cmd, x, d),
-+                                        foreground=True)
-+
- # vim: et ts=4 sw=4
-From 9bc032129ec66766f07349dd115335f104888efa Mon Sep 17 00:00:00 2001
-From: Pavel Moravec <pmoravec@redhat.com>
-Date: Wed, 26 Jan 2022 09:44:01 +0100
-Subject: [PATCH] [virsh] Catch parsing exception
-
-In case the virsh output is malformed or otherwise missing 'Name',
-catch the parsing exception and continue with the next loop iteration.
-
-Resolves: #2836
-
-Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
----
- sos/report/plugins/virsh.py | 6 +++++-
- 1 file changed, 5 insertions(+), 1 deletion(-)
-
-diff --git a/sos/report/plugins/virsh.py b/sos/report/plugins/virsh.py
-index 08f9a8488..2ce1df15c 100644
---- a/sos/report/plugins/virsh.py
-+++ b/sos/report/plugins/virsh.py
-@@ -48,7 +48,11 @@ def setup(self):
-             if k_list['status'] == 0:
-                 k_lines = k_list['output'].splitlines()
-                 # the 'Name' column position changes between virsh cmds
--                pos = k_lines[0].split().index('Name')
-+                # catch the rare exceptions when 'Name' is not found
-+                try:
-+                    pos = k_lines[0].split().index('Name')
-+                except Exception:
-+                    continue
-                 for j in filter(lambda x: x, k_lines[2:]):
-                     n = j.split()[pos]
-                     self.add_cmd_output('%s %s-dumpxml %s' % (cmd, k, n),
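The 'Name'-column handling above is easiest to see against a concrete piece of virsh output. A minimal sketch with canned output (the table content is illustrative):

sample = """ Name      State    Autostart   Persistent
 --------------------------------------------------
 default   active   yes         yes
"""

k_lines = sample.splitlines()
try:
    # the 'Name' column position changes between virsh subcommands,
    # and may be absent entirely from malformed output
    pos = k_lines[0].split().index('Name')
except (IndexError, ValueError):
    pos = None

if pos is not None:
    # skip the two header lines and any empty lines
    for row in filter(None, k_lines[2:]):
        print(row.split()[pos])             # -> default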
diff --git a/SOURCES/sos-bz2043104-foreman-tasks-msgpack.patch b/SOURCES/sos-bz2043104-foreman-tasks-msgpack.patch
deleted file mode 100644
index 900389c..0000000
--- a/SOURCES/sos-bz2043104-foreman-tasks-msgpack.patch
+++ /dev/null
@@ -1,59 +0,0 @@
-From 5634f7dd77eff821f37daa953fa86cc783d3b937 Mon Sep 17 00:00:00 2001
-From: Pavel Moravec <pmoravec@redhat.com>
-Date: Fri, 21 Jan 2022 16:27:33 +0100
-Subject: [PATCH] [foreman] Use psql-msgpack-decode wrapper for dynflow >= 1.6
-
-In dynflow >= 1.6.3, the dynflow* tables in postgres are encoded with
-msgpack, which makes plain CSV dumps unreadable. In that case, the
-psql-msgpack-decode wrapper tool from dynflow-utils (of any version)
-must be used instead of the plain psql command.
-
-Resolves: #2830
-
-Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
----
- sos/report/plugins/foreman.py | 16 ++++++++++++----
- 1 file changed, 12 insertions(+), 4 deletions(-)
-
-diff --git a/sos/report/plugins/foreman.py b/sos/report/plugins/foreman.py
-index 314a651d1..3fd80e6a8 100644
---- a/sos/report/plugins/foreman.py
-+++ b/sos/report/plugins/foreman.py
-@@ -244,8 +244,16 @@ def setup(self):
-             self.add_cmd_output(_cmd, suggest_filename=table, timeout=600,
-                                 sizelimit=100, env=self.env)
- 
-+        # dynflow* tables on dynflow >= 1.6.3 are msgpack-encoded, so the
-+        # psql-msgpack-decode wrapper tool from dynflow-utils (any version)
-+        # must be used instead of the plain psql command
-+        dynutils = self.is_installed('dynflow-utils')
-         for dyn in foremancsv:
--            _cmd = self.build_query_cmd(foremancsv[dyn], csv=True)
-+            binary = "psql"
-+            if dyn != 'foreman_tasks_tasks' and dynutils:
-+                binary = "/usr/libexec/psql-msgpack-decode"
-+            _cmd = self.build_query_cmd(foremancsv[dyn], csv=True,
-+                                        binary=binary)
-             self.add_cmd_output(_cmd, suggest_filename=dyn, timeout=600,
-                                 sizelimit=100, env=self.env)
- 
-@@ -270,7 +278,7 @@ def setup(self):
-         # collect http[|s]_proxy env.variables
-         self.add_env_var(["http_proxy", "https_proxy"])
- 
--    def build_query_cmd(self, query, csv=False):
-+    def build_query_cmd(self, query, csv=False, binary="psql"):
-         """
-         Builds the command needed to invoke the pgsql query as the postgres
-         user.
-@@ -281,8 +289,8 @@ def build_query_cmd(self, query, csv=False):
-         if csv:
-             query = "COPY (%s) TO STDOUT " \
-                     "WITH (FORMAT 'csv', DELIMITER ',', HEADER)" % query
--        _dbcmd = "psql --no-password -h %s -p 5432 -U foreman -d foreman -c %s"
--        return _dbcmd % (self.dbhost, quote(query))
-+        _dbcmd = "%s --no-password -h %s -p 5432 -U foreman -d foreman -c %s"
-+        return _dbcmd % (binary, self.dbhost, quote(query))
- 
-     def postproc(self):
-         self.do_path_regex_sub(
diff --git a/SOURCES/sos-bz2043488-ovn-proper-package-enablement.patch b/SOURCES/sos-bz2043488-ovn-proper-package-enablement.patch
deleted file mode 100644
index 16c48c4..0000000
--- a/SOURCES/sos-bz2043488-ovn-proper-package-enablement.patch
+++ /dev/null
@@ -1,252 +0,0 @@
-From 210b83e1d1164d29b1f6198675b8b596c4af8336 Mon Sep 17 00:00:00 2001
-From: Daniel Alvarez Sanchez <dalvarez@redhat.com>
-Date: Thu, 20 Jan 2022 12:58:44 +0100
-Subject: [PATCH] [ovn_central] Account for Red Hat ovn package naming
-
-Previous ovn packages were named 'ovn2xxx'; they have now been
-renamed to 'ovn-2xxx'. This caused the sos tool to not recognize
-that the packages are installed, so it would not collect the
-relevant data.
-
-This patch changes the match to be compatible with both the
-previous and the newer naming conventions.
-
-Signed-off-by: Daniel Alvarez Sanchez <dalvarez@redhat.com>
----
- sos/report/plugins/ovn_central.py | 2 +-
- sos/report/plugins/ovn_host.py    | 2 +-
- 2 files changed, 2 insertions(+), 2 deletions(-)
-
-diff --git a/sos/report/plugins/ovn_central.py b/sos/report/plugins/ovn_central.py
-index ddbf288da..0f947d4c5 100644
---- a/sos/report/plugins/ovn_central.py
-+++ b/sos/report/plugins/ovn_central.py
-@@ -147,7 +147,7 @@ def setup(self):
- 
- class RedHatOVNCentral(OVNCentral, RedHatPlugin):
- 
--    packages = ('openvswitch-ovn-central', 'ovn2.*-central', )
-+    packages = ('openvswitch-ovn-central', 'ovn.*-central', )
-     ovn_sbdb_sock_path = '/var/run/openvswitch/ovnsb_db.ctl'
- 
- 
-diff --git a/sos/report/plugins/ovn_host.py b/sos/report/plugins/ovn_host.py
-index 78604a15a..25c38cccc 100644
---- a/sos/report/plugins/ovn_host.py
-+++ b/sos/report/plugins/ovn_host.py
-@@ -55,7 +55,7 @@ def check_enabled(self):
- 
- class RedHatOVNHost(OVNHost, RedHatPlugin):
- 
--    packages = ('openvswitch-ovn-host', 'ovn2.*-host', )
-+    packages = ('openvswitch-ovn-host', 'ovn.*-host', )
- 
- 
- class DebianOVNHost(OVNHost, DebianPlugin, UbuntuPlugin):
-From 21fc376d97a5f74743e2b7cf7069349e874b979e Mon Sep 17 00:00:00 2001
-From: Hemanth Nakkina <hemanth.nakkina@canonical.com>
-Date: Fri, 4 Feb 2022 07:57:59 +0530
-Subject: [PATCH] [ovn-central] collect NB/SB ovsdb-server cluster status
-
-Add commands to collect cluster status of Northbound and
-Southbound ovsdb servers.
-
-Resolves: #2840
-
-Signed-off-by: Hemanth Nakkina <hemanth.nakkina@canonical.com>
----
- sos/report/plugins/ovn_central.py | 13 ++++++++++++-
- 1 file changed, 12 insertions(+), 1 deletion(-)
-
-diff --git a/sos/report/plugins/ovn_central.py b/sos/report/plugins/ovn_central.py
-index 0f947d4c5..2f0438df3 100644
---- a/sos/report/plugins/ovn_central.py
-+++ b/sos/report/plugins/ovn_central.py
-@@ -84,6 +84,14 @@ def setup(self):
-         else:
-             self.add_copy_spec("/var/log/ovn/*.log")
- 
-+        # ovsdb nb/sb cluster status commands
-+        ovsdb_cmds = [
-+            'ovs-appctl -t {} cluster/status OVN_Northbound'.format(
-+                self.ovn_nbdb_sock_path),
-+            'ovs-appctl -t {} cluster/status OVN_Southbound'.format(
-+                self.ovn_sbdb_sock_path),
-+        ]
-+
-         # Some user-friendly versions of DB output
-         nbctl_cmds = [
-             'ovn-nbctl show',
-@@ -109,7 +117,8 @@ def setup(self):
- 
-         self.add_database_output(nb_tables, nbctl_cmds, 'ovn-nbctl')
- 
--        cmds = nbctl_cmds
-+        cmds = ovsdb_cmds
-+        cmds += nbctl_cmds
- 
-         # Can only run sbdb commands if we are the leader
-         co = {'cmd': "ovs-appctl -t {} cluster/status OVN_Southbound".
-@@ -148,10 +157,12 @@ def setup(self):
- class RedHatOVNCentral(OVNCentral, RedHatPlugin):
- 
-     packages = ('openvswitch-ovn-central', 'ovn.*-central', )
-+    ovn_nbdb_sock_path = '/var/run/openvswitch/ovnnb_db.ctl'
-     ovn_sbdb_sock_path = '/var/run/openvswitch/ovnsb_db.ctl'
- 
- 
- class DebianOVNCentral(OVNCentral, DebianPlugin, UbuntuPlugin):
- 
-     packages = ('ovn-central', )
-+    ovn_nbdb_sock_path = '/var/run/ovn/ovnnb_db.ctl'
-     ovn_sbdb_sock_path = '/var/run/ovn/ovnsb_db.ctl'
-From d0f9d507b0ec63c9e8f3e5d7b6507d9d0f97c038 Mon Sep 17 00:00:00 2001
-From: Jake Hunsaker <jhunsake@redhat.com>
-Date: Tue, 15 Feb 2022 16:24:47 -0500
-Subject: [PATCH] [runtimes] Allow container IDs to be used with
- `container_exists()`
-
-As container runtimes can interchange container names and container IDs,
-sos should also allow the use of container IDs when checking for the
-presence of a given container.
-
-In particular, this change unblocks the use of `Plugin.exec_cmd()` when
-used in conjunction with `Plugin.get_container_by_name()` to pick a
-container based on a provided regex that the container name may match.
-
-Related: #2856
-
-Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
----
- sos/policies/runtimes/__init__.py | 17 +++++++++++++++++
- sos/report/plugins/__init__.py    |  6 +++---
- 2 files changed, 20 insertions(+), 3 deletions(-)
-
-diff --git a/sos/policies/runtimes/__init__.py b/sos/policies/runtimes/__init__.py
-index 5ac673544..d28373496 100644
---- a/sos/policies/runtimes/__init__.py
-+++ b/sos/policies/runtimes/__init__.py
-@@ -147,6 +147,23 @@ def get_volumes(self):
-                     vols.append(ent[-1])
-         return vols
- 
-+    def container_exists(self, container):
-+        """Check if a given container ID or name exists on the system from the
-+        perspective of the container runtime.
-+
-+        Note that this will only check _running_ containers
-+
-+        :param container:       The name or ID of the container
-+        :type container:        ``str``
-+
-+        :returns:               True if the container exists, else False
-+        :rtype:                 ``bool``
-+        """
-+        for _contup in self.containers:
-+            if container in _contup:
-+                return True
-+        return False
-+
-     def fmt_container_cmd(self, container, cmd, quotecmd):
-         """Format a command to run inside a container using the runtime
- 
-diff --git a/sos/report/plugins/__init__.py b/sos/report/plugins/__init__.py
-index 2988be089..cc5cb65bc 100644
---- a/sos/report/plugins/__init__.py
-+++ b/sos/report/plugins/__init__.py
-@@ -2593,7 +2593,7 @@ def container_exists(self, name):
-         """If a container runtime is present, check to see if a container with
-         a given name is currently running
- 
--        :param name:    The name of the container to check presence of
-+        :param name:    The name or ID of the container to check presence of
-         :type name: ``str``
- 
-         :returns: ``True`` if `name` exists, else ``False``
-@@ -2601,8 +2601,8 @@ def container_exists(self, name):
-         """
-         _runtime = self._get_container_runtime()
-         if _runtime is not None:
--            con = _runtime.get_container_by_name(name)
--            return con is not None
-+            return (_runtime.container_exists(name) or
-+                    _runtime.get_container_by_name(name) is not None)
-         return False
- 
-     def get_all_containers_by_regex(self, regex, get_all=False):
-
-From de9b020a72d1ceda39587db4c6d5acf72cd90da2 Mon Sep 17 00:00:00 2001
-From: Fernando Royo <froyo@redhat.com>
-Date: Tue, 15 Feb 2022 10:00:38 +0100
-Subject: [PATCH] [ovn_central] Rename container responsible for Red Hat
- ovn_central plugin
-
-The ovn_central plugin is run by a container named 'ovn-dbs-bundle*'.
-A typo had been identified that caused the ovn_central plugin not to
-be enabled by default, as it did not recognize any container
-responsible for it.
-
-This patch fixes the container name match and searches for the schema
-db in both locations, keeping backward compatibility with openvswitch.
----
- sos/report/plugins/ovn_central.py | 23 ++++++++++++-----------
- 1 file changed, 12 insertions(+), 11 deletions(-)
-
-diff --git a/sos/report/plugins/ovn_central.py b/sos/report/plugins/ovn_central.py
-index 2f0438df..2f34bff0 100644
---- a/sos/report/plugins/ovn_central.py
-+++ b/sos/report/plugins/ovn_central.py
-@@ -24,7 +24,7 @@ class OVNCentral(Plugin):
-     short_desc = 'OVN Northd'
-     plugin_name = "ovn_central"
-     profiles = ('network', 'virt')
--    containers = ('ovs-db-bundle.*',)
-+    containers = ('ovn-dbs-bundle.*',)
- 
-     def get_tables_from_schema(self, filename, skip=[]):
-         if self._container_name:
-@@ -66,7 +66,7 @@ class OVNCentral(Plugin):
-             cmds.append('%s list %s' % (ovn_cmd, table))
- 
-     def setup(self):
--        self._container_name = self.get_container_by_name('ovs-dbs-bundle.*')
-+        self._container_name = self.get_container_by_name(self.containers[0])
- 
-         ovs_rundir = os.environ.get('OVS_RUNDIR')
-         for pidfile in ['ovnnb_db.pid', 'ovnsb_db.pid', 'ovn-northd.pid']:
-@@ -110,12 +110,11 @@ class OVNCentral(Plugin):
-             'ovn-sbctl get-connection',
-         ]
- 
--        schema_dir = '/usr/share/openvswitch'
--
--        nb_tables = self.get_tables_from_schema(self.path_join(
--            schema_dir, 'ovn-nb.ovsschema'))
--
--        self.add_database_output(nb_tables, nbctl_cmds, 'ovn-nbctl')
-+        # backward compatibility
-+        for path in ['/usr/share/openvswitch', '/usr/share/ovn']:
-+            nb_tables = self.get_tables_from_schema(self.path_join(
-+                path, 'ovn-nb.ovsschema'))
-+            self.add_database_output(nb_tables, nbctl_cmds, 'ovn-nbctl')
- 
-         cmds = ovsdb_cmds
-         cmds += nbctl_cmds
-@@ -125,9 +124,11 @@ class OVNCentral(Plugin):
-               format(self.ovn_sbdb_sock_path),
-               "output": "Leader: self"}
-         if self.test_predicate(self, pred=SoSPredicate(self, cmd_outputs=co)):
--            sb_tables = self.get_tables_from_schema(self.path_join(
--                schema_dir, 'ovn-sb.ovsschema'), ['Logical_Flow'])
--            self.add_database_output(sb_tables, sbctl_cmds, 'ovn-sbctl')
-+            # backward compatibility
-+            for path in ['/usr/share/openvswitch', '/usr/share/ovn']:
-+                sb_tables = self.get_tables_from_schema(self.path_join(
-+                    path, 'ovn-sb.ovsschema'), ['Logical_Flow'])
-+                self.add_database_output(sb_tables, sbctl_cmds, 'ovn-sbctl')
-             cmds += sbctl_cmds
- 
-         # If OVN is containerized, we need to run the above commands inside
--- 
-2.34.1
-
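The backward-compatibility loop above works because an OVSDB .ovsschema file is a JSON document whose top-level "tables" object names every table in the database. A rough standalone sketch of pulling table names from whichever schema location exists (simplified; the real plugin also handles containerized lookups):

import json
import os


def tables_from_schema(filename, skip=()):
    # .ovsschema files are JSON; table names are the keys of "tables"
    with open(filename) as schema_file:
        db = json.load(schema_file)
    return [tbl for tbl in db.get('tables', {}) if tbl not in skip]


# try the old and the new packaging locations in turn
for path in ['/usr/share/openvswitch', '/usr/share/ovn']:
    schema = os.path.join(path, 'ovn-nb.ovsschema')
    if os.path.exists(schema):
        print(tables_from_schema(schema))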
diff --git a/SOURCES/sos-bz2054883-plugopt-logging-effective-opts.patch b/SOURCES/sos-bz2054883-plugopt-logging-effective-opts.patch
deleted file mode 100644
index f8e7ed3..0000000
--- a/SOURCES/sos-bz2054883-plugopt-logging-effective-opts.patch
+++ /dev/null
@@ -1,94 +0,0 @@
-From 5824cd5d3bddf39e0382d568419e2453abc93d8a Mon Sep 17 00:00:00 2001
-From: Jake Hunsaker <jhunsake@redhat.com>
-Date: Mon, 30 Aug 2021 15:09:07 -0400
-Subject: [PATCH] [options] Fix logging on plugopts in effective sos command
-
-First, provide special-case handling for plugin options specified in
-sos.conf in `SoSOptions.to_args().has_value()` that allows for plugin
-options to be included in the "effective options now" log message.
-
-Second, move the logging of said message (and thus the merging of
-preset options, if used), to being _prior_ to the loading of plugin
-options.
-
-Combined, plugin options specified in sos.conf will now be logged
-properly and this logging will occur before we set (and log the setting
-of) those options.
-
-Resolves: #2663
-
-Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
----
- sos/options.py         |  2 ++
- sos/report/__init__.py | 30 ++++++++++++++++--------------
- 2 files changed, 18 insertions(+), 14 deletions(-)
-
-diff --git a/sos/options.py b/sos/options.py
-index a014a022..7bea3ffc 100644
---- a/sos/options.py
-+++ b/sos/options.py
-@@ -281,6 +281,8 @@ class SoSOptions():
-             null_values = ("False", "None", "[]", '""', "''", "0")
-             if not value or value in null_values:
-                 return False
-+            if name == 'plugopts' and value:
-+                return True
-             if name in self.arg_defaults:
-                 if str(value) == str(self.arg_defaults[name]):
-                     return False
-diff --git a/sos/report/__init__.py b/sos/report/__init__.py
-index b0159e5b..82484f1d 100644
---- a/sos/report/__init__.py
-+++ b/sos/report/__init__.py
-@@ -925,20 +925,6 @@ class SoSReport(SoSComponent):
-         self._exit(1)
- 
-     def setup(self):
--        # Log command line options
--        msg = "[%s:%s] executing 'sos %s'"
--        self.soslog.info(msg % (__name__, "setup", " ".join(self.cmdline)))
--
--        # Log active preset defaults
--        preset_args = self.preset.opts.to_args()
--        msg = ("[%s:%s] using '%s' preset defaults (%s)" %
--               (__name__, "setup", self.preset.name, " ".join(preset_args)))
--        self.soslog.info(msg)
--
--        # Log effective options after applying preset defaults
--        self.soslog.info("[%s:%s] effective options now: %s" %
--                         (__name__, "setup", " ".join(self.opts.to_args())))
--
-         self.ui_log.info(_(" Setting up plugins ..."))
-         for plugname, plug in self.loaded_plugins:
-             try:
-@@ -1386,11 +1372,27 @@ class SoSReport(SoSComponent):
-         self.report_md.add_list('disabled_plugins', self.opts.skip_plugins)
-         self.report_md.add_section('plugins')
- 
-+    def _merge_preset_options(self):
-+        # Log command line options
-+        msg = "[%s:%s] executing 'sos %s'"
-+        self.soslog.info(msg % (__name__, "setup", " ".join(self.cmdline)))
-+
-+        # Log active preset defaults
-+        preset_args = self.preset.opts.to_args()
-+        msg = ("[%s:%s] using '%s' preset defaults (%s)" %
-+               (__name__, "setup", self.preset.name, " ".join(preset_args)))
-+        self.soslog.info(msg)
-+
-+        # Log effective options after applying preset defaults
-+        self.soslog.info("[%s:%s] effective options now: %s" %
-+                         (__name__, "setup", " ".join(self.opts.to_args())))
-+
-     def execute(self):
-         try:
-             self.policy.set_commons(self.get_commons())
-             self.load_plugins()
-             self._set_all_options()
-+            self._merge_preset_options()
-             self._set_tunables()
-             self._check_for_unknown_plugins()
-             self._set_plugin_options()
--- 
-2.34.1
-
diff --git a/SOURCES/sos-bz2055003-rebase-sos-add-sos-help.patch b/SOURCES/sos-bz2055003-rebase-sos-add-sos-help.patch
new file mode 100644
index 0000000..1b8af24
--- /dev/null
+++ b/SOURCES/sos-bz2055003-rebase-sos-add-sos-help.patch
@@ -0,0 +1,67 @@
+From b5389aa195675f473acdd22f20017a8854ff82d0 Mon Sep 17 00:00:00 2001
+From: Pavel Moravec <pmoravec@redhat.com>
+Date: Wed, 16 Feb 2022 08:43:32 +0100
+Subject: [PATCH] [man] Mention sos-help in main sos manpage
+
+Related to #2860
+
+Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
+---
+ man/en/sos.1 | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/man/en/sos.1 b/man/en/sos.1
+index ce4918f99..c335b7e10 100644
+--- a/man/en/sos.1
++++ b/man/en/sos.1
+@@ -67,6 +67,14 @@ May be invoked via either \fBsos clean\fR, \fBsos cleaner\fR, \fBsos mask\fR,
+ or via the \fB--clean\fR, \fB--cleaner\fR or \fB --mask\fR options
+ for \fBreport\fR and \fBcollect\fR.
+ 
++.TP
++.B help
++This subcommand is used to retrieve more detailed information on the various SoS
++commands and components than is directly available in either other manpages or
++--help output.
++
++See \fB sos help --help\fR and \fB man sos-help\fR for more information.
++
+ .SH GLOBAL OPTIONS
+ sos components provide their own set of options, however the following are available
+ to be set across all components.
+From ac4eb48fa35c13b99ada41540831412480babf8d Mon Sep 17 00:00:00 2001
+From: Pavel Moravec <pmoravec@redhat.com>
+Date: Wed, 16 Feb 2022 08:44:16 +0100
+Subject: [PATCH] [setup] Add sos-help to build process
+
+Resolves: #2860
+Closes: #2861
+
+Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
+---
+ setup.py | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/setup.py b/setup.py
+index 25e87a71b..8db8641f0 100644
+--- a/setup.py
++++ b/setup.py
+@@ -90,7 +90,7 @@ def copy_file (self, filename, dirname):
+         ('share/man/man1', ['man/en/sosreport.1', 'man/en/sos-report.1',
+                             'man/en/sos.1', 'man/en/sos-collect.1',
+                             'man/en/sos-collector.1', 'man/en/sos-clean.1',
+-                            'man/en/sos-mask.1']),
++                            'man/en/sos-mask.1', 'man/en/sos-help.1']),
+         ('share/man/man5', ['man/en/sos.conf.5']),
+         ('share/licenses/sos', ['LICENSE']),
+         ('share/doc/sos', ['AUTHORS', 'README.md']),
+@@ -102,7 +102,8 @@ def copy_file (self, filename, dirname):
+         'sos.policies.package_managers', 'sos.policies.init_systems',
+         'sos.report', 'sos.report.plugins', 'sos.collector',
+         'sos.collector.clusters', 'sos.collector.transports', 'sos.cleaner',
+-        'sos.cleaner.mappings', 'sos.cleaner.parsers', 'sos.cleaner.archives'
++        'sos.cleaner.mappings', 'sos.cleaner.parsers', 'sos.cleaner.archives',
++        'sos.help'
+     ],
+     cmdclass=cmdclass,
+     command_options=command_options,
diff --git a/SOURCES/sos-bz2055548-honour-plugins-timeout-hardcoded.patch b/SOURCES/sos-bz2055548-honour-plugins-timeout-hardcoded.patch
deleted file mode 100644
index 3adde40..0000000
--- a/SOURCES/sos-bz2055548-honour-plugins-timeout-hardcoded.patch
+++ /dev/null
@@ -1,39 +0,0 @@
-From 7069e99d1c5c443f96a98a7ed6db67fa14683e67 Mon Sep 17 00:00:00 2001
-From: Pavel Moravec <pmoravec@redhat.com>
-Date: Thu, 17 Feb 2022 09:14:15 +0100
-Subject: [PATCH] [report] Honor plugins' hardcoded plugin_timeout
-
-Currently, a plugin's hardcoded plugin_timeout default is superseded by
-whatever --plugin-timeout value is in effect, even when this option is
-not used and we evaluate it to TIMEOUT_DEFAULT.
-
-In the case where neither --plugin-timeout nor -k plugin.timeout is
-set, honour the plugin's hardcoded plugin_timeout instead.
-
-Resolves: #2863
-Closes: #2864
-
-Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
----
- sos/report/plugins/__init__.py | 5 ++++-
- 1 file changed, 4 insertions(+), 1 deletion(-)
-
-diff --git a/sos/report/plugins/__init__.py b/sos/report/plugins/__init__.py
-index cc5cb65b..336b4d22 100644
---- a/sos/report/plugins/__init__.py
-+++ b/sos/report/plugins/__init__.py
-@@ -636,7 +636,10 @@ class Plugin():
-             if opt_timeout is None:
-                 _timeout = own_timeout
-             elif opt_timeout is not None and own_timeout == -1:
--                _timeout = int(opt_timeout)
-+                if opt_timeout == TIMEOUT_DEFAULT:
-+                    _timeout = default_timeout
-+                else:
-+                    _timeout = int(opt_timeout)
-             elif opt_timeout is not None and own_timeout > -1:
-                 _timeout = own_timeout
-             else:
--- 
-2.34.1
-
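Spelled out, the precedence after this patch is: an explicit -k plugin.timeout wins, then an explicitly changed --plugin-timeout, then the plugin's hardcoded plugin_timeout. A standalone sketch with simplified inputs (the sos implementation reads these values via get_option()):

TIMEOUT_DEFAULT = 300


def effective_timeout(opt_timeout, own_timeout, hardcoded):
    # opt_timeout: --plugin-timeout (evaluates to TIMEOUT_DEFAULT if unset)
    # own_timeout: -k plugin.timeout (-1 if unset)
    # hardcoded:   the plugin's own plugin_timeout class attribute
    if opt_timeout is None:
        return own_timeout
    if own_timeout == -1:
        # only defer to --plugin-timeout when it was actually changed,
        # otherwise honour the plugin's hardcoded default
        return hardcoded if opt_timeout == TIMEOUT_DEFAULT else int(opt_timeout)
    return own_timeout


assert effective_timeout(TIMEOUT_DEFAULT, -1, 900) == 900   # the fix
assert effective_timeout(600, -1, 900) == 600
assert effective_timeout(TIMEOUT_DEFAULT, 450, 900) == 450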
diff --git a/SOURCES/sos-bz2065563-ocp-backports.patch b/SOURCES/sos-bz2065563-ocp-backports.patch
new file mode 100644
index 0000000..7cfaa91
--- /dev/null
+++ b/SOURCES/sos-bz2065563-ocp-backports.patch
@@ -0,0 +1,1113 @@
+From d0f9d507b0ec63c9e8f3e5d7b6507d9d0f97c038 Mon Sep 17 00:00:00 2001
+From: Jake Hunsaker <jhunsake@redhat.com>
+Date: Tue, 15 Feb 2022 16:24:47 -0500
+Subject: [PATCH] [runtimes] Allow container IDs to be used with
+ `container_exists()`
+
+As container runtimes can interchange container names and container IDs,
+sos should also allow the use of container IDs when checking for the
+presence of a given container.
+
+In particular, this change unblocks the use of `Plugin.exec_cmd()` when
+used in conjunction with `Plugin.get_container_by_name()` to pick a
+container based on a provided regex that the container name may match.
+
+Related: #2856
+
+Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
+---
+ sos/policies/runtimes/__init__.py | 17 +++++++++++++++++
+ sos/report/plugins/__init__.py    |  6 +++---
+ 2 files changed, 20 insertions(+), 3 deletions(-)
+
+diff --git a/sos/policies/runtimes/__init__.py b/sos/policies/runtimes/__init__.py
+index 5ac67354..d2837349 100644
+--- a/sos/policies/runtimes/__init__.py
++++ b/sos/policies/runtimes/__init__.py
+@@ -147,6 +147,23 @@ class ContainerRuntime():
+                     vols.append(ent[-1])
+         return vols
+ 
++    def container_exists(self, container):
++        """Check if a given container ID or name exists on the system from the
++        perspective of the container runtime.
++
++        Note that this will only check _running_ containers
++
++        :param container:       The name or ID of the container
++        :type container:        ``str``
++
++        :returns:               True if the container exists, else False
++        :rtype:                 ``bool``
++        """
++        for _contup in self.containers:
++            if container in _contup:
++                return True
++        return False
++
+     def fmt_container_cmd(self, container, cmd, quotecmd):
+         """Format a command to run inside a container using the runtime
+ 
+diff --git a/sos/report/plugins/__init__.py b/sos/report/plugins/__init__.py
+index 2988be08..cc5cb65b 100644
+--- a/sos/report/plugins/__init__.py
++++ b/sos/report/plugins/__init__.py
+@@ -2593,7 +2593,7 @@ class Plugin():
+         """If a container runtime is present, check to see if a container with
+         a given name is currently running
+ 
+-        :param name:    The name of the container to check presence of
++        :param name:    The name or ID of the container to check presence of
+         :type name: ``str``
+ 
+         :returns: ``True`` if `name` exists, else ``False``
+@@ -2601,8 +2601,8 @@ class Plugin():
+         """
+         _runtime = self._get_container_runtime()
+         if _runtime is not None:
+-            con = _runtime.get_container_by_name(name)
+-            return con is not None
++            return (_runtime.container_exists(name) or
++                    _runtime.get_container_by_name(name) is not None)
+         return False
+ 
+     def get_all_containers_by_regex(self, regex, get_all=False):
+-- 
+2.34.3
+
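In sos, a ContainerRuntime's containers attribute holds (id, name) tuples, which is why a single membership test covers both lookups. A tiny sketch with made-up entries:

# illustrative (id, name) tuples as a runtime would report them
containers = [
    ('371db6d8a2b1', 'ovnkube-node'),
    ('de9b020a72d1', 'ovnkube-master'),
]


def container_exists(container):
    # a name or an ID both match the same tuple membership test
    return any(container in tup for tup in containers)


assert container_exists('ovnkube-master')
assert container_exists('371db6d8a2b1')
assert not container_exists('missing-container')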
+From 2ae16e0245e1b01b8547e507abb69c11871a8467 Mon Sep 17 00:00:00 2001
+From: Jake Hunsaker <jhunsake@redhat.com>
+Date: Mon, 21 Feb 2022 14:37:09 -0500
+Subject: [PATCH] [sosnode] Handle downstream versioning for runtime option
+ check
+
+First, adds parsing and formatting for an sos installation's release
+version according to the loaded package manager for that node.
+
+Adds a fallback version check for 4.2-13 for RHEL downstreams that
+backport the `container-runtime` option into sos-4.2.
+
+Carry this in upstream to account for use cases where the workstation
+used to run `collect` may be on a different stream than the one used by
+cluster nodes.
+
+Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
+---
+ sos/collector/sosnode.py | 60 ++++++++++++++++++++++++++++++++++------
+ 1 file changed, 51 insertions(+), 9 deletions(-)
+
+diff --git a/sos/collector/sosnode.py b/sos/collector/sosnode.py
+index 7bbe0cd1..d9b998b0 100644
+--- a/sos/collector/sosnode.py
++++ b/sos/collector/sosnode.py
+@@ -275,21 +275,34 @@ class SosNode():
+     def _load_sos_info(self):
+         """Queries the node for information about the installed version of sos
+         """
++        ver = None
++        rel = None
+         if self.host.container_version_command is None:
+             pkg = self.host.package_manager.pkg_version(self.host.sos_pkg_name)
+             if pkg is not None:
+                 ver = '.'.join(pkg['version'])
+-                self.sos_info['version'] = ver
++                if pkg['release']:
++                    rel = pkg['release']
++
+         else:
+             # use the containerized policy's command
+             pkgs = self.run_command(self.host.container_version_command,
+                                     use_container=True, need_root=True)
+             if pkgs['status'] == 0:
+-                ver = pkgs['output'].strip().split('-')[1]
+-                if ver:
+-                    self.sos_info['version'] = ver
+-            else:
+-                self.sos_info['version'] = None
++                _, ver, rel = pkgs['output'].strip().split('-')
++
++        if ver:
++            if len(ver.split('.')) == 2:
++                # safeguard against maintenance releases throwing off the
++                # comparison by LooseVersion
++                ver += '.0'
++            try:
++                ver += '-%s' % rel.split('.')[0]
++            except Exception as err:
++                self.log_debug("Unable to fully parse sos release: %s" % err)
++
++        self.sos_info['version'] = ver
++
+         if self.sos_info['version']:
+             self.log_info('sos version is %s' % self.sos_info['version'])
+         else:
+@@ -381,9 +394,37 @@ class SosNode():
+         """Checks to see if the sos installation on the node is AT LEAST the
+         given ver. This means that if the installed version is greater than
+         ver, this will still return True
++
++        :param ver: Version number we are trying to verify is installed
++        :type ver:  ``str``
++
++        :returns:   True if installed version is at least ``ver``, else False
++        :rtype:     ``bool``
+         """
+-        return self.sos_info['version'] is not None and \
+-            LooseVersion(self.sos_info['version']) >= ver
++        def _format_version(ver):
++            # format the version we're checking to a standard form of X.Y.Z-R
++            try:
++                _fver = ver.split('-')[0]
++                _rel = ''
++                if '-' in ver:
++                    _rel = '-' + ver.split('-')[-1].split('.')[0]
++                if len(_fver.split('.')) == 2:
++                    _fver += '.0'
++
++                return _fver + _rel
++            except Exception as err:
++                self.log_debug("Unable to format '%s': %s" % (ver, err))
++                return ver
++
++        _ver = _format_version(ver)
++
++        try:
++            _node_ver = LooseVersion(self.sos_info['version'])
++            _test_ver = LooseVersion(_ver)
++            return _node_ver >= _test_ver
++        except Exception as err:
++            self.log_error("Error checking sos version: %s" % err)
++            return False
+ 
+     def is_installed(self, pkg):
+         """Checks if a given package is installed on the node"""
+@@ -587,7 +628,8 @@ class SosNode():
+                 sos_opts.append('--cmd-timeout=%s'
+                                 % quote(str(self.opts.cmd_timeout)))
+ 
+-        if self.check_sos_version('4.3'):
++        # handle downstream versions that backported this option
++        if self.check_sos_version('4.3') or self.check_sos_version('4.2-13'):
+             if self.opts.container_runtime != 'auto':
+                 sos_opts.append(
+                     "--container-runtime=%s" % self.opts.container_runtime
+-- 
+2.34.3
+
+From cc60fa5ee25bffed9203a4f786256185b7fe0115 Mon Sep 17 00:00:00 2001
+From: Nadia Pinaeva <npinaeva@redhat.com>
+Date: Tue, 15 Mar 2022 11:49:57 +0100
+Subject: [PATCH] Add ovs datapath and groups collection commands; add
+ ct-zone-list command for openshift-ovn
+
+Signed-off-by: Nadia Pinaeva <npinaeva@redhat.com>
+---
+ sos/report/plugins/openshift_ovn.py | 4 ++++
+ sos/report/plugins/openvswitch.py   | 3 +++
+ 2 files changed, 7 insertions(+)
+
+diff --git a/sos/report/plugins/openshift_ovn.py b/sos/report/plugins/openshift_ovn.py
+index 168f1dd3..b4787b8e 100644
+--- a/sos/report/plugins/openshift_ovn.py
++++ b/sos/report/plugins/openshift_ovn.py
+@@ -34,6 +34,10 @@ class OpenshiftOVN(Plugin, RedHatPlugin):
+             'ovn-appctl -t /var/run/ovn/ovnsb_db.ctl ' +
+             'cluster/status OVN_Southbound'],
+             container='ovnkube-master')
++        self.add_cmd_output([
++            'ovs-appctl -t /var/run/ovn/ovn-controller.*.ctl ' +
++            'ct-zone-list'],
++            container='ovnkube-node')
+         self.add_cmd_output([
+             'ovs-appctl -t ovs-monitor-ipsec tunnels/show',
+             'ipsec status',
+diff --git a/sos/report/plugins/openvswitch.py b/sos/report/plugins/openvswitch.py
+index 179d1532..159b0bd2 100644
+--- a/sos/report/plugins/openvswitch.py
++++ b/sos/report/plugins/openvswitch.py
+@@ -124,6 +124,8 @@ class OpenVSwitch(Plugin):
+             "ovs-vsctl -t 5 list interface",
+             # Capture OVS detailed information from all the bridges
+             "ovs-vsctl -t 5 list bridge",
++            # Capture OVS datapath list
++            "ovs-vsctl -t 5 list datapath",
+             # Capture DPDK queue to pmd mapping
+             "ovs-appctl dpif-netdev/pmd-rxq-show",
+             # Capture DPDK pmd stats
+@@ -229,6 +231,7 @@ class OpenVSwitch(Plugin):
+                     "ovs-ofctl queue-get-config %s" % br,
+                     "ovs-ofctl queue-stats %s" % br,
+                     "ovs-ofctl show %s" % br,
++                    "ovs-ofctl dump-groups %s" % br,
+                 ])
+ 
+                 # Flow protocols currently supported
+-- 
+2.34.3
+
+From af40be92f502b35fa9d39ce4d4fea7d80c367830 Mon Sep 17 00:00:00 2001
+From: Nadia Pinaeva <npinaeva@redhat.com>
+Date: Tue, 15 Mar 2022 13:09:55 +0100
+Subject: [PATCH] Improve sos collect for OCP
+
+1. Wait for the sos temporary project to be deleted (just calling delete
+   changes the project state to Terminating, and running a new sos collect
+   is not possible before the project is fully deleted).
+2. Use the --retries flag to copy sos reports from the nodes more reliably.
+   The flag has recently been added to kubectl, and the most reliable way to
+   check whether it is available is to check the command error output for
+   the "unknown flag" substring.
+
+Signed-off-by: Nadia Pinaeva <npinaeva@redhat.com>
+---
+ sos/collector/clusters/ocp.py  | 5 +++++
+ sos/collector/transports/oc.py | 6 +++++-
+ 2 files changed, 10 insertions(+), 1 deletion(-)
+
+diff --git a/sos/collector/clusters/ocp.py b/sos/collector/clusters/ocp.py
+index f1714239..9beb2f9b 100644
+--- a/sos/collector/clusters/ocp.py
++++ b/sos/collector/clusters/ocp.py
+@@ -123,6 +123,11 @@ class ocp(Cluster):
+             if not ret['status'] == 0:
+                 self.log_error("Error deleting temporary project: %s"
+                                % ret['output'])
++            ret = self.exec_primary_cmd("oc wait namespace/%s --for=delete "
++                                        "--timeout=30s" % self.project)
++            if not ret['status'] == 0:
++                self.log_error("Error waiting for temporary project to be "
++                               "deleted: %s" % ret['output'])
+             # don't leave the config on a non-existing project
+             self.exec_primary_cmd("oc project default")
+             self.project = None
+diff --git a/sos/collector/transports/oc.py b/sos/collector/transports/oc.py
+index 0fc9eee8..90a802b2 100644
+--- a/sos/collector/transports/oc.py
++++ b/sos/collector/transports/oc.py
+@@ -231,5 +231,9 @@ class OCTransport(RemoteTransport):
+                 % (self.project, self.pod_name))
+ 
+     def _retrieve_file(self, fname, dest):
+-        cmd = self.run_oc("cp %s:%s %s" % (self.pod_name, fname, dest))
++        # check if --retries flag is available for given version of oc
++        result = self.run_oc("cp --retries", stderr=True)
++        flags = '' if "unknown flag" in result["output"] else '--retries=5'
++        cmd = self.run_oc("cp %s %s:%s %s"
++                          % (flags, self.pod_name, fname, dest))
+         return cmd['status'] == 0
+-- 
+2.34.3
+
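The feature probe above generalizes to any CLI flag: run the command once with the flag and look for the client's "unknown flag" complaint. A rough sketch using subprocess directly instead of sos's run_oc() wrapper (assumes an oc client on PATH):

import subprocess


def cp_retries_flag():
    # probe `oc cp --retries`; clients that predate the flag reject it
    # with an 'unknown flag' error on stderr
    res = subprocess.run(['oc', 'cp', '--retries'],
                         capture_output=True, text=True)
    probe = res.stdout + res.stderr
    return '' if 'unknown flag' in probe else '--retries=5'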
+From 3b0676b90ff65f20eaba3062775ff72b89386ffc Mon Sep 17 00:00:00 2001
+From: Jake Hunsaker <jhunsake@redhat.com>
+Date: Tue, 22 Mar 2022 14:25:24 -0400
+Subject: [PATCH] [Plugin] Allow plugins to define default command environment
+ vars
+
+Adds the ability for plugins to define a default set of environment vars
+to pass to all commands executed by the plugin. This may be done either
+via the new `set_default_cmd_environment()` or
+`add_default_cmd_environment()` methods. The former will override any
+previously set values, whereas the latter will add/update/modify any
+existing values.
+
+Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
+---
+ sos/report/plugins/__init__.py                | 55 ++++++++++++++++++-
+ .../plugin_tests/plugin_environment.py        | 44 +++++++++++++++
+ .../fake_plugins/default_env_test.py          | 28 ++++++++++
+ tests/unittests/plugin_tests.py               | 15 +++++
+ 4 files changed, 140 insertions(+), 2 deletions(-)
+ create mode 100644 tests/report_tests/plugin_tests/plugin_environment.py
+ create mode 100644 tests/test_data/fake_plugins/default_env_test.py
+
+diff --git a/sos/report/plugins/__init__.py b/sos/report/plugins/__init__.py
+index 336b4d22..74b4f4be 100644
+--- a/sos/report/plugins/__init__.py
++++ b/sos/report/plugins/__init__.py
+@@ -571,6 +571,7 @@ class Plugin():
+         self.manifest = None
+         self.skip_files = commons['cmdlineopts'].skip_files
+         self.skip_commands = commons['cmdlineopts'].skip_commands
++        self.default_environment = {}
+ 
+         self.soslog = self.commons['soslog'] if 'soslog' in self.commons \
+             else logging.getLogger('sos')
+@@ -624,6 +625,52 @@ class Plugin():
+         self.manifest.add_field('strings', {})
+         self.manifest.add_field('containers', {})
+ 
++    def set_default_cmd_environment(self, env_vars):
++        """
++        Specify a collection of environment variables that should always be
++        passed to commands being executed by this plugin.
++
++        :param env_vars:    The environment variables and their values to set
++        :type env_vars:     ``dict{ENV_VAR_NAME: ENV_VAR_VALUE}``
++        """
++        if not isinstance(env_vars, dict):
++            raise TypeError(
++                "Environment variables for Plugin must be specified by dict"
++            )
++        self.default_environment = env_vars
++        self._log_debug("Default environment for all commands now set to %s"
++                        % self.default_environment)
++
++    def add_default_cmd_environment(self, env_vars):
++        """
++        Add or modify a specific environment variable in the set of default
++        environment variables used by this Plugin.
++
++        :param env_vars:    The environment variables to add to the current
++                            set of env vars in use
++        :type env_vars:     ``dict``
++        """
++        if not isinstance(env_vars, dict):
++            raise TypeError("Environment variables must be added via dict")
++        self._log_debug("Adding %s to default environment" % env_vars)
++        self.default_environment.update(env_vars)
++
++    def _get_cmd_environment(self, env=None):
++        """
++        Get the merged set of environment variables for a command about to be
++        executed by this plugin.
++
++        :returns: The set of env vars to use for a command
++        :rtype: ``dict``
++        """
++        if env is None:
++            return self.default_environment
++        if not isinstance(env, dict):
++            raise TypeError("Command env vars must be passed as dict")
++        _env = self.default_environment.copy()
++        _env.update(env)
++        return _env
++
+     def timeout_from_options(self, optname, plugoptname, default_timeout):
+         """Returns either the default [plugin|cmd] timeout value, the value as
+         provided on the commandline via -k plugin.[|cmd-]timeout=value, or the
+@@ -2258,6 +2305,8 @@ class Plugin():
+ 
+         _tags = list(set(_tags))
+ 
++        _env = self._get_cmd_environment(env)
++
+         if chroot or self.commons['cmdlineopts'].chroot == 'always':
+             root = self.sysroot
+         else:
+@@ -2282,7 +2331,7 @@ class Plugin():
+ 
+         result = sos_get_command_output(
+             cmd, timeout=timeout, stderr=stderr, chroot=root,
+-            chdir=runat, env=env, binary=binary, sizelimit=sizelimit,
++            chdir=runat, env=_env, binary=binary, sizelimit=sizelimit,
+             poller=self.check_timeout, foreground=foreground,
+             to_file=out_file
+         )
+@@ -2510,6 +2559,8 @@ class Plugin():
+         else:
+             root = None
+ 
++        _env = self._get_cmd_environment(env)
++
+         if container:
+             if self._get_container_runtime() is None:
+                 self._log_info("Cannot run cmd '%s' in container %s: no "
+@@ -2522,7 +2573,7 @@ class Plugin():
+                                "container is running." % (cmd, container))
+ 
+         return sos_get_command_output(cmd, timeout=timeout, chroot=root,
+-                                      chdir=runat, binary=binary, env=env,
++                                      chdir=runat, binary=binary, env=_env,
+                                       foreground=foreground, stderr=stderr)
+ 
+     def _add_container_file_to_manifest(self, container, path, arcpath, tags):
+diff --git a/tests/report_tests/plugin_tests/plugin_environment.py b/tests/report_tests/plugin_tests/plugin_environment.py
+new file mode 100644
+index 00000000..3158437a
+--- /dev/null
++++ b/tests/report_tests/plugin_tests/plugin_environment.py
+@@ -0,0 +1,44 @@
++# This file is part of the sos project: https://github.com/sosreport/sos
++#
++# This copyrighted material is made available to anyone wishing to use,
++# modify, copy, or redistribute it subject to the terms and conditions of
++# version 2 of the GNU General Public License.
++#
++# See the LICENSE file in the source distribution for further information.
++
++import os
++
++from sos_tests import StageTwoReportTest
++
++
++class PluginDefaultEnvironmentTest(StageTwoReportTest):
++    """
++    Ensure that setting a default set of environment variables works
++    correctly and does not leave a lingering env var on the system
++
++    :avocado: tags=stageone
++    """
++
++    install_plugins = ['default_env_test']
++    sos_cmd = '-o default_env_test'
++
++    def test_environment_used_in_cmd(self):
++        self.assertFileHasContent(
++            'sos_commands/default_env_test/env_var_test',
++            'Does Linus play hockey?'
++        )
++
++    def test_environment_setting_logged(self):
++        self.assertSosLogContains(
++            'Default environment for all commands now set to'
++        )
++
++    def test_environment_not_set_on_host(self):
++        self.assertTrue('TORVALDS' not in os.environ)
++        self.assertTrue('GREATESTSPORT' not in os.environ)
++
++    def test_environment_not_captured(self):
++        # we should still have an empty environment file
++        self.assertFileCollected('environment')
++        self.assertFileNotHasContent('environment', 'TORVALDS')
++        self.assertFileNotHasContent('environment', 'GREATESTSPORT')
+diff --git a/tests/test_data/fake_plugins/default_env_test.py b/tests/test_data/fake_plugins/default_env_test.py
+new file mode 100644
+index 00000000..d1d1fb78
+--- /dev/null
++++ b/tests/test_data/fake_plugins/default_env_test.py
+@@ -0,0 +1,28 @@
++# This file is part of the sos project: https://github.com/sosreport/sos
++#
++# This copyrighted material is made available to anyone wishing to use,
++# modify, copy, or redistribute it subject to the terms and conditions of
++# version 2 of the GNU General Public License.
++#
++# See the LICENSE file in the source distribution for further information.
++
++from sos.report.plugins import Plugin, IndependentPlugin
++
++
++class DefaultEnv(Plugin, IndependentPlugin):
++
++    plugin_name = 'default_env_test'
++    short_desc = 'Fake plugin to test default env var handling'
++
++    def setup(self):
++        self.set_default_cmd_environment({
++            'TORVALDS': 'Linus',
++            'GREATESTSPORT': 'hockey'
++        })
++
++        self.add_cmd_output(
++            "sh -c 'echo Does '$TORVALDS' play '$GREATESTSPORT'?'",
++            suggest_filename='env_var_test'
++        )
++
++        self.add_env_var(['TORVALDS', 'GREATESTSPORT'])
+diff --git a/tests/unittests/plugin_tests.py b/tests/unittests/plugin_tests.py
+index 0dfa243d..e469b78e 100644
+--- a/tests/unittests/plugin_tests.py
++++ b/tests/unittests/plugin_tests.py
+@@ -305,6 +305,21 @@ class PluginTests(unittest.TestCase):
+         p.postproc()
+         self.assertTrue(p.did_postproc)
+ 
++    def test_set_default_cmd_env(self):
++        p = MockPlugin({
++            'sysroot': self.sysroot,
++            'policy': LinuxPolicy(init=InitSystem(), probe_runtime=False),
++            'cmdlineopts': MockOptions(),
++            'devices': {}
++        })
++        e = {'TORVALDS': 'Linus'}
++        p.set_default_cmd_environment(e)
++        self.assertEquals(p.default_environment, e)
++        add_e = {'GREATESTSPORT': 'hockey'}
++        p.add_default_cmd_environment(add_e)
++        self.assertEquals(p.default_environment['GREATESTSPORT'], 'hockey')
++        self.assertEquals(p.default_environment['TORVALDS'], 'Linus')
++
+ 
+ class AddCopySpecTests(unittest.TestCase):
+ 
+-- 
+2.34.3
+
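+A usage sketch for the new interface; the plugin name, command, and
+variables below are illustrative only:
+
+    from sos.report.plugins import Plugin, IndependentPlugin
+
+    class MyService(Plugin, IndependentPlugin):
+
+        plugin_name = 'myservice'
+        short_desc = 'Example of default command environments'
+
+        def setup(self):
+            # every command this plugin runs now inherits this env
+            self.set_default_cmd_environment({'LC_ALL': 'C'})
+            # add or override one variable without replacing the set
+            self.add_default_cmd_environment({'MYSERVICE_DEBUG': '1'})
+            # a per-call env= still wins for overlapping keys
+            self.add_cmd_output('myservice-cli status')
+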
+From 1e12325efaa500d304dcbfbeeb50e72ed0f938f5 Mon Sep 17 00:00:00 2001
+From: Vladislav Walek <22072258+vwalek@users.noreply.github.com>
+Date: Thu, 17 Mar 2022 14:10:26 -0700
+Subject: [PATCH] [openshift] Add ability to use the localhost.kubeconfig
+ and KUBECONFIG env to log in as system:admin
+
+Signed-off-by: Vladislav Walek <22072258+vwalek@users.noreply.github.com>
+---
+ sos/report/plugins/openshift.py | 45 +++++++++++++++++++++++++++++++--
+ 1 file changed, 43 insertions(+), 2 deletions(-)
+
+diff --git a/sos/report/plugins/openshift.py b/sos/report/plugins/openshift.py
+index 5ae38178..d643f04c 100644
+--- a/sos/report/plugins/openshift.py
++++ b/sos/report/plugins/openshift.py
+@@ -53,12 +53,19 @@ class Openshift(Plugin, RedHatPlugin):
+     profiles = ('openshift',)
+     packages = ('openshift-hyperkube',)
+ 
++    master_localhost_kubeconfig = (
++        '/etc/kubernetes/static-pod-resources/'
++        'kube-apiserver-certs/secrets/node-kubeconfigs/localhost.kubeconfig'
++        )
++
+     option_list = [
+         PluginOpt('token', default=None, val_type=str,
+                   desc='admin token to allow API queries'),
++        PluginOpt('kubeconfig', default=None, val_type=str,
++                  desc='Path to a locally available kubeconfig file'),
+         PluginOpt('host', default='https://localhost:6443',
+                   desc='host address to use for oc login, including port'),
+-        PluginOpt('no-oc', default=False, desc='do not collect `oc` output'),
++        PluginOpt('no-oc', default=True, desc='do not collect `oc` output'),
+         PluginOpt('podlogs', default=True, desc='collect logs from each pod'),
+         PluginOpt('podlogs-filter', default='', val_type=str,
+                   desc='only collect logs from pods matching this pattern'),
+@@ -73,6 +80,10 @@ class Openshift(Plugin, RedHatPlugin):
+         """Check to see if we can run `oc` commands"""
+         return self.exec_cmd('oc whoami')['status'] == 0
+ 
++    def _check_localhost_kubeconfig(self):
++        """Check if the localhost.kubeconfig exists with system:admin user"""
++        return self.path_exists(self.get_option('kubeconfig'))
++
+     def _check_oc_logged_in(self):
+         """See if we're logged in to the API service, and if not attempt to do
+         so using provided plugin options
+@@ -80,8 +91,38 @@ class Openshift(Plugin, RedHatPlugin):
+         if self._check_oc_function():
+             return True
+ 
+-        # Not logged in currently, attempt to do so
++        if self.get_option('kubeconfig') is None:
++            # If the admin doesn't provide a kubeconfig,
++            # use the default localhost.kubeconfig
++            self.set_option(
++                'kubeconfig',
++                self.master_localhost_kubeconfig
++            )
++
++        # Check first if we can use the localhost.kubeconfig before
++        # using a token. We don't want to use the 'host' option because
++        # we use the cluster url from the kubeconfig. Default is localhost.
++        if self._check_localhost_kubeconfig():
++            self.set_default_cmd_environment({
++                'KUBECONFIG': self.get_option('kubeconfig')
++            })
++
++            oc_res = self.exec_cmd(
++                "oc login -u system:admin "
++                "--insecure-skip-tls-verify=True"
++            )
++            if oc_res['status'] == 0 and self._check_oc_function():
++                return True
++
++            self._log_warn(
++                "The login command failed with status: %s and error: %s"
++                % (oc_res['status'], oc_res['output'])
++            )
++            return False
++
++        # If kubeconfig is not defined, check if token is provided.
+         token = self.get_option('token') or os.getenv('SOSOCPTOKEN', None)
++
+         if token:
+             oc_res = self.exec_cmd("oc login %s --token=%s "
+                                    "--insecure-skip-tls-verify=True"
+-- 
+2.34.3
+
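+Condensed, the authentication order the patch implements is: prefer a
+kubeconfig (user-supplied, else the well-known localhost.kubeconfig),
+then fall back to a token. A sketch of that decision flow, with the
+default path taken from the patch and everything else illustrative:
+
+    import os
+
+    DEFAULT_KC = ('/etc/kubernetes/static-pod-resources/'
+                  'kube-apiserver-certs/secrets/node-kubeconfigs/'
+                  'localhost.kubeconfig')
+
+    def pick_auth(kubeconfig=None, token=None):
+        kc = kubeconfig or DEFAULT_KC
+        if os.path.exists(kc):
+            # export KUBECONFIG, then 'oc login -u system:admin'
+            return {'KUBECONFIG': kc}
+        if token:
+            # fall back to 'oc login --token=...' as before
+            return {'SOSOCPTOKEN': token}
+        return None
+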
+From 61765992812afb785e9552e01e3b5579118a6963 Mon Sep 17 00:00:00 2001
+From: Nadia Pinaeva <npinaeva@redhat.com>
+Date: Fri, 1 Apr 2022 12:05:36 +0200
+Subject: [PATCH] Add one more container for plugin enablement
+
+Signed-off-by: Nadia Pinaeva <npinaeva@redhat.com>
+---
+ sos/report/plugins/openshift_ovn.py | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/sos/report/plugins/openshift_ovn.py b/sos/report/plugins/openshift_ovn.py
+index b4787b8e..98522b1e 100644
+--- a/sos/report/plugins/openshift_ovn.py
++++ b/sos/report/plugins/openshift_ovn.py
+@@ -16,7 +16,7 @@ class OpenshiftOVN(Plugin, RedHatPlugin):
+     """
+     short_desc = 'Openshift OVN'
+     plugin_name = "openshift_ovn"
+-    containers = ('ovnkube-master', 'ovn-ipsec')
++    containers = ('ovnkube-master', 'ovnkube-node', 'ovn-ipsec')
+     profiles = ('openshift',)
+ 
+     def setup(self):
+-- 
+2.34.3
+
+From d3aa071efc85507341cf65dd61414a734654f50a Mon Sep 17 00:00:00 2001
+From: Jake Hunsaker <jhunsake@redhat.com>
+Date: Mon, 28 Mar 2022 14:47:09 -0400
+Subject: [PATCH] [presets] Adjust OCP preset options
+
+Adjust the options used by the 'ocp' preset to better reflect the
+current collection needs and approach.
+
+This includes disabling the `cgroups` plugin due to the large amount of
+mostly irrelevant data it captures on OCP nodes with their high container
+counts, ensuring the `--container-runtime` option is set to `crio` to
+align container-based collections, disabling HTML report generation, and
+increasing the base log size rather than blindly enabling all-logs.
+
+Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
+---
+ sos/presets/redhat/__init__.py | 13 +++++++++----
+ 1 file changed, 9 insertions(+), 4 deletions(-)
+
+diff --git a/sos/presets/redhat/__init__.py b/sos/presets/redhat/__init__.py
+index 865c9b6b..0b9f6f11 100644
+--- a/sos/presets/redhat/__init__.py
++++ b/sos/presets/redhat/__init__.py
+@@ -36,10 +36,15 @@ RHOSP_OPTS = SoSOptions(plugopts=[
+ 
+ RHOCP = "ocp"
+ RHOCP_DESC = "OpenShift Container Platform by Red Hat"
+-RHOCP_OPTS = SoSOptions(all_logs=True, verify=True, plugopts=[
+-                             'networking.timeout=600',
+-                             'networking.ethtool_namespaces=False',
+-                             'networking.namespaces=200'])
++RHOCP_OPTS = SoSOptions(
++    verify=True, skip_plugins=['cgroups'], container_runtime='crio',
++    no_report=True, log_size=100,
++    plugopts=[
++        'crio.timeout=600',
++        'networking.timeout=600',
++        'networking.ethtool_namespaces=False',
++        'networking.namespaces=200'
++    ])
+ 
+ RH_CFME = "cfme"
+ RH_CFME_DESC = "Red Hat CloudForms"
+-- 
+2.34.3
+
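+In CLI terms, `sos report --preset ocp` now behaves roughly like passing
+`--verify --skip-plugins cgroups --container-runtime crio --no-report
+--log-size 100` together with the listed `-k` plugin options, instead of
+the previous `--all-logs --verify` combination (an approximation; presets
+are applied internally rather than expanded to flags).
+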
+From f2b67ab820070063995689fed03492cdaa012d01 Mon Sep 17 00:00:00 2001
+From: Nadia Pinaeva <npinaeva@redhat.com>
+Date: Fri, 1 Apr 2022 17:01:35 +0200
+Subject: [PATCH] Use /etc/os-release instead of /etc/redhat-release as the
+ most compatible way to find host release
+
+Signed-off-by: Nadia Pinaeva <npinaeva@redhat.com>
+---
+ sos/policies/distros/redhat.py | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/sos/policies/distros/redhat.py b/sos/policies/distros/redhat.py
+index 0c72a5e4..2e117f37 100644
+--- a/sos/policies/distros/redhat.py
++++ b/sos/policies/distros/redhat.py
+@@ -40,7 +40,6 @@ class RedHatPolicy(LinuxPolicy):
+         ('Distribution Website', 'https://www.redhat.com/'),
+         ('Commercial Support', 'https://www.access.redhat.com/')
+     ]
+-    _redhat_release = '/etc/redhat-release'
+     _tmp_dir = "/var/tmp"
+     _in_container = False
+     default_scl_prefix = '/opt/rh'
+@@ -471,7 +470,7 @@ support representative.
+         atomic = False
+         if ENV_HOST_SYSROOT not in os.environ:
+             return atomic
+-        host_release = os.environ[ENV_HOST_SYSROOT] + cls._redhat_release
++        host_release = os.environ[ENV_HOST_SYSROOT] + OS_RELEASE
+         if not os.path.exists(host_release):
+             return False
+         try:
+@@ -558,7 +557,7 @@ support representative.
+         coreos = False
+         if ENV_HOST_SYSROOT not in os.environ:
+             return coreos
+-        host_release = os.environ[ENV_HOST_SYSROOT] + cls._redhat_release
++        host_release = os.environ[ENV_HOST_SYSROOT] + OS_RELEASE
+         try:
+             for line in open(host_release, 'r').read().splitlines():
+                 coreos |= 'Red Hat Enterprise Linux CoreOS' in line
+-- 
+2.34.3
+
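+For reference, the CoreOS check now reads the standard os-release file
+under the host sysroot. A minimal sketch, assuming OS_RELEASE resolves
+to '/etc/os-release':
+
+    def is_coreos(host_sysroot='/host', os_release='/etc/os-release'):
+        # same substring test as the patch, against the standard file
+        try:
+            with open(host_sysroot + os_release) as rel:
+                return any('Red Hat Enterprise Linux CoreOS' in line
+                           for line in rel)
+        except OSError:
+            return False
+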
+From ee0dd68199a2c9296eafe64ead5b2263c8270e4a Mon Sep 17 00:00:00 2001
+From: Nadia Pinaeva <npinaeva@redhat.com>
+Date: Wed, 6 Apr 2022 11:56:41 +0200
+Subject: [PATCH] Use --force-pull-image option for pods created with oc. Set
+ --force-pull-image=True by default; it can be turned off with
+ --force-pull-image=False
+
+Signed-off-by: Nadia Pinaeva <npinaeva@redhat.com>
+---
+ man/en/sos-collect.1           | 16 +++++++++++-----
+ sos/collector/__init__.py      |  9 +++++----
+ sos/collector/transports/oc.py |  2 ++
+ sos/options.py                 | 20 ++++++++++++++------
+ 4 files changed, 32 insertions(+), 15 deletions(-)
+
+diff --git a/man/en/sos-collect.1 b/man/en/sos-collect.1
+index 9b0a5d7b..2f60332b 100644
+--- a/man/en/sos-collect.1
++++ b/man/en/sos-collect.1
+@@ -28,7 +28,7 @@ sos collect \- Collect sosreports from multiple (cluster) nodes
+     [\-\-no\-local]
+     [\-\-primary PRIMARY]
+     [\-\-image IMAGE]
+-    [\-\-force-pull-image]
++    [\-\-force-pull-image TOGGLE, --pull TOGGLE]
+     [\-\-registry-user USER]
+     [\-\-registry-password PASSWORD]
+     [\-\-registry-authfile FILE]
+@@ -262,10 +262,16 @@ Specify an image to use for the temporary container created for collections on
+ containerized host, if you do not want to use the default image specified by the
+ host's policy. Note that this should include the registry.
+ .TP
+-\fB\-\-force-pull-image\fR
+-Use this option to force the container runtime to pull the specified image (even
+-if it is the policy default image) even if the image already exists on the host.
+-This may be useful to update an older container image on containerized hosts.
++\fB\-\-force-pull-image TOGGLE, \-\-pull TOGGLE\fR
++When collecting an sos report from a containerized host, force the host to always
++pull the specified image, even if that image already exists on the host.
++This is useful to ensure that the latest version of that image is always in use.
++Disabling this option will use whatever version of the image is present on the node,
++and only attempt a pull if there is no copy of the image present at all.
++
++Enable with true/on/yes or disable with false/off/no
++
++Default: true
+ .TP
+ \fB\-\-registry-user USER\fR
+ Specify the username to authenticate to the registry with in order to pull the container
+diff --git a/sos/collector/__init__.py b/sos/collector/__init__.py
+index d898ca34..66c3d932 100644
+--- a/sos/collector/__init__.py
++++ b/sos/collector/__init__.py
+@@ -27,7 +27,7 @@ from pipes import quote
+ from textwrap import fill
+ from sos.cleaner import SoSCleaner
+ from sos.collector.sosnode import SosNode
+-from sos.options import ClusterOption
++from sos.options import ClusterOption, str_to_bool
+ from sos.component import SoSComponent
+ from sos.utilities import bold
+ from sos import __version__
+@@ -85,7 +85,7 @@ class SoSCollector(SoSComponent):
+         'encrypt_pass': '',
+         'group': None,
+         'image': '',
+-        'force_pull_image': False,
++        'force_pull_image': True,
+         'jobs': 4,
+         'keywords': [],
+         'keyword_file': None,
+@@ -357,8 +357,9 @@ class SoSCollector(SoSComponent):
+         collect_grp.add_argument('--image',
+                                  help=('Specify the container image to use for'
+                                        ' containerized hosts.'))
+-        collect_grp.add_argument('--force-pull-image', '--pull', default=False,
+-                                 action='store_true',
++        collect_grp.add_argument('--force-pull-image', '--pull',
++                                 default=True, choices=(True, False),
++                                 type=str_to_bool,
+                                  help='Force pull the container image even if '
+                                       'it already exists on the host')
+         collect_grp.add_argument('--registry-user', default=None,
+diff --git a/sos/collector/transports/oc.py b/sos/collector/transports/oc.py
+index 90a802b2..8f6aa9b4 100644
+--- a/sos/collector/transports/oc.py
++++ b/sos/collector/transports/oc.py
+@@ -147,6 +147,8 @@ class OCTransport(RemoteTransport):
+                         "tty": True
+                     }
+                 ],
++                "imagePullPolicy":
++                    "Always" if self.opts.force_pull_image else "IfNotPresent",
+                 "restartPolicy": "Never",
+                 "nodeName": self.address,
+                 "hostNetwork": True,
+diff --git a/sos/options.py b/sos/options.py
+index 4846a509..2d5a5135 100644
+--- a/sos/options.py
++++ b/sos/options.py
+@@ -18,6 +18,16 @@ def _is_seq(val):
+     return val_type is list or val_type is tuple
+ 
+ 
++def str_to_bool(val):
++    _val = val.lower()
++    if _val in ['true', 'on', 'yes']:
++        return True
++    elif _val in ['false', 'off', 'no']:
++        return False
++    else:
++        return None
++
++
+ class SoSOptions():
+ 
+     def _merge_opt(self, opt, src, is_default):
+@@ -153,15 +163,13 @@ class SoSOptions():
+         if isinstance(self.arg_defaults[key], list):
+             return [v for v in val.split(',')]
+         if isinstance(self.arg_defaults[key], bool):
+-            _val = val.lower()
+-            if _val in ['true', 'on', 'yes']:
+-                return True
+-            elif _val in ['false', 'off', 'no']:
+-                return False
+-            else:
++            val = str_to_bool(val)
++            if val is None:
+                 raise Exception(
+                     "Value of '%s' in %s must be True or False or analogous"
+                     % (key, conf))
++            else:
++                return val
+         if isinstance(self.arg_defaults[key], int):
+             try:
+                 return int(val)
+-- 
+2.34.3
+
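+A self-contained sketch of how the new toggle parses user input; the
+helper body is copied from the patch and the argparse wiring mirrors it:
+
+    import argparse
+
+    def str_to_bool(val):
+        _val = val.lower()
+        if _val in ['true', 'on', 'yes']:
+            return True
+        elif _val in ['false', 'off', 'no']:
+            return False
+        return None
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--force-pull-image', '--pull', default=True,
+                        choices=(True, False), type=str_to_bool)
+
+    # anything outside true/on/yes/false/off/no maps to None, which
+    # choices=(True, False) rejects with a usage error
+    print(parser.parse_args(['--pull', 'off']).force_pull_image)  # False
+    print(parser.parse_args([]).force_pull_image)                 # True
+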
+From ce289a3ae7101a898efdb84ddfd575576ba5819b Mon Sep 17 00:00:00 2001
+From: Jake Hunsaker <jhunsake@redhat.com>
+Date: Tue, 5 Apr 2022 11:32:11 -0400
+Subject: [PATCH] [ocp, openshift] Re-align API collection options and rename
+ option
+
+Previously, in #2888, the `openshift` plugin was extended to allow API
+collections by using a default-available kubeconfig file rather than
+relying on user-provided tokens. This also included flipping the default
+value of the `no-oc` plugin option to `True` (meaning do not collect API
+output by default).
+
+This worked for the plugin, but it introduced a gap in `sos collect`
+whereby the cluster profile could no longer reliably enable API
+collections when trying to leverage the new functionality of not
+requiring a user token.
+
+Fix this by updating the cluster profile to align with the new
+default-off approach of API collections.
+
+Along with this, add a toggle to the cluster profile directly to allow
+users to toggle API collections on or off (default off) directly. This
+is done via a new `with-api` cluster option (e.g. `-c ocp.with-api`).
+Further, rename the `openshift` plugin option from `no-oc` to
+`with-api`. This change not only makes the option use case far more
+obvious, it will also align the use of the option to both `collect` and
+`report` so that users need only be aware of a single option for either
+method.
+
+The cluster profile also has logic to detect which plugin option,
+`no-oc` or `with-api`, to use based on the (RHEL) sos version installed
+on the nodes being inspected by the `ocp` cluster profile.
+
+Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
+---
+ sos/collector/clusters/ocp.py   | 72 +++++++++++++++++++++++++++------
+ sos/report/plugins/openshift.py | 26 +++++++-----
+ 2 files changed, 77 insertions(+), 21 deletions(-)
+
+diff --git a/sos/collector/clusters/ocp.py b/sos/collector/clusters/ocp.py
+index 9beb2f9b..e31d1903 100644
+--- a/sos/collector/clusters/ocp.py
++++ b/sos/collector/clusters/ocp.py
+@@ -30,7 +30,11 @@ class ocp(Cluster):
+     clusterAdmin privileges.
+ 
+     If this requires the use of a secondary configuration file, specify that
+-    path with the 'kubeconfig' cluster option.
++    path with the 'kubeconfig' cluster option. This config file will also be
++    used on a single master node to perform API collections if the `with-api`
++    option is enabled (default disabled). If no `kubeconfig` option is given,
++    but `with-api` is enabled, the cluster profile will attempt to use a
++    well-known default kubeconfig file if it is available on the host.
+ 
+     Alternatively, provide a clusterAdmin access token either via the 'token'
+     cluster option or, preferably, the SOSOCPTOKEN environment variable.
+@@ -45,7 +49,7 @@ class ocp(Cluster):
+     option mentioned above.
+ 
+     To avoid redundant collections of OCP API information (e.g. 'oc get'
+-    commands), this profile will attempt to enable the openshift plugin on only
++    commands), this profile will attempt to enable the API collections on only
+     a single master node. If none of the master nodes have a functional
+     'oc' binary available, *and* the --no-local option is used, that means that
+     no API data will be collected.
+@@ -63,7 +67,8 @@ class ocp(Cluster):
+         ('label', '', 'Colon delimited list of labels to select nodes with'),
+         ('role', 'master', 'Colon delimited list of roles to filter on'),
+         ('kubeconfig', '', 'Path to the kubeconfig file'),
+-        ('token', '', 'Service account token to use for oc authorization')
++        ('token', '', 'Service account token to use for oc authorization'),
++        ('with-api', False, 'Collect OCP API data from a master node')
+     ]
+ 
+     def fmt_oc_cmd(self, cmd):
+@@ -219,13 +224,52 @@ class ocp(Cluster):
+             return False
+         return 'master' in self.node_dict[sosnode.address]['roles']
+ 
++    def _toggle_api_opt(self, node, use_api):
++        """In earlier versions of sos, the openshift plugin option that is
++        used to toggle the API collections was called `no-oc` rather than
++        `with-api`. This older plugin option had the inverse logic of the
++        current `with-api` option.
++
++        Use this to toggle the correct plugin option given the node's sos
++        version. Note that the use of version 4.2 here is tied to the RHEL
++        release (the only usecase for this cluster profile) rather than
++        the upstream version given the backports for that downstream.
++
++        :param node:    The node being inspected for API collections
++        :type node:     ``SoSNode``
++
++        :param use_api: Should this node enable API collections?
++        :type use_api:  ``bool``
++        """
++        if node.check_sos_version('4.2-16'):
++            _opt = 'with-api'
++            _val = 'on' if use_api else 'off'
++        else:
++            _opt = 'no-oc'
++            _val = 'off' if use_api else 'on'
++        node.plugopts.append("openshift.%s=%s" % (_opt, _val))
++
+     def set_primary_options(self, node):
++
+         node.enable_plugins.append('openshift')
++        if not self.get_option('with-api'):
++            self._toggle_api_opt(node, False)
++            return
+         if self.api_collect_enabled:
+             # a primary has already been enabled for API collection, disable
+             # it among others
+-            node.plugopts.append('openshift.no-oc=on')
++            self._toggle_api_opt(node, False)
+         else:
++            # running in a container, so reference the /host mount point
++            master_kube = (
++                '/host/etc/kubernetes/static-pod-resources/'
++                'kube-apiserver-certs/secrets/node-kubeconfigs/'
++                'localhost.kubeconfig'
++            )
++            _optconfig = self.get_option('kubeconfig')
++            if _optconfig and not _optconfig.startswith('/host'):
++                _optconfig = '/host/' + _optconfig
++            _kubeconfig = _optconfig or master_kube
+             _oc_cmd = 'oc'
+             if node.host.containerized:
+                 _oc_cmd = '/host/bin/oc'
+@@ -244,17 +288,21 @@ class ocp(Cluster):
+                                       need_root=True)
+             if can_oc['status'] == 0:
+                 # the primary node can already access the API
++                self._toggle_api_opt(node, True)
+                 self.api_collect_enabled = True
+             elif self.token:
+                 node.sos_env_vars['SOSOCPTOKEN'] = self.token
++                self._toggle_api_opt(node, True)
++                self.api_collect_enabled = True
++            elif node.file_exists(_kubeconfig):
++                # if the file exists, then the openshift sos plugin will use it
++                # if the with-api option is turned on
++                if not _kubeconfig == master_kube:
++                    node.plugopts.append(
++                        "openshift.kubeconfig=%s" % _kubeconfig
++                    )
++                self._toggle_api_opt(node, True)
+                 self.api_collect_enabled = True
+-            elif self.get_option('kubeconfig'):
+-                kc = self.get_option('kubeconfig')
+-                if node.file_exists(kc):
+-                    if node.host.containerized:
+-                        kc = "/host/%s" % kc
+-                    node.sos_env_vars['KUBECONFIG'] = kc
+-                    self.api_collect_enabled = True
+             if self.api_collect_enabled:
+                 msg = ("API collections will be performed on %s\nNote: API "
+                        "collections may extend runtime by 10s of minutes\n"
+@@ -264,6 +312,6 @@ class ocp(Cluster):
+ 
+     def set_node_options(self, node):
+         # don't attempt OC API collections on non-primary nodes
+-        node.plugopts.append('openshift.no-oc=on')
++        self._toggle_api_opt(node, False)
+ 
+ # vim: set et ts=4 sw=4 :
+diff --git a/sos/report/plugins/openshift.py b/sos/report/plugins/openshift.py
+index d643f04c..a41ab62b 100644
+--- a/sos/report/plugins/openshift.py
++++ b/sos/report/plugins/openshift.py
+@@ -19,7 +19,10 @@ class Openshift(Plugin, RedHatPlugin):
+     further extending the kubernetes plugin (or the OCP 3.x extensions included
+     in the Red Hat version of the kube plugin).
+ 
+-    By default, this plugin will collect cluster information and inspect the
++    This plugin may collect OCP API information when the `with-api` option is
++    enabled. This option is disabled by default.
++
++    When enabled, this plugin will collect cluster information and inspect the
+     default namespaces/projects that are created during deployment - i.e. the
+     namespaces of the cluster projects matching openshift.* and kube.*. At the
+     time of this plugin's creation that number of default projects is already
+@@ -34,16 +37,20 @@ class Openshift(Plugin, RedHatPlugin):
+ 
+     Users will need to either:
+ 
+-        1) Provide the bearer token via the `-k openshift.token` option
+-        2) Provide the bearer token via the `SOSOCPTOKEN` environment variable
+-        3) Otherwise ensure that the root user can successfully run `oc` and
++        1) Accept the use of a well-known stock kubeconfig file provided via a
++           static pod resource for the kube-apiserver
++        2) Provide the bearer token via the `-k openshift.token` option
++        3) Provide the bearer token via the `SOSOCPTOKEN` environment variable
++        4) Otherwise ensure that the root user can successfully run `oc` and
+            get proper output prior to running this plugin
+ 
+ 
+-    It is highly suggested that option #2 be used first, as this will prevent
+-    the token from being recorded in output saved to the archive. Option #1 may
++    It is highly suggested that option #1 be used first, as this uses well
++    known configurations and requires the least information from the user. If
++    using a token, it is recommended to use option #3 as this will prevent
++    the token from being recorded in output saved to the archive. Option #2 may
+     be used if this is considered an acceptable risk. It is not recommended to
+-    rely on option #3, though it will provide the functionality needed.
++    rely on option #4, though it will provide the functionality needed.
+     """
+ 
+     short_desc = 'Openshift Container Platform 4.x'
+@@ -65,7 +72,8 @@ class Openshift(Plugin, RedHatPlugin):
+                   desc='Path to a locally available kubeconfig file'),
+         PluginOpt('host', default='https://localhost:6443',
+                   desc='host address to use for oc login, including port'),
+-        PluginOpt('no-oc', default=True, desc='do not collect `oc` output'),
++        PluginOpt('with-api', default=False,
++                  desc='collect output from the OCP API'),
+         PluginOpt('podlogs', default=True, desc='collect logs from each pod'),
+         PluginOpt('podlogs-filter', default='', val_type=str,
+                   desc='only collect logs from pods matching this pattern'),
+@@ -212,7 +220,7 @@ class Openshift(Plugin, RedHatPlugin):
+         self.add_copy_spec('/etc/kubernetes/*')
+ 
+         # see if we run `oc` commands
+-        if not self.get_option('no-oc'):
++        if self.get_option('with-api'):
+             can_run_oc = self._check_oc_logged_in()
+         else:
+             can_run_oc = False
+-- 
+2.34.3
+
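+In practice, users enable API collections with `-c ocp.with-api` for
+`sos collect`, or `sos report -o openshift -k openshift.with-api=on` on
+a single host. A condensed sketch of the version-gated option
+translation the cluster profile performs (version boundary per the
+patch; the helper name is illustrative):
+
+    def api_plugopt(node_is_4_2_16_or_later, use_api):
+        # newer sos understands 'with-api'; older only the inverse 'no-oc'
+        if node_is_4_2_16_or_later:
+            return 'openshift.with-api=%s' % ('on' if use_api else 'off')
+        return 'openshift.no-oc=%s' % ('off' if use_api else 'on')
+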
diff --git a/SOURCES/sos-bz2066181-tigervnc-update-collections.patch b/SOURCES/sos-bz2066181-tigervnc-update-collections.patch
new file mode 100644
index 0000000..f2767c9
--- /dev/null
+++ b/SOURCES/sos-bz2066181-tigervnc-update-collections.patch
@@ -0,0 +1,67 @@
+From 4c92968ce461cdfc6a5d913748b2ce4f148ff4a9 Mon Sep 17 00:00:00 2001
+From: Jake Hunsaker <jhunsake@redhat.com>
+Date: Thu, 10 Mar 2022 12:31:49 -0500
+Subject: [PATCH] [tigervnc] Update collections for newer versions of TigerVNC
+
+First, relaxes the file specifications for collection by capturing the
+entire `/etc/tigervnc/` directory.
+
+Second, adds collection of service status and journal output for each
+configured vnc server. Collection of `vncserver -list` is kept for
+backwards compatibility.
+
+Finally, add a short docstring for the plugin for --help output.
+
+Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
+---
+ sos/report/plugins/tigervnc.py | 28 +++++++++++++++++++++++-----
+ 1 file changed, 23 insertions(+), 5 deletions(-)
+
+diff --git a/sos/report/plugins/tigervnc.py b/sos/report/plugins/tigervnc.py
+index 1302f6d4..e31aee25 100644
+--- a/sos/report/plugins/tigervnc.py
++++ b/sos/report/plugins/tigervnc.py
+@@ -12,17 +12,35 @@ from sos.report.plugins import Plugin, RedHatPlugin
+ 
+ 
+ class TigerVNC(Plugin, RedHatPlugin):
++    """
++    This plugin gathers information for VNC servers provided by the tigervnc
++    package. This is explicitly for server-side collections, not clients.
++
++    By default, this plugin will capture the contents of /etc/tigervnc, which
++    may include usernames. If usernames are sensitive information for end
++    users of sos, consider using the `--clean` option to obfuscate these
++    names.
++    """
+ 
+     short_desc = 'TigerVNC server configuration'
+     plugin_name = 'tigervnc'
+     packages = ('tigervnc-server',)
+ 
+     def setup(self):
+-        self.add_copy_spec([
+-            '/etc/tigervnc/vncserver-config-defaults',
+-            '/etc/tigervnc/vncserver-config-mandatory',
+-            '/etc/tigervnc/vncserver.users'
+-        ])
++        self.add_copy_spec('/etc/tigervnc/')
++
++        # service names are 'vncserver@$port' where $port is :1, :2, etc...
++        # however they are not reported via list-unit-files, only list-units
++        vncs = self.exec_cmd(
++            'systemctl list-units --type=service --no-legend vncserver*'
++        )
++        if vncs['status'] == 0:
++            for serv in vncs['output'].splitlines():
++                vnc = serv.split()
++                if not vnc:
++                    continue
++                self.add_service_status(vnc[0])
++                self.add_journal(vnc[0])
+ 
+         self.add_cmd_output('vncserver -list')
+ 
+-- 
+2.34.3
+
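+A sketch of the unit-name extraction this relies on; the sample line in
+the comment is illustrative of `systemctl list-units --no-legend`
+output:
+
+    def vnc_units(output):
+        units = []
+        # first column of each row is the unit name, e.g.
+        # 'vncserver@:1.service loaded active running ...'
+        for line in output.splitlines():
+            cols = line.split()
+            if cols:
+                units.append(cols[0])
+        return units
+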
diff --git a/SOURCES/sos-bz2079188-honor-default-plugin-timeout.patch b/SOURCES/sos-bz2079188-honor-default-plugin-timeout.patch
new file mode 100644
index 0000000..822565d
--- /dev/null
+++ b/SOURCES/sos-bz2079188-honor-default-plugin-timeout.patch
@@ -0,0 +1,39 @@
+From 7069e99d1c5c443f96a98a7ed6db67fa14683e67 Mon Sep 17 00:00:00 2001
+From: Pavel Moravec <pmoravec@redhat.com>
+Date: Thu, 17 Feb 2022 09:14:15 +0100
+Subject: [PATCH] [report] Honor plugins' hardcoded plugin_timeout
+
+Currently, a plugin's hardcoded plugin_timeout default is superseded by
+whatever --plugin-timeout value is in effect, even when that option is
+not used and we evaluate it to TIMEOUT_DEFAULT.
+
+In the case of setting neither --plugin-timeout nor -k plugin.timeout,
+honour the plugin's plugin_timeout instead.
+
+Resolves: #2863
+Closes: #2864
+
+Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
+---
+ sos/report/plugins/__init__.py | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/sos/report/plugins/__init__.py b/sos/report/plugins/__init__.py
+index cc5cb65b..336b4d22 100644
+--- a/sos/report/plugins/__init__.py
++++ b/sos/report/plugins/__init__.py
+@@ -636,7 +636,10 @@ class Plugin():
+             if opt_timeout is None:
+                 _timeout = own_timeout
+             elif opt_timeout is not None and own_timeout == -1:
+-                _timeout = int(opt_timeout)
++                if opt_timeout == TIMEOUT_DEFAULT:
++                    _timeout = default_timeout
++                else:
++                    _timeout = int(opt_timeout)
+             elif opt_timeout is not None and own_timeout > -1:
+                 _timeout = own_timeout
+             else:
+-- 
+2.34.3
+
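+A condensed restatement of the resulting precedence, with a worked
+example (assuming TIMEOUT_DEFAULT is sos's global 300-second default):
+
+    TIMEOUT_DEFAULT = 300
+
+    def effective_timeout(opt_timeout, own_timeout, default_timeout):
+        if opt_timeout is None:
+            return own_timeout
+        if own_timeout == -1:
+            return default_timeout if opt_timeout == TIMEOUT_DEFAULT \
+                else int(opt_timeout)
+        return own_timeout
+
+    # a plugin hardcoding plugin_timeout=900 now keeps it unless the
+    # user explicitly passes --plugin-timeout or -k plugin.timeout:
+    print(effective_timeout(TIMEOUT_DEFAULT, -1, 900))  # 900
+    print(effective_timeout(600, -1, 900))              # 600
+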
diff --git a/SOURCES/sos-bz2079490-list-plugins-ignore-options.patch b/SOURCES/sos-bz2079490-list-plugins-ignore-options.patch
new file mode 100644
index 0000000..f0bda41
--- /dev/null
+++ b/SOURCES/sos-bz2079490-list-plugins-ignore-options.patch
@@ -0,0 +1,68 @@
+From f3dc8cd574614572d441f76c02453fd85d0c57e2 Mon Sep 17 00:00:00 2001
+From: Jake Hunsaker <jhunsake@redhat.com>
+Date: Wed, 27 Apr 2022 10:40:55 -0400
+Subject: [PATCH] [report] --list-plugins should report used, not default,
+ option values
+
+When using `--list-plugins`, sos should report the values that will be
+used in a given command, or with a given config file, not what the
+default values are.
+
+By reporting the set value, users can be sure their configuration or
+commandline settings are being honored correctly before executing a
+report collection.
+
+Closes: #2921
+
+Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
+---
+ sos/report/__init__.py | 22 +++++++++++++++-------
+ 1 file changed, 15 insertions(+), 7 deletions(-)
+
+diff --git a/sos/report/__init__.py b/sos/report/__init__.py
+index 74c7973a..8735c903 100644
+--- a/sos/report/__init__.py
++++ b/sos/report/__init__.py
+@@ -868,24 +868,32 @@ class SoSReport(SoSComponent):
+             _defaults = self.loaded_plugins[0][1].get_default_plugin_opts()
+             for _opt in _defaults:
+                 opt = _defaults[_opt]
+-                val = opt.default
+-                if opt.default == -1:
+-                    val = TIMEOUT_DEFAULT
++                val = opt.value
++                if opt.value == -1:
++                    if _opt == 'timeout':
++                        val = self.opts.plugin_timeout or TIMEOUT_DEFAULT
++                    elif _opt == 'cmd-timeout':
++                        val = self.opts.cmd_timeout or TIMEOUT_DEFAULT
++                    else:
++                        val = TIMEOUT_DEFAULT
++                if opt.name == 'postproc':
++                    val = not self.opts.no_postproc
+                 self.ui_log.info(" %-25s %-15s %s" % (opt.name, val, opt.desc))
+             self.ui_log.info("")
+ 
+             self.ui_log.info(_("The following plugin options are available:"))
+             for opt in self.all_options:
+                 if opt.name in ('timeout', 'postproc', 'cmd-timeout'):
+-                    continue
++                    if opt.value == opt.default:
++                        continue
+                 # format option value based on its type (int or bool)
+-                if isinstance(opt.default, bool):
+-                    if opt.default is True:
++                if isinstance(opt.value, bool):
++                    if opt.value is True:
+                         tmpopt = "on"
+                     else:
+                         tmpopt = "off"
+                 else:
+-                    tmpopt = opt.default
++                    tmpopt = opt.value
+ 
+                 if tmpopt is None:
+                     tmpopt = 0
+-- 
+2.34.3
+
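+As a quick check of the new behavior: running, for example, `sos report
+--list-plugins -k crio.timeout=900` (an illustrative option) should now
+list 900 for that option rather than its default, confirming the
+setting will be honored before a collection is started.
+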
diff --git a/SOURCES/sos-bz2079491-plugopts-valtype-str.patch b/SOURCES/sos-bz2079491-plugopts-valtype-str.patch
new file mode 100644
index 0000000..eaf42ab
--- /dev/null
+++ b/SOURCES/sos-bz2079491-plugopts-valtype-str.patch
@@ -0,0 +1,34 @@
+From 9b10abcdd4aaa41e2549438d5bc52ece86dcb21f Mon Sep 17 00:00:00 2001
+From: Pavel Moravec <pmoravec@redhat.com>
+Date: Sat, 7 May 2022 14:23:04 +0200
+Subject: [PATCH] [plugins] Allow 'str' PlugOpt type to accept any value
+
+For PlugOpt type 'str', we should allow any content including e.g.
+numbers, and interpret it as a string.
+
+Resolves: #2922
+Closes: #2935
+
+Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
+---
+ sos/report/plugins/__init__.py | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/sos/report/plugins/__init__.py b/sos/report/plugins/__init__.py
+index d6be42b9..2a42e6b0 100644
+--- a/sos/report/plugins/__init__.py
++++ b/sos/report/plugins/__init__.py
+@@ -452,6 +452,10 @@ class PluginOpt():
+         return self.__str__()
+ 
+     def set_value(self, val):
++        # 'str' type accepts any value, incl. numbers
++        if type('') in self.val_type:
++            self.value = str(val)
++            return
+         if not any([type(val) == _t for _t in self.val_type]):
+             valid = []
+             for t in self.val_type:
+-- 
+2.34.3
+
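+A brief sketch of the effect, assuming the PluginOpt constructor
+signature used elsewhere in this series:
+
+    from sos.report.plugins import PluginOpt
+
+    opt = PluginOpt('podlogs-filter', default='', val_type=str)
+    opt.set_value(600)         # numeric input is now accepted...
+    assert opt.value == '600'  # ...and stored as a string
+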
diff --git a/SOURCES/sos-bz2079492-timeouted-exec-cmd-exception.patch b/SOURCES/sos-bz2079492-timeouted-exec-cmd-exception.patch
new file mode 100644
index 0000000..dc58a67
--- /dev/null
+++ b/SOURCES/sos-bz2079492-timeouted-exec-cmd-exception.patch
@@ -0,0 +1,31 @@
+From 5e27b92a8a9f066af4c41ddd0bedc7c69187ff52 Mon Sep 17 00:00:00 2001
+From: Pavel Moravec <pmoravec@redhat.com>
+Date: Mon, 2 May 2022 22:13:34 +0200
+Subject: [PATCH] [utilities] Close file only when storing to file
+
+Call _output.close() only when to_file=true.
+
+Closes: #2925
+
+Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
+---
+ sos/utilities.py | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/sos/utilities.py b/sos/utilities.py
+index d2f73d86..1075d1d4 100644
+--- a/sos/utilities.py
++++ b/sos/utilities.py
+@@ -212,7 +212,8 @@ def sos_get_command_output(command, timeout=TIMEOUT_DEFAULT, stderr=False,
+                 p.wait(timeout if timeout else None)
+             except Exception:
+                 p.terminate()
+-                _output.close()
++                if to_file:
++                    _output.close()
+                 # until we separate timeouts from the `timeout` command
+                 # handle per-cmd timeouts via Plugin status checks
+                 return {'status': 124, 'output': reader.get_contents(),
+-- 
+2.34.3
+
diff --git a/SOURCES/sos-bz2082914-collect-pacemaker-cluster.patch b/SOURCES/sos-bz2082914-collect-pacemaker-cluster.patch
new file mode 100644
index 0000000..d573ea2
--- /dev/null
+++ b/SOURCES/sos-bz2082914-collect-pacemaker-cluster.patch
@@ -0,0 +1,230 @@
+From 3b84b4ccfa9e4924a5a3829d3810568dfb69bf63 Mon Sep 17 00:00:00 2001
+From: Jake Hunsaker <jhunsake@redhat.com>
+Date: Fri, 18 Mar 2022 16:25:35 -0400
+Subject: [PATCH 1/2] [pacemaker] Redesign node enumeration logic
+
+It has been found that `pcs status` output is liable to change, which
+ends up breaking our parsing of node lists when using it on newer
+versions.
+
+Instead, first try to parse through `crm_mon` output, which is what `pcs
+status` uses under the hood, but as a stable and reliable xml format.
+
+Failing that, for example if the `--primary` node is not functioning as
+part of the cluster, source `/etc/corosync/corosync.conf` instead.
+
+Related: RHBZ2065805
+Related: RHBZ2065811
+
+Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
+---
+ sos/collector/clusters/pacemaker.py | 110 +++++++++++++++++++---------
+ 1 file changed, 76 insertions(+), 34 deletions(-)
+
+diff --git a/sos/collector/clusters/pacemaker.py b/sos/collector/clusters/pacemaker.py
+index 55024314..49d0ce51 100644
+--- a/sos/collector/clusters/pacemaker.py
++++ b/sos/collector/clusters/pacemaker.py
+@@ -8,7 +8,11 @@
+ #
+ # See the LICENSE file in the source distribution for further information.
+ 
++import re
++
+ from sos.collector.clusters import Cluster
++from setuptools._vendor.packaging import version
++from xml.etree import ElementTree
+ 
+ 
+ class pacemaker(Cluster):
+@@ -18,42 +22,80 @@ class pacemaker(Cluster):
+     packages = ('pacemaker',)
+     option_list = [
+         ('online', True, 'Collect nodes listed as online'),
+-        ('offline', True, 'Collect nodes listed as offline')
++        ('offline', True, 'Collect nodes listed as offline'),
++        ('only-corosync', False, 'Only use corosync.conf to enumerate nodes')
+     ]
+ 
+     def get_nodes(self):
+-        self.res = self.exec_primary_cmd('pcs status')
+-        if self.res['status'] != 0:
+-            self.log_error('Cluster status could not be determined. Is the '
+-                           'cluster running on this node?')
+-            return []
+-        if 'node names do not match' in self.res['output']:
+-            self.log_warn('Warning: node name mismatch reported. Attempts to '
+-                          'connect to some nodes may fail.\n')
+-        return self.parse_pcs_output()
+-
+-    def parse_pcs_output(self):
+-        nodes = []
+-        if self.get_option('online'):
+-            nodes += self.get_online_nodes()
+-        if self.get_option('offline'):
+-            nodes += self.get_offline_nodes()
+-        return nodes
+-
+-    def get_online_nodes(self):
+-        for line in self.res['output'].splitlines():
+-            if line.startswith('Online:'):
+-                nodes = line.split('[')[1].split(']')[0]
+-                return [n for n in nodes.split(' ') if n]
+-
+-    def get_offline_nodes(self):
+-        offline = []
+-        for line in self.res['output'].splitlines():
+-            if line.startswith('Node') and line.endswith('(offline)'):
+-                offline.append(line.split()[1].replace(':', ''))
+-            if line.startswith('OFFLINE:'):
+-                nodes = line.split('[')[1].split(']')[0]
+-                offline.extend([n for n in nodes.split(' ') if n])
+-        return offline
++        self.nodes = []
++        # try crm_mon first
++        try:
++            if not self.get_option('only-corosync'):
++                try:
++                    self.get_nodes_from_crm()
++                except Exception as err:
++                    self.log_warn("Falling back to sourcing corosync.conf. "
++                                  "Could not parse crm_mon output: %s" % err)
++            if not self.nodes:
++                # fallback to corosync.conf, in case the node we're inspecting
++                # is offline from the cluster
++                self.get_nodes_from_corosync()
++        except Exception as err:
++            self.log_error("Could not determine nodes from cluster: %s" % err)
++
++        _shorts = [n for n in self.nodes if '.' not in n]
++        if _shorts:
++            self.log_warn(
++                "WARNING: Node addresses '%s' may not resolve locally if you "
++                "are not running on a node in the cluster. Try using option "
++                "'-c pacemaker.only-corosync' if these connections fail."
++                % ','.join(_shorts)
++            )
++        return self.nodes
++
++    def get_nodes_from_crm(self):
++        """
++        Try to parse crm_mon output for node list and status.
++        """
++        xmlopt = '--output-as=xml'
++        # older pacemaker had a different option for xml output
++        _ver = self.exec_primary_cmd('crm_mon --version')
++        if _ver['status'] == 0:
++            cver = _ver['output'].split()[1].split('-')[0]
++            if not version.parse(cver) > version.parse('2.0.3'):
++                xmlopt = '--as-xml'
++        else:
++            return
++        _out = self.exec_primary_cmd(
++            "crm_mon --one-shot --inactive %s" % xmlopt,
++            need_root=True
++        )
++        if _out['status'] == 0:
++            self.parse_crm_xml(_out['output'])
++
++    def parse_crm_xml(self, xmlstring):
++        """
++        Parse the xml output string provided by crm_mon
++        """
++        _xml = ElementTree.fromstring(xmlstring)
++        nodes = _xml.find('nodes')
++        for node in nodes:
++            _node = node.attrib
++            if self.get_option('online') and _node['online'] == 'true':
++                self.nodes.append(_node['name'])
++            elif self.get_option('offline') and _node['online'] == 'false':
++                self.nodes.append(_node['name'])
++
++    def get_nodes_from_corosync(self):
++        """
++        As a fallback measure, read corosync.conf to get the node list. Note
++        that this prevents us from separating online nodes from offline nodes.
++        """
++        self.log_warn("WARNING: unable to distinguish online nodes from "
++                      "offline nodes when sourcing from corosync.conf")
++        cc = self.primary.read_file('/etc/corosync/corosync.conf')
++        nodes = re.findall(r'((\sring0_addr:)(.*))', cc)
++        for node in nodes:
++            self.nodes.append(node[-1].strip())
+ 
+ # vim: set et ts=4 sw=4 :
+-- 
+2.34.3
+
+
+From 6701a7d77ecc998b018b54ecc00f9fd102ae9518 Mon Sep 17 00:00:00 2001
+From: Jake Hunsaker <jhunsake@redhat.com>
+Date: Mon, 21 Mar 2022 12:05:59 -0400
+Subject: [PATCH 2/2] [clusters] Allow clusters to not add localhost to node
+ list
+
+For most of our supported clusters, we end up needing to add the
+local host executing `sos collect` to the node list (unless `--no-local`
+is used) as that accounts for the primary node that may otherwise be
+left off. However, this is not helpful for clusters that may report
+node names as something other than resolvable names. In those cases,
+such as with pacemaker, adding the local hostname may result in
+duplicate collections.
+
+Add a toggle to cluster profiles via a new `strict_node_list` class attr
+that, if True, will skip this addition. This toggle is default `False`
+to preserve existing behavior, and is now enabled for `pacemaker`
+specifically.
+
+Related: RHBZ#2065821
+
+Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
+---
+ sos/collector/__init__.py           | 3 ++-
+ sos/collector/clusters/__init__.py  | 4 ++++
+ sos/collector/clusters/pacemaker.py | 1 +
+ 3 files changed, 7 insertions(+), 1 deletion(-)
+
+diff --git a/sos/collector/__init__.py b/sos/collector/__init__.py
+index a8bb0064..d898ca34 100644
+--- a/sos/collector/__init__.py
++++ b/sos/collector/__init__.py
+@@ -1073,7 +1073,8 @@ class SoSCollector(SoSComponent):
+             for node in self.node_list:
+                 if host == node.split('.')[0]:
+                     self.node_list.remove(node)
+-            self.node_list.append(self.hostname)
++            if not self.cluster.strict_node_list:
++                self.node_list.append(self.hostname)
+         self.reduce_node_list()
+         try:
+             _node_max = len(max(self.node_list, key=len))
+diff --git a/sos/collector/clusters/__init__.py b/sos/collector/clusters/__init__.py
+index f3f550ad..f00677b8 100644
+--- a/sos/collector/clusters/__init__.py
++++ b/sos/collector/clusters/__init__.py
+@@ -57,6 +57,10 @@ class Cluster():
+     sos_plugin_options = {}
+     sos_preset = ''
+     cluster_name = None
++    # set this to True if the local host running collect should *not* be
++    # forcibly added to the node list. This can be helpful in situations where
++    # the host's fqdn and the name the cluster uses are different
++    strict_node_list = False
+ 
+     def __init__(self, commons):
+         self.primary = None
+diff --git a/sos/collector/clusters/pacemaker.py b/sos/collector/clusters/pacemaker.py
+index 49d0ce51..bebcb265 100644
+--- a/sos/collector/clusters/pacemaker.py
++++ b/sos/collector/clusters/pacemaker.py
+@@ -20,6 +20,7 @@ class pacemaker(Cluster):
+     cluster_name = 'Pacemaker High Availability Cluster Manager'
+     sos_plugins = ['pacemaker']
+     packages = ('pacemaker',)
++    strict_node_list = True
+     option_list = [
+         ('online', True, 'Collect nodes listed as online'),
+         ('offline', True, 'Collect nodes listed as offline'),
+-- 
+2.34.3
+
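+The XML consumed by parse_crm_xml() looks roughly like the sample below,
+trimmed to the attributes the parser actually reads:
+
+    from xml.etree import ElementTree
+
+    sample = """
+    <pacemaker-result>
+      <nodes>
+        <node name="node1.example.com" online="true"/>
+        <node name="node2.example.com" online="false"/>
+      </nodes>
+    </pacemaker-result>
+    """
+
+    nodes = ElementTree.fromstring(sample.strip()).find('nodes')
+    online = [n.attrib['name'] for n in nodes
+              if n.attrib['online'] == 'true']
+    print(online)  # ['node1.example.com']
+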
diff --git a/SOURCES/sos-bz2095267-ovirt-answer-files-passwords.patch b/SOURCES/sos-bz2095267-ovirt-answer-files-passwords.patch
new file mode 100644
index 0000000..67eb6a0
--- /dev/null
+++ b/SOURCES/sos-bz2095267-ovirt-answer-files-passwords.patch
@@ -0,0 +1,66 @@
+From 5fd872c64c53af37015f366295e0c2418c969757 Mon Sep 17 00:00:00 2001
+From: Yedidyah Bar David <didi@redhat.com>
+Date: Thu, 26 May 2022 16:43:21 +0300
+Subject: [PATCH] [ovirt] answer files: Filter out all password keys
+
+Instead of hard-coding specific keys and having to maintain them over
+time, replace the values of all keys that have 'password' in their name.
+I think this covers all our current and hopefully future keys. It might
+add "false positives" - keys that are not passwords but have 'password'
+in their name - and I think that's a risk worth taking.
+
+Sadly, the engine admin password prompt's name is
+'OVESETUP_CONFIG_ADMIN_SETUP', which does not include 'password', so has
+to be listed specifically.
+
+A partial list of keys added since the replaced code was written:
+- grafana-related stuff
+- keycloak-related stuff
+- otopi-style answer files
+
+Signed-off-by: Yedidyah Bar David <didi@redhat.com>
+Change-Id: I416c6e4078e7c3638493eb271d08d73a0c22b5ba
+---
+ sos/report/plugins/ovirt.py | 23 +++++++++++++----------
+ 1 file changed, 13 insertions(+), 10 deletions(-)
+
+diff --git a/sos/report/plugins/ovirt.py b/sos/report/plugins/ovirt.py
+index 09647bf1..3b1bb29b 100644
+--- a/sos/report/plugins/ovirt.py
++++ b/sos/report/plugins/ovirt.py
+@@ -241,19 +241,22 @@ class Ovirt(Plugin, RedHatPlugin):
+                 r'{key}=********'.format(key=key)
+             )
+ 
+-        # Answer files contain passwords
+-        for key in (
+-            'OVESETUP_CONFIG/adminPassword',
+-            'OVESETUP_CONFIG/remoteEngineHostRootPassword',
+-            'OVESETUP_DWH_DB/password',
+-            'OVESETUP_DB/password',
+-            'OVESETUP_REPORTS_CONFIG/adminPassword',
+-            'OVESETUP_REPORTS_DB/password',
++        # Answer files contain passwords.
++        # Replace all keys that have 'password' in them, instead of hard-coding
++        # here the list of keys, which changes between versions.
++        # Sadly, the engine admin password prompt name does not contain
++        # 'password'... so neither does the env key.
++        for item in (
++            'password',
++            'OVESETUP_CONFIG_ADMIN_SETUP',
+         ):
+             self.do_path_regex_sub(
+                 r'/var/lib/ovirt-engine/setup/answers/.*',
+-                r'{key}=(.*)'.format(key=key),
+-                r'{key}=********'.format(key=key)
++                re.compile(
++                    r'(?P<key>[^=]*{item}[^=]*)=.*'.format(item=item),
++                    flags=re.IGNORECASE
++                ),
++                r'\g<key>=********'
+             )
+ 
+         # aaa profiles contain passwords
+-- 
+2.34.3
+
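+A worked example of the substitution, with an illustrative answer-file
+line:
+
+    import re
+
+    line = 'OVESETUP_DB/password=str:secret'
+    scrub = re.compile(r'(?P<key>[^=]*password[^=]*)=.*',
+                       flags=re.IGNORECASE)
+    print(scrub.sub(r'\g<key>=********', line))
+    # OVESETUP_DB/password=********
+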
diff --git a/SOURCES/sos-bz2097674-openshift-ovn-disabled.patch b/SOURCES/sos-bz2097674-openshift-ovn-disabled.patch
new file mode 100644
index 0000000..29241ba
--- /dev/null
+++ b/SOURCES/sos-bz2097674-openshift-ovn-disabled.patch
@@ -0,0 +1,73 @@
+From c2e66fa4dae51f03c7310ba5278897ddecac1aad Mon Sep 17 00:00:00 2001
+From: Nadia Pinaeva <npinaeva@redhat.com>
+Date: Thu, 2 Jun 2022 15:43:09 +0200
+Subject: [PATCH] crio: switch from parsing output in table format to json
+
+Signed-off-by: Nadia Pinaeva <npinaeva@redhat.com>
+---
+ sos/policies/runtimes/crio.py | 30 ++++++++++++++++++++----------
+ 1 file changed, 20 insertions(+), 10 deletions(-)
+
+diff --git a/sos/policies/runtimes/crio.py b/sos/policies/runtimes/crio.py
+index 55082d07..4cae1ecc 100644
+--- a/sos/policies/runtimes/crio.py
++++ b/sos/policies/runtimes/crio.py
+@@ -7,6 +7,7 @@
+ # version 2 of the GNU General Public License.
+ #
+ # See the LICENSE file in the source distribution for further information.
++import json
+ 
+ from sos.policies.runtimes import ContainerRuntime
+ from sos.utilities import sos_get_command_output
+@@ -29,14 +30,15 @@ class CrioContainerRuntime(ContainerRuntime):
+         :type get_all: ``bool``
+         """
+         containers = []
+-        _cmd = "%s ps %s" % (self.binary, '-a' if get_all else '')
++        _cmd = "%s ps %s -o json" % (self.binary, '-a' if get_all else '')
+         if self.active:
+             out = sos_get_command_output(_cmd, chroot=self.policy.sysroot)
+-            if out['status'] == 0:
+-                for ent in out['output'].splitlines()[1:]:
+-                    ent = ent.split()
++            if out["status"] == 0:
++                out_json = json.loads(out["output"])
++                for container in out_json["containers"]:
+                     # takes the form (container_id, container_name)
+-                    containers.append((ent[0], ent[-3]))
++                    containers.append(
++                        (container["id"], container["metadata"]["name"]))
+         return containers
+ 
+     def get_images(self):
+@@ -47,13 +49,21 @@ class CrioContainerRuntime(ContainerRuntime):
+         """
+         images = []
+         if self.active:
+-            out = sos_get_command_output("%s images" % self.binary,
++            out = sos_get_command_output("%s images -o json" % self.binary,
+                                          chroot=self.policy.sysroot)
+             if out['status'] == 0:
+-                for ent in out['output'].splitlines():
+-                    ent = ent.split()
+-                    # takes the form (image_name, image_id)
+-                    images.append((ent[0] + ':' + ent[1], ent[2]))
++                out_json = json.loads(out["output"])
++                for image in out_json["images"]:
++                    # takes the form (repository:tag, image_id)
++                    if len(image["repoTags"]) > 0:
++                        for repo_tag in image["repoTags"]:
++                            images.append((repo_tag, image["id"]))
++                    else:
++                        if len(image["repoDigests"]) == 0:
++                            image_name = "<none>"
++                        else:
++                            image_name = image["repoDigests"][0].split("@")[0]
++                        images.append((image_name + ":<none>", image["id"]))
+         return images
+ 
+     def fmt_container_cmd(self, container, cmd, quotecmd):
+-- 
+2.34.3
+
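For reference, a small sketch of the JSON fields the patched runtime consumes. The sample `crictl` output below is invented and heavily trimmed (real `crictl ps -o json` and `crictl images -o json` output carries many more fields); only the keys the patch actually reads are shown:

import json

# Invented, trimmed stand-ins for `crictl ps -o json` and
# `crictl images -o json` output.
sample_ps = json.dumps({
    "containers": [
        {"id": "3302aab9a2dc", "metadata": {"name": "etcd"}},
        {"id": "90f6ef6b24b7", "metadata": {"name": "kube-apiserver"}},
    ]
})
sample_images = json.dumps({
    "images": [
        {"id": "sha256:aaaa", "repoTags": ["registry.k8s.io/etcd:3.5"],
         "repoDigests": []},
        {"id": "sha256:bbbb", "repoTags": [],
         "repoDigests": ["quay.io/foo/bar@sha256:cccc"]},
    ]
})

# Containers: (container_id, container_name), as in get_containers().
containers = [(c["id"], c["metadata"]["name"])
              for c in json.loads(sample_ps)["containers"]]

# Images: (repository:tag, image_id), as in get_images(); untagged
# images fall back to the first repo digest, or "<none>".
images = []
for image in json.loads(sample_images)["images"]:
    if image["repoTags"]:
        images.extend((tag, image["id"]) for tag in image["repoTags"])
    else:
        name = (image["repoDigests"][0].split("@")[0]
                if image["repoDigests"] else "<none>")
        images.append((name + ":<none>", image["id"]))

print(containers)  # [('3302aab9a2dc', 'etcd'), ('90f6ef6b24b7', 'kube-apiserver')]
print(images)      # [('registry.k8s.io/etcd:3.5', 'sha256:aaaa'), ('quay.io/foo/bar:<none>', 'sha256:bbbb')]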
diff --git a/SPECS/sos.spec b/SPECS/sos.spec
index e468c18..43d79ba 100644
--- a/SPECS/sos.spec
+++ b/SPECS/sos.spec
@@ -4,8 +4,8 @@
 
 Summary: A set of tools to gather troubleshooting information from a system
 Name: sos
-Version: 4.2
-Release: 15%{?dist}
+Version: 4.3
+Release: 2%{?dist}
 Group: Applications/System
 Source0: https://github.com/sosreport/sos/archive/%{version}/sos-%{version}.tar.gz
 Source1: sos-audit-%{auditversion}.tgz
@@ -24,29 +24,17 @@ Conflicts: vdsm < 4.40
 Obsoletes: sos-collector <= 1.9
 Recommends: python3-pexpect
 Recommends: python3-requests
-Patch1: sos-bz1869561-cpuX-individual-sizelimits.patch
-Patch2: sos-bz2011533-unpackaged-recursive-symlink.patch
-Patch3: sos-bz2011534-opacapture-under-allow-system-changes.patch
-Patch4: sos-bz2011535-kernel-psi.patch
-Patch5: sos-bz2011538-iptables-save-under-nf_tables-kmod.patch
-Patch6: sos-bz2011537-estimate-only-option.patch
-Patch7: sos-bz2011536-iptables-based-on-ntf.patch
-Patch8: sos-bz2011507-foreman-puma-status.patch
-Patch9: sos-bz2012858-dryrun-uncaught-exception.patch
-Patch10: sos-bz2019697-openvswitch-offline-analysis.patch
-Patch11: sos-bz2012859-plugin-timeout-unhandled-exception.patch
-Patch12: sos-bz2023481-plugin-timeouts-proper-handling.patch
-Patch13: sos-bz2020778-filter-namespace-per-pattern.patch
-Patch14: sos-bz2024893-cleaner-hostnames-improvements.patch
-Patch15: sos-bz2025611-RHTS-api-change.patch
-Patch16: sos-bz2034001-nvidia-GPU-info.patch
-Patch17: sos-bz2031777-rhui-logs.patch
-Patch18: sos-bz2037350-ocp-backports.patch
-Patch19: sos-bz2043104-foreman-tasks-msgpack.patch
-Patch20: sos-bz2041855-virsh-in-foreground.patch
-Patch21: sos-bz2043488-ovn-proper-package-enablement.patch
-Patch22: sos-bz2054883-plugopt-logging-effective-opts.patch
-Patch23: sos-bz2055548-honour-plugins-timeout-hardcoded.patch
+Patch1: sos-bz2055003-rebase-sos-add-sos-help.patch
+Patch2: sos-bz2095267-ovirt-answer-files-passwords.patch
+Patch3: sos-bz2079491-plugopts-valtype-str.patch
+Patch4: sos-bz2066181-tigervnc-update-collections.patch
+Patch5: sos-bz2082914-collect-pacemaker-cluster.patch
+Patch6: sos-bz2079188-honor-default-plugin-timeout.patch
+Patch7: sos-bz2079490-list-plugins-ignore-options.patch
+Patch8: sos-bz2079492-timeouted-exec-cmd-exception.patch
+Patch9: sos-bz2065563-ocp-backports.patch
+Patch10: sos-bz2097674-openshift-ovn-disabled.patch
+
 
 %description
 Sos is a set of tools that gathers information about system
@@ -67,19 +55,6 @@ support technicians and developers.
 %patch8 -p1
 %patch9 -p1
 %patch10 -p1
-%patch11 -p1
-%patch12 -p1
-%patch13 -p1
-%patch14 -p1
-%patch15 -p1
-%patch16 -p1
-%patch17 -p1
-%patch18 -p1
-%patch19 -p1
-%patch20 -p1
-%patch21 -p1
-%patch22 -p1
-%patch23 -p1
 
 %build
 %py3_build
@@ -147,6 +122,44 @@ of the system.  Currently storage and filesystem commands are audited.
 
 
 %changelog
+* Thu Jun 16 2022 Pavel Moravec <pmoravec@redhat.com> = 4.3-2
+- [ocp, openshift] Re-align API collection options and rename
+  Resolves: bz2065563
+- [utilities] Close file only when storing to file
+  Resolves: bz2079492
+- [report] --list-plugins should report used, not default, option values
+  Resolves: bz2079490
+- [report] Honor plugins' hardcoded plugin_timeout
+  Resolves: bz2079188
+- crio: switch from parsing output in table format to json
+  Resolves: bz2097674
+- [pacemaker] Redesign node enumeration logic
+  Resolves: bz2082914
+- [tigervnc] Update collections for newer versions of TigerVNC
+  Resolves: bz2066181
+- [plugins] Allow 'str' PlugOpt type to accept any value
+  Resolves: bz2079491
+- [ovirt] answer files: Filter out all password keys
+  Resolves: bz2095267
+
+* Thu Mar 24 2022 Pavel Moravec <pmoravec@redhat.com> = 4.3-1
+- Rebase on upstream 4.3
+  Resolves: bz2055003
+- [sapnw] Fix IndexError exception
+  Resolves: bz2065551
+- [subscription_manager] collect syspurpose data via sub-man
+  Resolves: bz2002333
+- [Plugin, utilities] Allow writing command output directly to disk
+  Resolves: bz2065564
+- [Ceph] Add support for containerized Ceph setup
+  Resolves: bz2065562
+- [unbound] Add new plugin for Unbound DNS resolver
+  Resolves: bz2065560
+- [discovery] Add new discovery plugin
+  Resolves: bz2065558
+- [system] Collect glibc tuning decisions
+  Resolves: bz2032913
+
 * Wed Feb 23 2022 Pavel Moravec <pmoravec@redhat.com> = 4.2-15
 - [sosnode] Handle downstream versioning for runtime option
   Resolves: bz2037350