Blob Blame History Raw
From d0f9d507b0ec63c9e8f3e5d7b6507d9d0f97c038 Mon Sep 17 00:00:00 2001
From: Jake Hunsaker <jhunsake@redhat.com>
Date: Tue, 15 Feb 2022 16:24:47 -0500
Subject: [PATCH] [runtimes] Allow container IDs to be used with
 `container_exists()`

As container runtimes can interchange container names and container IDs,
sos should also allow the use of container IDs when checking for the
presence of a given container.

In particular, this change unblocks the use of `Plugin.exec_cmd()` when
used in conjunction with `Plugin.get_container_by_name()` to pick a
container based on a provided regex that the container name may match.

Related: #2856

Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
---
 sos/policies/runtimes/__init__.py | 17 +++++++++++++++++
 sos/report/plugins/__init__.py    |  6 +++---
 2 files changed, 20 insertions(+), 3 deletions(-)

diff --git a/sos/policies/runtimes/__init__.py b/sos/policies/runtimes/__init__.py
index 5ac67354..d2837349 100644
--- a/sos/policies/runtimes/__init__.py
+++ b/sos/policies/runtimes/__init__.py
@@ -147,6 +147,23 @@ class ContainerRuntime():
                     vols.append(ent[-1])
         return vols
 
+    def container_exists(self, container):
+        """Check if a given container ID or name exists on the system from the
+        perspective of the container runtime.
+
+        Note that this will only check _running_ containers
+
+        :param container:       The name or ID of the container
+        :type container:        ``str``
+
+        :returns:               True if the container exists, else False
+        :rtype:                 ``bool``
+        """
+        for _contup in self.containers:
+            if container in _contup:
+                return True
+        return False
+
     def fmt_container_cmd(self, container, cmd, quotecmd):
         """Format a command to run inside a container using the runtime
 
diff --git a/sos/report/plugins/__init__.py b/sos/report/plugins/__init__.py
index 2988be08..cc5cb65b 100644
--- a/sos/report/plugins/__init__.py
+++ b/sos/report/plugins/__init__.py
@@ -2593,7 +2593,7 @@ class Plugin():
         """If a container runtime is present, check to see if a container with
         a given name is currently running
 
-        :param name:    The name of the container to check presence of
+        :param name:    The name or ID of the container to check presence of
         :type name: ``str``
 
         :returns: ``True`` if `name` exists, else ``False``
@@ -2601,8 +2601,8 @@ class Plugin():
         """
         _runtime = self._get_container_runtime()
         if _runtime is not None:
-            con = _runtime.get_container_by_name(name)
-            return con is not None
+            return (_runtime.container_exists(name) or
+                    _runtime.get_container_by_name(name) is not None)
         return False
 
     def get_all_containers_by_regex(self, regex, get_all=False):
-- 
2.34.3

From 2ae16e0245e1b01b8547e507abb69c11871a8467 Mon Sep 17 00:00:00 2001
From: Jake Hunsaker <jhunsake@redhat.com>
Date: Mon, 21 Feb 2022 14:37:09 -0500
Subject: [PATCH] [sosnode] Handle downstream versioning for runtime option
 check

First, adds parsing and formatting for an sos installation's release
version according to the loaded package manager for that node.

Adds a fallback version check for 4.2-13 for RHEL downstreams that
backport the `container-runtime` option into sos-4.2.

Carry this in upstream to account for use cases where the workstation used
to run `collect` may be from a different stream than the one used by the
cluster nodes.

Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
---
 sos/collector/sosnode.py | 60 ++++++++++++++++++++++++++++++++++------
 1 file changed, 51 insertions(+), 9 deletions(-)

diff --git a/sos/collector/sosnode.py b/sos/collector/sosnode.py
index 7bbe0cd1..d9b998b0 100644
--- a/sos/collector/sosnode.py
+++ b/sos/collector/sosnode.py
@@ -275,21 +275,34 @@ class SosNode():
     def _load_sos_info(self):
         """Queries the node for information about the installed version of sos
         """
+        ver = None
+        rel = None
         if self.host.container_version_command is None:
             pkg = self.host.package_manager.pkg_version(self.host.sos_pkg_name)
             if pkg is not None:
                 ver = '.'.join(pkg['version'])
-                self.sos_info['version'] = ver
+                if pkg['release']:
+                    rel = pkg['release']
+
         else:
             # use the containerized policy's command
             pkgs = self.run_command(self.host.container_version_command,
                                     use_container=True, need_root=True)
             if pkgs['status'] == 0:
-                ver = pkgs['output'].strip().split('-')[1]
-                if ver:
-                    self.sos_info['version'] = ver
-            else:
-                self.sos_info['version'] = None
+                _, ver, rel = pkgs['output'].strip().split('-')
+
+        if ver:
+            if len(ver.split('.')) == 2:
+                # safeguard against maintenance releases throwing off the
+                # comparison by LooseVersion
+                ver += '.0'
+            try:
+                ver += '-%s' % rel.split('.')[0]
+            except Exception as err:
+                self.log_debug("Unable to fully parse sos release: %s" % err)
+
+        self.sos_info['version'] = ver
+
         if self.sos_info['version']:
             self.log_info('sos version is %s' % self.sos_info['version'])
         else:
@@ -381,9 +394,37 @@ class SosNode():
         """Checks to see if the sos installation on the node is AT LEAST the
         given ver. This means that if the installed version is greater than
         ver, this will still return True
+
+        :param ver: Version number we are trying to verify is installed
+        :type ver:  ``str``
+
+        :returns:   True if installed version is at least ``ver``, else False
+        :rtype:     ``bool``
         """
-        return self.sos_info['version'] is not None and \
-            LooseVersion(self.sos_info['version']) >= ver
+        def _format_version(ver):
+            # format the version we're checking to a standard form of X.Y.Z-R
+            try:
+                _fver = ver.split('-')[0]
+                _rel = ''
+                if '-' in ver:
+                    _rel = '-' + ver.split('-')[-1].split('.')[0]
+                if len(_fver.split('.')) == 2:
+                    _fver += '.0'
+
+                return _fver + _rel
+            except Exception as err:
+                self.log_debug("Unable to format '%s': %s" % (ver, err))
+                return ver
+
+        _ver = _format_version(ver)
+
+        try:
+            _node_ver = LooseVersion(self.sos_info['version'])
+            _test_ver = LooseVersion(_ver)
+            return _node_ver >= _test_ver
+        except Exception as err:
+            self.log_error("Error checking sos version: %s" % err)
+            return False
 
     def is_installed(self, pkg):
         """Checks if a given package is installed on the node"""
@@ -587,7 +628,8 @@ class SosNode():
                 sos_opts.append('--cmd-timeout=%s'
                                 % quote(str(self.opts.cmd_timeout)))
 
-        if self.check_sos_version('4.3'):
+        # handle downstream versions that backported this option
+        if self.check_sos_version('4.3') or self.check_sos_version('4.2-13'):
             if self.opts.container_runtime != 'auto':
                 sos_opts.append(
                     "--container-runtime=%s" % self.opts.container_runtime
-- 
2.34.3

From cc60fa5ee25bffed9203a4f786256185b7fe0115 Mon Sep 17 00:00:00 2001
From: Nadia Pinaeva <npinaeva@redhat.com>
Date: Tue, 15 Mar 2022 11:49:57 +0100
Subject: [PATCH] Add ovs datapath and groups collection commands; add
 ct-zone-list command for openshift-ovn

Signed-off-by: Nadia Pinaeva <npinaeva@redhat.com>
---
 sos/report/plugins/openshift_ovn.py | 4 ++++
 sos/report/plugins/openvswitch.py   | 3 +++
 2 files changed, 7 insertions(+)

diff --git a/sos/report/plugins/openshift_ovn.py b/sos/report/plugins/openshift_ovn.py
index 168f1dd3..b4787b8e 100644
--- a/sos/report/plugins/openshift_ovn.py
+++ b/sos/report/plugins/openshift_ovn.py
@@ -34,6 +34,10 @@ class OpenshiftOVN(Plugin, RedHatPlugin):
             'ovn-appctl -t /var/run/ovn/ovnsb_db.ctl ' +
             'cluster/status OVN_Southbound'],
             container='ovnkube-master')
+        self.add_cmd_output([
+            'ovs-appctl -t /var/run/ovn/ovn-controller.*.ctl ' +
+            'ct-zone-list'],
+            container='ovnkube-node')
         self.add_cmd_output([
             'ovs-appctl -t ovs-monitor-ipsec tunnels/show',
             'ipsec status',
diff --git a/sos/report/plugins/openvswitch.py b/sos/report/plugins/openvswitch.py
index 179d1532..159b0bd2 100644
--- a/sos/report/plugins/openvswitch.py
+++ b/sos/report/plugins/openvswitch.py
@@ -124,6 +124,8 @@ class OpenVSwitch(Plugin):
             "ovs-vsctl -t 5 list interface",
             # Capture OVS detailed information from all the bridges
             "ovs-vsctl -t 5 list bridge",
+            # Capture OVS datapath list
+            "ovs-vsctl -t 5 list datapath",
             # Capture DPDK queue to pmd mapping
             "ovs-appctl dpif-netdev/pmd-rxq-show",
             # Capture DPDK pmd stats
@@ -229,6 +231,7 @@ class OpenVSwitch(Plugin):
                     "ovs-ofctl queue-get-config %s" % br,
                     "ovs-ofctl queue-stats %s" % br,
                     "ovs-ofctl show %s" % br,
+                    "ovs-ofctl dump-groups %s" % br,
                 ])
 
                 # Flow protocols currently supported
-- 
2.34.3

From af40be92f502b35fa9d39ce4d4fea7d80c367830 Mon Sep 17 00:00:00 2001
From: Nadia Pinaeva <npinaeva@redhat.com>
Date: Tue, 15 Mar 2022 13:09:55 +0100
Subject: [PATCH] Improve sos collect for OCP: 1. wait for sos tmp project to
 be deleted (just calling delete changes project state to Terminating, and
 running a new sos collect is not possible before this project is fully
 deleted) 2. use --retries flag to copy sos reports from the nodes more
 reliably. The flag has been recently added to kubectl, and the most reliable
 way to check if it's available or not is to check command error output for
 "unknown flag" substring

Signed-off-by: Nadia Pinaeva <npinaeva@redhat.com>
---
 sos/collector/clusters/ocp.py  | 5 +++++
 sos/collector/transports/oc.py | 6 +++++-
 2 files changed, 10 insertions(+), 1 deletion(-)

diff --git a/sos/collector/clusters/ocp.py b/sos/collector/clusters/ocp.py
index f1714239..9beb2f9b 100644
--- a/sos/collector/clusters/ocp.py
+++ b/sos/collector/clusters/ocp.py
@@ -123,6 +123,11 @@ class ocp(Cluster):
             if not ret['status'] == 0:
                 self.log_error("Error deleting temporary project: %s"
                                % ret['output'])
+            ret = self.exec_primary_cmd("oc wait namespace/%s --for=delete "
+                                        "--timeout=30s" % self.project)
+            if not ret['status'] == 0:
+                self.log_error("Error waiting for temporary project to be "
+                               "deleted: %s" % ret['output'])
             # don't leave the config on a non-existing project
             self.exec_primary_cmd("oc project default")
             self.project = None
diff --git a/sos/collector/transports/oc.py b/sos/collector/transports/oc.py
index 0fc9eee8..90a802b2 100644
--- a/sos/collector/transports/oc.py
+++ b/sos/collector/transports/oc.py
@@ -231,5 +231,9 @@ class OCTransport(RemoteTransport):
                 % (self.project, self.pod_name))
 
     def _retrieve_file(self, fname, dest):
-        cmd = self.run_oc("cp %s:%s %s" % (self.pod_name, fname, dest))
+        # check if --retries flag is available for given version of oc
+        result = self.run_oc("cp --retries", stderr=True)
+        flags = '' if "unknown flag" in result["output"] else '--retries=5'
+        cmd = self.run_oc("cp %s %s:%s %s"
+                          % (flags, self.pod_name, fname, dest))
         return cmd['status'] == 0
-- 
2.34.3

From 3b0676b90ff65f20eaba3062775ff72b89386ffc Mon Sep 17 00:00:00 2001
From: Jake Hunsaker <jhunsake@redhat.com>
Date: Tue, 22 Mar 2022 14:25:24 -0400
Subject: [PATCH] [Plugin] Allow plugins to define default command environment
 vars

Adds the ability for plugins to define a default set of environment vars
to pass to all commands executed by the plugin. This may be done either
via the new `set_default_cmd_environment()` or
`add_default_cmd_environment()` methods. The former will override any
previously set values, whereas the latter will add/update/modify any
existing values.

Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
---
 sos/report/plugins/__init__.py                | 55 ++++++++++++++++++-
 .../plugin_tests/plugin_environment.py        | 44 +++++++++++++++
 .../fake_plugins/default_env_test.py          | 28 ++++++++++
 tests/unittests/plugin_tests.py               | 15 +++++
 4 files changed, 140 insertions(+), 2 deletions(-)
 create mode 100644 tests/report_tests/plugin_tests/plugin_environment.py
 create mode 100644 tests/test_data/fake_plugins/default_env_test.py

diff --git a/sos/report/plugins/__init__.py b/sos/report/plugins/__init__.py
index 336b4d22..74b4f4be 100644
--- a/sos/report/plugins/__init__.py
+++ b/sos/report/plugins/__init__.py
@@ -571,6 +571,7 @@ class Plugin():
         self.manifest = None
         self.skip_files = commons['cmdlineopts'].skip_files
         self.skip_commands = commons['cmdlineopts'].skip_commands
+        self.default_environment = {}
 
         self.soslog = self.commons['soslog'] if 'soslog' in self.commons \
             else logging.getLogger('sos')
@@ -624,6 +625,52 @@ class Plugin():
         self.manifest.add_field('strings', {})
         self.manifest.add_field('containers', {})
 
+    def set_default_cmd_environment(self, env_vars):
+        """
+        Specify a collection of environment variables that should always be
+        passed to commands being executed by this plugin.
+
+        :param env_vars:    The environment variables and their values to set
+        :type env_vars:     ``dict{ENV_VAR_NAME: ENV_VAR_VALUE}``
+        """
+        if not isinstance(env_vars, dict):
+            raise TypeError(
+                "Environment variables for Plugin must be specified by dict"
+            )
+        self.default_environment = env_vars
+        self._log_debug("Default environment for all commands now set to %s"
+                        % self.default_environment)
+
+    def add_default_cmd_environment(self, env_vars):
+        """
+        Add or modify a specific environment variable in the set of default
+        environment variables used by this Plugin.
+
+        :param env_vars:    The environment variables to add to the current
+                            set of env vars in use
+        :type env_vars:     ``dict``
+        """
+        if not isinstance(env_vars, dict):
+            raise TypeError("Environment variables must be added via dict")
+        self._log_debug("Adding %s to default environment" % env_vars)
+        self.default_environment.update(env_vars)
+
+    def _get_cmd_environment(self, env=None):
+        """
+        Get the merged set of environment variables for a command about to be
+        executed by this plugin.
+
+        :returns: The set of env vars to use for a command
+        :rtype: ``dict``
+        """
+        if env is None:
+            return self.default_environment
+        if not isinstance(env, dict):
+            raise TypeError("Command env vars must be passed as dict")
+        _env = self.default_environment.copy()
+        _env.update(env)
+        return _env
+
     def timeout_from_options(self, optname, plugoptname, default_timeout):
         """Returns either the default [plugin|cmd] timeout value, the value as
         provided on the commandline via -k plugin.[|cmd-]timeout=value, or the
@@ -2258,6 +2305,8 @@ class Plugin():
 
         _tags = list(set(_tags))
 
+        _env = self._get_cmd_environment(env)
+
         if chroot or self.commons['cmdlineopts'].chroot == 'always':
             root = self.sysroot
         else:
@@ -2282,7 +2331,7 @@ class Plugin():
 
         result = sos_get_command_output(
             cmd, timeout=timeout, stderr=stderr, chroot=root,
-            chdir=runat, env=env, binary=binary, sizelimit=sizelimit,
+            chdir=runat, env=_env, binary=binary, sizelimit=sizelimit,
             poller=self.check_timeout, foreground=foreground,
             to_file=out_file
         )
@@ -2510,6 +2559,8 @@ class Plugin():
         else:
             root = None
 
+        _env = self._get_cmd_environment(env)
+
         if container:
             if self._get_container_runtime() is None:
                 self._log_info("Cannot run cmd '%s' in container %s: no "
@@ -2522,7 +2573,7 @@ class Plugin():
                                "container is running." % (cmd, container))
 
         return sos_get_command_output(cmd, timeout=timeout, chroot=root,
-                                      chdir=runat, binary=binary, env=env,
+                                      chdir=runat, binary=binary, env=_env,
                                       foreground=foreground, stderr=stderr)
 
     def _add_container_file_to_manifest(self, container, path, arcpath, tags):
diff --git a/tests/report_tests/plugin_tests/plugin_environment.py b/tests/report_tests/plugin_tests/plugin_environment.py
new file mode 100644
index 00000000..3158437a
--- /dev/null
+++ b/tests/report_tests/plugin_tests/plugin_environment.py
@@ -0,0 +1,44 @@
+# This file is part of the sos project: https://github.com/sosreport/sos
+#
+# This copyrighted material is made available to anyone wishing to use,
+# modify, copy, or redistribute it subject to the terms and conditions of
+# version 2 of the GNU General Public License.
+#
+# See the LICENSE file in the source distribution for further information.
+
+import os
+
+from sos_tests import StageTwoReportTest
+
+
+class PluginDefaultEnvironmentTest(StageTwoReportTest):
+    """
+    Ensure that being able to set a default set of environment variables is
+    working correctly and does not leave a lingering env var on the system
+
+    :avocado: tags=stageone
+    """
+
+    install_plugins = ['default_env_test']
+    sos_cmd = '-o default_env_test'
+
+    def test_environment_used_in_cmd(self):
+        self.assertFileHasContent(
+            'sos_commands/default_env_test/env_var_test',
+            'Does Linus play hockey?'
+        )
+
+    def test_environment_setting_logged(self):
+        self.assertSosLogContains(
+            'Default environment for all commands now set to'
+        )
+
+    def test_environment_not_set_on_host(self):
+        self.assertTrue('TORVALDS' not in os.environ)
+        self.assertTrue('GREATESTSPORT' not in os.environ)
+
+    def test_environment_not_captured(self):
+        # we should still have an empty environment file
+        self.assertFileCollected('environment')
+        self.assertFileNotHasContent('environment', 'TORVALDS')
+        self.assertFileNotHasContent('environment', 'GREATESTSPORT')
diff --git a/tests/test_data/fake_plugins/default_env_test.py b/tests/test_data/fake_plugins/default_env_test.py
new file mode 100644
index 00000000..d1d1fb78
--- /dev/null
+++ b/tests/test_data/fake_plugins/default_env_test.py
@@ -0,0 +1,28 @@
+# This file is part of the sos project: https://github.com/sosreport/sos
+#
+# This copyrighted material is made available to anyone wishing to use,
+# modify, copy, or redistribute it subject to the terms and conditions of
+# version 2 of the GNU General Public License.
+#
+# See the LICENSE file in the source distribution for further information.
+
+from sos.report.plugins import Plugin, IndependentPlugin
+
+
+class DefaultEnv(Plugin, IndependentPlugin):
+
+    plugin_name = 'default_env_test'
+    short_desc = 'Fake plugin to test default env var handling'
+
+    def setup(self):
+        self.set_default_cmd_environment({
+            'TORVALDS': 'Linus',
+            'GREATESTSPORT': 'hockey'
+        })
+
+        self.add_cmd_output(
+            "sh -c 'echo Does '$TORVALDS' play '$GREATESTSPORT'?'",
+            suggest_filename='env_var_test'
+        )
+
+        self.add_env_var(['TORVALDS', 'GREATESTSPORT'])
diff --git a/tests/unittests/plugin_tests.py b/tests/unittests/plugin_tests.py
index 0dfa243d..e469b78e 100644
--- a/tests/unittests/plugin_tests.py
+++ b/tests/unittests/plugin_tests.py
@@ -305,6 +305,21 @@ class PluginTests(unittest.TestCase):
         p.postproc()
         self.assertTrue(p.did_postproc)
 
+    def test_set_default_cmd_env(self):
+        p = MockPlugin({
+            'sysroot': self.sysroot,
+            'policy': LinuxPolicy(init=InitSystem(), probe_runtime=False),
+            'cmdlineopts': MockOptions(),
+            'devices': {}
+        })
+        e = {'TORVALDS': 'Linus'}
+        p.set_default_cmd_environment(e)
+        self.assertEquals(p.default_environment, e)
+        add_e = {'GREATESTSPORT': 'hockey'}
+        p.add_default_cmd_environment(add_e)
+        self.assertEquals(p.default_environment['GREATESTSPORT'], 'hockey')
+        self.assertEquals(p.default_environment['TORVALDS'], 'Linus')
+
 
 class AddCopySpecTests(unittest.TestCase):
 
-- 
2.34.3

From 1e12325efaa500d304dcbfbeeb50e72ed0f938f5 Mon Sep 17 00:00:00 2001
From: Vladislav Walek <22072258+vwalek@users.noreply.github.com>
Date: Thu, 17 Mar 2022 14:10:26 -0700
Subject: [PATCH] [openshift] Adding ability to use the localhost.kubeconfig
 and KUBECONFIG env to use system:admin

Signed-off-by: Vladislav Walek <22072258+vwalek@users.noreply.github.com>
---
 sos/report/plugins/openshift.py | 45 +++++++++++++++++++++++++++++++--
 1 file changed, 43 insertions(+), 2 deletions(-)

diff --git a/sos/report/plugins/openshift.py b/sos/report/plugins/openshift.py
index 5ae38178..d643f04c 100644
--- a/sos/report/plugins/openshift.py
+++ b/sos/report/plugins/openshift.py
@@ -53,12 +53,19 @@ class Openshift(Plugin, RedHatPlugin):
     profiles = ('openshift',)
     packages = ('openshift-hyperkube',)
 
+    master_localhost_kubeconfig = (
+        '/etc/kubernetes/static-pod-resources/'
+        'kube-apiserver-certs/secrets/node-kubeconfigs/localhost.kubeconfig'
+        )
+
     option_list = [
         PluginOpt('token', default=None, val_type=str,
                   desc='admin token to allow API queries'),
+        PluginOpt('kubeconfig', default=None, val_type=str,
+                  desc='Path to a locally available kubeconfig file'),
         PluginOpt('host', default='https://localhost:6443',
                   desc='host address to use for oc login, including port'),
-        PluginOpt('no-oc', default=False, desc='do not collect `oc` output'),
+        PluginOpt('no-oc', default=True, desc='do not collect `oc` output'),
         PluginOpt('podlogs', default=True, desc='collect logs from each pod'),
         PluginOpt('podlogs-filter', default='', val_type=str,
                   desc='only collect logs from pods matching this pattern'),
@@ -73,6 +80,10 @@ class Openshift(Plugin, RedHatPlugin):
         """Check to see if we can run `oc` commands"""
         return self.exec_cmd('oc whoami')['status'] == 0
 
+    def _check_localhost_kubeconfig(self):
+        """Check if the localhost.kubeconfig exists with system:admin user"""
+        return self.path_exists(self.get_option('kubeconfig'))
+
     def _check_oc_logged_in(self):
         """See if we're logged in to the API service, and if not attempt to do
         so using provided plugin options
@@ -80,8 +91,38 @@ class Openshift(Plugin, RedHatPlugin):
         if self._check_oc_function():
             return True
 
-        # Not logged in currently, attempt to do so
+        if self.get_option('kubeconfig') is None:
+            # If admin doesn't add the kubeconfig
+            # use default localhost.kubeconfig
+            self.set_option(
+                'kubeconfig',
+                self.master_localhost_kubeconfig
+            )
+
+        # Check first if we can use the localhost.kubeconfig before
+        # using token. We don't want to use 'host' option due we use
+        # cluster url from kubeconfig. Default is localhost.
+        if self._check_localhost_kubeconfig():
+            self.set_default_cmd_environment({
+                'KUBECONFIG': self.get_option('kubeconfig')
+            })
+
+            oc_res = self.exec_cmd(
+                "oc login -u system:admin "
+                "--insecure-skip-tls-verify=True"
+            )
+            if oc_res['status'] == 0 and self._check_oc_function():
+                return True
+
+            self._log_warn(
+                "The login command failed with status: %s and error: %s"
+                % (oc_res['status'], oc_res['output'])
+            )
+            return False
+
+        # If kubeconfig is not defined, check if token is provided.
         token = self.get_option('token') or os.getenv('SOSOCPTOKEN', None)
+
         if token:
             oc_res = self.exec_cmd("oc login %s --token=%s "
                                    "--insecure-skip-tls-verify=True"
-- 
2.34.3

From 61765992812afb785e9552e01e3b5579118a6963 Mon Sep 17 00:00:00 2001
From: Nadia Pinaeva <npinaeva@redhat.com>
Date: Fri, 1 Apr 2022 12:05:36 +0200
Subject: [PATCH] Add one more container for plugin enablement

Signed-off-by: Nadia Pinaeva <npinaeva@redhat.com>
---
 sos/report/plugins/openshift_ovn.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/sos/report/plugins/openshift_ovn.py b/sos/report/plugins/openshift_ovn.py
index b4787b8e..98522b1e 100644
--- a/sos/report/plugins/openshift_ovn.py
+++ b/sos/report/plugins/openshift_ovn.py
@@ -16,7 +16,7 @@ class OpenshiftOVN(Plugin, RedHatPlugin):
     """
     short_desc = 'Openshift OVN'
     plugin_name = "openshift_ovn"
-    containers = ('ovnkube-master', 'ovn-ipsec')
+    containers = ('ovnkube-master', 'ovnkube-node', 'ovn-ipsec')
     profiles = ('openshift',)
 
     def setup(self):
-- 
2.34.3

From d3aa071efc85507341cf65dd61414a734654f50a Mon Sep 17 00:00:00 2001
From: Jake Hunsaker <jhunsake@redhat.com>
Date: Mon, 28 Mar 2022 14:47:09 -0400
Subject: [PATCH] [presets] Adjust OCP preset options

Adjust the options used by the 'ocp' preset to better reflect the
current collection needs and approach.

This includes disabling the `cgroups` plugin due to the large amount of
mostly irrelevant data captured due to the high number of containers
present on OCP nodes, ensuring the `--container-runtime` option is set
to `crio` to align container-based collections, disabling HTML report
generation and increasing the base log size rather than blindly enabling
all-logs.

Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
---
 sos/presets/redhat/__init__.py | 13 +++++++++----
 1 file changed, 9 insertions(+), 4 deletions(-)

diff --git a/sos/presets/redhat/__init__.py b/sos/presets/redhat/__init__.py
index 865c9b6b..0b9f6f11 100644
--- a/sos/presets/redhat/__init__.py
+++ b/sos/presets/redhat/__init__.py
@@ -36,10 +36,15 @@ RHOSP_OPTS = SoSOptions(plugopts=[
 
 RHOCP = "ocp"
 RHOCP_DESC = "OpenShift Container Platform by Red Hat"
-RHOCP_OPTS = SoSOptions(all_logs=True, verify=True, plugopts=[
-                             'networking.timeout=600',
-                             'networking.ethtool_namespaces=False',
-                             'networking.namespaces=200'])
+RHOCP_OPTS = SoSOptions(
+    verify=True, skip_plugins=['cgroups'], container_runtime='crio',
+    no_report=True, log_size=100,
+    plugopts=[
+        'crio.timeout=600',
+        'networking.timeout=600',
+        'networking.ethtool_namespaces=False',
+        'networking.namespaces=200'
+    ])
 
 RH_CFME = "cfme"
 RH_CFME_DESC = "Red Hat CloudForms"
-- 
2.34.3

From f2b67ab820070063995689fed03492cdaa012d01 Mon Sep 17 00:00:00 2001
From: Nadia Pinaeva <npinaeva@redhat.com>
Date: Fri, 1 Apr 2022 17:01:35 +0200
Subject: [PATCH] Use /etc/os-release instead of /etc/redhat-release as the
 most compatible way to find host release

Signed-off-by: Nadia Pinaeva <npinaeva@redhat.com>
---
 sos/policies/distros/redhat.py | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/sos/policies/distros/redhat.py b/sos/policies/distros/redhat.py
index 0c72a5e4..2e117f37 100644
--- a/sos/policies/distros/redhat.py
+++ b/sos/policies/distros/redhat.py
@@ -40,7 +40,6 @@ class RedHatPolicy(LinuxPolicy):
         ('Distribution Website', 'https://www.redhat.com/'),
         ('Commercial Support', 'https://www.access.redhat.com/')
     ]
-    _redhat_release = '/etc/redhat-release'
     _tmp_dir = "/var/tmp"
     _in_container = False
     default_scl_prefix = '/opt/rh'
@@ -471,7 +470,7 @@ support representative.
         atomic = False
         if ENV_HOST_SYSROOT not in os.environ:
             return atomic
-        host_release = os.environ[ENV_HOST_SYSROOT] + cls._redhat_release
+        host_release = os.environ[ENV_HOST_SYSROOT] + OS_RELEASE
         if not os.path.exists(host_release):
             return False
         try:
@@ -558,7 +557,7 @@ support representative.
         coreos = False
         if ENV_HOST_SYSROOT not in os.environ:
             return coreos
-        host_release = os.environ[ENV_HOST_SYSROOT] + cls._redhat_release
+        host_release = os.environ[ENV_HOST_SYSROOT] + OS_RELEASE
         try:
             for line in open(host_release, 'r').read().splitlines():
                 coreos |= 'Red Hat Enterprise Linux CoreOS' in line
-- 
2.34.3

From ee0dd68199a2c9296eafe64ead5b2263c8270e4a Mon Sep 17 00:00:00 2001
From: Nadia Pinaeva <npinaeva@redhat.com>
Date: Wed, 6 Apr 2022 11:56:41 +0200
Subject: [PATCH] Use --force-pull-image option for pods created with oc. Set
 --force-pull-image=True by default, can be turned off with
 --force-pull-image=False

Signed-off-by: Nadia Pinaeva <npinaeva@redhat.com>
---
 man/en/sos-collect.1           | 16 +++++++++++-----
 sos/collector/__init__.py      |  9 +++++----
 sos/collector/transports/oc.py |  2 ++
 sos/options.py                 | 20 ++++++++++++++------
 4 files changed, 32 insertions(+), 15 deletions(-)

diff --git a/man/en/sos-collect.1 b/man/en/sos-collect.1
index 9b0a5d7b..2f60332b 100644
--- a/man/en/sos-collect.1
+++ b/man/en/sos-collect.1
@@ -28,7 +28,7 @@ sos collect \- Collect sosreports from multiple (cluster) nodes
     [\-\-no\-local]
     [\-\-primary PRIMARY]
     [\-\-image IMAGE]
-    [\-\-force-pull-image]
+    [\-\-force-pull-image TOGGLE, --pull TOGGLE]
     [\-\-registry-user USER]
     [\-\-registry-password PASSWORD]
     [\-\-registry-authfile FILE]
@@ -262,10 +262,16 @@ Specify an image to use for the temporary container created for collections on
 containerized host, if you do not want to use the default image specifed by the
 host's policy. Note that this should include the registry.
 .TP
-\fB\-\-force-pull-image\fR
-Use this option to force the container runtime to pull the specified image (even
-if it is the policy default image) even if the image already exists on the host.
-This may be useful to update an older container image on containerized hosts.
+\fB\-\-force-pull-image TOGGLE, \-\-pull TOGGLE\fR
+When collecting an sos report from a containerized host, force the host to always
+pull the specified image, even if that image already exists on the host.
+This is useful to ensure that the latest version of that image is always in use.
+Disabling this option will use whatever version of the image is present on the node,
+and only attempt a pull if there is no copy of the image present at all.
+
+Enable with true/on/yes or disable with false/off/no
+
+Default: true
 .TP
 \fB\-\-registry-user USER\fR
 Specify the username to authenticate to the registry with in order to pull the container
diff --git a/sos/collector/__init__.py b/sos/collector/__init__.py
index d898ca34..66c3d932 100644
--- a/sos/collector/__init__.py
+++ b/sos/collector/__init__.py
@@ -27,7 +27,7 @@ from pipes import quote
 from textwrap import fill
 from sos.cleaner import SoSCleaner
 from sos.collector.sosnode import SosNode
-from sos.options import ClusterOption
+from sos.options import ClusterOption, str_to_bool
 from sos.component import SoSComponent
 from sos.utilities import bold
 from sos import __version__
@@ -85,7 +85,7 @@ class SoSCollector(SoSComponent):
         'encrypt_pass': '',
         'group': None,
         'image': '',
-        'force_pull_image': False,
+        'force_pull_image': True,
         'jobs': 4,
         'keywords': [],
         'keyword_file': None,
@@ -357,8 +357,9 @@ class SoSCollector(SoSComponent):
         collect_grp.add_argument('--image',
                                  help=('Specify the container image to use for'
                                        ' containerized hosts.'))
-        collect_grp.add_argument('--force-pull-image', '--pull', default=False,
-                                 action='store_true',
+        collect_grp.add_argument('--force-pull-image', '--pull',
+                                 default=True, choices=(True, False),
+                                 type=str_to_bool,
                                  help='Force pull the container image even if '
                                       'it already exists on the host')
         collect_grp.add_argument('--registry-user', default=None,
diff --git a/sos/collector/transports/oc.py b/sos/collector/transports/oc.py
index 90a802b2..8f6aa9b4 100644
--- a/sos/collector/transports/oc.py
+++ b/sos/collector/transports/oc.py
@@ -147,6 +147,8 @@ class OCTransport(RemoteTransport):
                         "tty": True
                     }
                 ],
+                "imagePullPolicy":
+                    "Always" if self.opts.force_pull_image else "IfNotPresent",
                 "restartPolicy": "Never",
                 "nodeName": self.address,
                 "hostNetwork": True,
diff --git a/sos/options.py b/sos/options.py
index 4846a509..2d5a5135 100644
--- a/sos/options.py
+++ b/sos/options.py
@@ -18,6 +18,16 @@ def _is_seq(val):
     return val_type is list or val_type is tuple
 
 
+def str_to_bool(val):
+    _val = val.lower()
+    if _val in ['true', 'on', 'yes']:
+        return True
+    elif _val in ['false', 'off', 'no']:
+        return False
+    else:
+        return None
+
+
 class SoSOptions():
 
     def _merge_opt(self, opt, src, is_default):
@@ -153,15 +163,13 @@ class SoSOptions():
         if isinstance(self.arg_defaults[key], list):
             return [v for v in val.split(',')]
         if isinstance(self.arg_defaults[key], bool):
-            _val = val.lower()
-            if _val in ['true', 'on', 'yes']:
-                return True
-            elif _val in ['false', 'off', 'no']:
-                return False
-            else:
+            val = str_to_bool(val)
+            if val is None:
                 raise Exception(
                     "Value of '%s' in %s must be True or False or analagous"
                     % (key, conf))
+            else:
+                return val
         if isinstance(self.arg_defaults[key], int):
             try:
                 return int(val)
-- 
2.34.3

From ce289a3ae7101a898efdb84ddfd575576ba5819b Mon Sep 17 00:00:00 2001
From: Jake Hunsaker <jhunsake@redhat.com>
Date: Tue, 5 Apr 2022 11:32:11 -0400
Subject: [PATCH] [ocp, openshift] Re-align API collection options and rename
 option

Previously, in #2888, the `openshift` plugin was extended to allow API
collections by using a default-available kubeconfig file rather than
relying on user-provided tokens. This also included flipping the default
value of the `no-oc` plugin option to `True` (meaning do not collect API
output by default).

This worked for the plugin, but it introduced a gap in `sos collect`
whereby the cluster profile could no longer reliably enable API
collections when trying to leverage the new functionality of not
requiring a user token.

Fix this by updating the cluster profile to align with the new
default-off approach of API collections.

Along with this, add a toggle to the cluster profile to allow users to
toggle API collections on or off (default off) directly. This
is done via a new `with-api` cluster option (e.g. `-c ocp.with-api`).
Further, rename the `openshift` plugin option from `no-oc` to
`with-api`. This change not only makes the option use case far more
obvious, it will also align the use of the option to both `collect` and
`report` so that users need only be aware of a single option for either
method.

The cluster profile also has logic to detect which plugin option,
`no-oc` or `with-api` to use based on the (RHEL) sos version installed
on the nodes being inspected by the `ocp` cluster profile.

Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
---
 sos/collector/clusters/ocp.py   | 72 +++++++++++++++++++++++++++------
 sos/report/plugins/openshift.py | 26 +++++++-----
 2 files changed, 77 insertions(+), 21 deletions(-)

diff --git a/sos/collector/clusters/ocp.py b/sos/collector/clusters/ocp.py
index 9beb2f9b..e31d1903 100644
--- a/sos/collector/clusters/ocp.py
+++ b/sos/collector/clusters/ocp.py
@@ -30,7 +30,11 @@ class ocp(Cluster):
     clusterAdmin privileges.
 
     If this requires the use of a secondary configuration file, specify that
-    path with the 'kubeconfig' cluster option.
+    path with the 'kubeconfig' cluster option. This config file will also be
+    used on a single master node to perform API collections if the `with-api`
+    option is enabled (default disabled). If no `kubeconfig` option is given,
+    but `with-api` is enabled, the cluster profile will attempt to use a
+    well-known default kubeconfig file if it is available on the host.
 
     Alternatively, provide a clusterAdmin access token either via the 'token'
     cluster option or, preferably, the SOSOCPTOKEN environment variable.
@@ -45,7 +49,7 @@ class ocp(Cluster):
     option mentioned above.
 
     To avoid redundant collections of OCP API information (e.g. 'oc get'
-    commands), this profile will attempt to enable the openshift plugin on only
+    commands), this profile will attempt to enable the API collections on only
     a single master node. If the none of the master nodes have a functional
     'oc' binary available, *and* the --no-local option is used, that means that
     no API data will be collected.
@@ -63,7 +67,8 @@ class ocp(Cluster):
         ('label', '', 'Colon delimited list of labels to select nodes with'),
         ('role', 'master', 'Colon delimited list of roles to filter on'),
         ('kubeconfig', '', 'Path to the kubeconfig file'),
-        ('token', '', 'Service account token to use for oc authorization')
+        ('token', '', 'Service account token to use for oc authorization'),
+        ('with-api', False, 'Collect OCP API data from a master node')
     ]
 
     def fmt_oc_cmd(self, cmd):
@@ -219,13 +224,52 @@ class ocp(Cluster):
             return False
         return 'master' in self.node_dict[sosnode.address]['roles']
 
+    def _toggle_api_opt(self, node, use_api):
+        """In earlier versions of sos, the openshift plugin option that is
+        used to toggle the API collections was called `no-oc` rather than
+        `with-api`. This older plugin option had the inverse logic of the
+        current `with-api` option.
+
+        Use this to toggle the correct plugin option given the node's sos
+        version. Note that the use of version 4.2 here is tied to the RHEL
+        release (the only use case for this cluster profile) rather than
+        the upstream version given the backports for that downstream.
+
+        :param node:    The node being inspected for API collections
+        :type node:     ``SoSNode``
+
+        :param use_api: Should this node enable API collections?
+        :type use_api:  ``bool``
+        """
+        if node.check_sos_version('4.2-16'):
+            _opt = 'with-api'
+            _val = 'on' if use_api else 'off'
+        else:
+            _opt = 'no-oc'
+            _val = 'off' if use_api else 'on'
+        node.plugopts.append("openshift.%s=%s" % (_opt, _val))
+
     def set_primary_options(self, node):
+
         node.enable_plugins.append('openshift')
+        if not self.get_option('with-api'):
+            self._toggle_api_opt(node, False)
+            return
         if self.api_collect_enabled:
             # a primary has already been enabled for API collection, disable
             # it among others
-            node.plugopts.append('openshift.no-oc=on')
+            self._toggle_api_opt(node, False)
         else:
+            # running in a container, so reference the /host mount point
+            master_kube = (
+                '/host/etc/kubernetes/static-pod-resources/'
+                'kube-apiserver-certs/secrets/node-kubeconfigs/'
+                'localhost.kubeconfig'
+            )
+            _optconfig = self.get_option('kubeconfig')
+            if _optconfig and not _optconfig.startswith('/host'):
+                _optconfig = '/host/' + _optconfig
+            _kubeconfig = _optconfig or master_kube
             _oc_cmd = 'oc'
             if node.host.containerized:
                 _oc_cmd = '/host/bin/oc'
@@ -244,17 +288,21 @@ class ocp(Cluster):
                                       need_root=True)
             if can_oc['status'] == 0:
                 # the primary node can already access the API
+                self._toggle_api_opt(node, True)
                 self.api_collect_enabled = True
             elif self.token:
                 node.sos_env_vars['SOSOCPTOKEN'] = self.token
+                self._toggle_api_opt(node, True)
+                self.api_collect_enabled = True
+            elif node.file_exists(_kubeconfig):
+                # if the file exists, then the openshift sos plugin will use it
+                # if the with-api option is turned on
+                if not _kubeconfig == master_kube:
+                    node.plugopts.append(
+                        "openshift.kubeconfig=%s" % _kubeconfig
+                    )
+                self._toggle_api_opt(node, True)
                 self.api_collect_enabled = True
-            elif self.get_option('kubeconfig'):
-                kc = self.get_option('kubeconfig')
-                if node.file_exists(kc):
-                    if node.host.containerized:
-                        kc = "/host/%s" % kc
-                    node.sos_env_vars['KUBECONFIG'] = kc
-                    self.api_collect_enabled = True
             if self.api_collect_enabled:
                 msg = ("API collections will be performed on %s\nNote: API "
                        "collections may extend runtime by 10s of minutes\n"
@@ -264,6 +312,6 @@ class ocp(Cluster):
 
     def set_node_options(self, node):
         # don't attempt OC API collections on non-primary nodes
-        node.plugopts.append('openshift.no-oc=on')
+        self._toggle_api_opt(node, False)
 
 # vim: set et ts=4 sw=4 :
diff --git a/sos/report/plugins/openshift.py b/sos/report/plugins/openshift.py
index d643f04c..a41ab62b 100644
--- a/sos/report/plugins/openshift.py
+++ b/sos/report/plugins/openshift.py
@@ -19,7 +19,10 @@ class Openshift(Plugin, RedHatPlugin):
     further extending the kubernetes plugin (or the OCP 3.x extensions included
     in the Red Hat version of the kube plugin).
 
-    By default, this plugin will collect cluster information and inspect the
+    This plugin may collect OCP API information when the `with-api` option is
+    enabled. This option is disabled by default.
+
+    When enabled, this plugin will collect cluster information and inspect the
     default namespaces/projects that are created during deployment - i.e. the
     namespaces of the cluster projects matching openshift.* and kube.*. At the
     time of this plugin's creation that number of default projects is already
@@ -34,16 +37,20 @@ class Openshift(Plugin, RedHatPlugin):
 
     Users will need to either:
 
-        1) Provide the bearer token via the `-k openshift.token` option
-        2) Provide the bearer token via the `SOSOCPTOKEN` environment variable
-        3) Otherwise ensure that the root user can successfully run `oc` and
+        1) Accept the use of a well-known stock kubeconfig file provided via a
+           static pod resource for the kube-apiserver
+        2) Provide the bearer token via the `-k openshift.token` option
+        3) Provide the bearer token via the `SOSOCPTOKEN` environment variable
+        4) Otherwise ensure that the root user can successfully run `oc` and
            get proper output prior to running this plugin
 
 
-    It is highly suggested that option #2 be used first, as this will prevent
-    the token from being recorded in output saved to the archive. Option #1 may
+    It is highly suggested that option #1 be used first, as this uses well
+    known configurations and requires the least information from the user. If
+    using a token, it is recommended to use option #3 as this will prevent
+    the token from being recorded in output saved to the archive. Option #2 may
     be used if this is considered an acceptable risk. It is not recommended to
-    rely on option #3, though it will provide the functionality needed.
+    rely on option #4, though it will provide the functionality needed.
     """
 
     short_desc = 'Openshift Container Platform 4.x'
@@ -65,7 +72,8 @@ class Openshift(Plugin, RedHatPlugin):
                   desc='Path to a locally available kubeconfig file'),
         PluginOpt('host', default='https://localhost:6443',
                   desc='host address to use for oc login, including port'),
-        PluginOpt('no-oc', default=True, desc='do not collect `oc` output'),
+        PluginOpt('with-api', default=False,
+                  desc='collect output from the OCP API'),
         PluginOpt('podlogs', default=True, desc='collect logs from each pod'),
         PluginOpt('podlogs-filter', default='', val_type=str,
                   desc='only collect logs from pods matching this pattern'),
@@ -212,7 +220,7 @@ class Openshift(Plugin, RedHatPlugin):
         self.add_copy_spec('/etc/kubernetes/*')
 
         # see if we run `oc` commands
-        if not self.get_option('no-oc'):
+        if self.get_option('with-api'):
             can_run_oc = self._check_oc_logged_in()
         else:
             can_run_oc = False
-- 
2.34.3