diff --git a/.gitignore b/.gitignore index af17557..5e6b751 100644 --- a/.gitignore +++ b/.gitignore @@ -1 +1 @@ -SOURCES/3.6.tar.gz +SOURCES/3.7.tar.gz diff --git a/.sos.metadata b/.sos.metadata index 092d6e5..e1c0998 100644 --- a/.sos.metadata +++ b/.sos.metadata @@ -1 +1 @@ -aa090f917b4f54421e2ad2294a60fc124ef66a85 SOURCES/3.6.tar.gz +54d7ee7557c05c911e6e0bffa062347f66744f58 SOURCES/3.7.tar.gz diff --git a/SOURCES/sos-3.6-centos-branding.patch b/SOURCES/sos-3.6-centos-branding.patch deleted file mode 100644 index f88d36e..0000000 --- a/SOURCES/sos-3.6-centos-branding.patch +++ /dev/null @@ -1,105 +0,0 @@ -diff -uNrp sos-3.6.orig/sos/policies/redhat.py sos-3.6/sos/policies/redhat.py ---- sos-3.6.orig/sos/policies/redhat.py 2018-11-04 17:44:59.513116585 +0000 -+++ sos-3.6/sos/policies/redhat.py 2018-11-04 17:53:28.333731059 +0000 -@@ -32,9 +32,9 @@ OS_RELEASE = "/etc/os-release" - - - class RedHatPolicy(LinuxPolicy): -- distro = "Red Hat" -- vendor = "Red Hat" -- vendor_url = "http://www.redhat.com/" -+ distro = "CentOS" -+ vendor = "CentOS" -+ vendor_url = "http://www,centos.org/" - _redhat_release = '/etc/redhat-release' - _tmp_dir = "/var/tmp" - _rpmq_cmd = 'rpm -qa --queryformat "%{NAME}|%{VERSION}|%{RELEASE}\\n"' -@@ -92,9 +92,9 @@ class RedHatPolicy(LinuxPolicy): - - @classmethod - def check(cls): -- """This method checks to see if we are running on Red Hat. It must be -+ """This method checks to see if we are running on CentOS. It must be - overriden by concrete subclasses to return True when running on a -- Fedora, RHEL or other Red Hat distribution or False otherwise.""" -+ Fedora, RHEL or CentOS distribution or False otherwise.""" - return False - - def check_usrmove(self, pkgs): -@@ -185,7 +185,7 @@ class RedHatPolicy(LinuxPolicy): - return self.host_name() - - --# Container environment variables on Red Hat systems. -+# Container environment variables on CentOS systems. - ENV_CONTAINER = 'container' - ENV_HOST_SYSROOT = 'HOST' - -@@ -195,22 +195,22 @@ _opts_all_logs_verify = SoSOptions(all_l - _opts_all_logs_no_lsof = SoSOptions(all_logs=True, - plugopts=['process.lsof=off']) - --RHEL_RELEASE_STR = "Red Hat Enterprise Linux" -+RHEL_RELEASE_STR = "CentOS Linux" - - RHV = "rhv" --RHV_DESC = "Red Hat Virtualization" -+RHV_DESC = "CentOS Virtualization" - - RHEL = "rhel" - RHEL_DESC = RHEL_RELEASE_STR - - RHOSP = "rhosp" --RHOSP_DESC = "Red Hat OpenStack Platform" -+RHOSP_DESC = "RDO" - - RHOCP = "ocp" --RHOCP_DESC = "OpenShift Container Platform by Red Hat" -+RHOCP_DESC = "OpenShift" - - RH_SATELLITE = "satellite" --RH_SATELLITE_DESC = "Red Hat Satellite" -+RH_SATELLITE_DESC = "Satellite" - - NOTE_SIZE = "This preset may increase report size" - NOTE_TIME = "This preset may increase report run time" -@@ -230,9 +230,9 @@ rhel_presets = { - - - class RHELPolicy(RedHatPolicy): -- distro = RHEL_RELEASE_STR -- vendor = "Red Hat" -- vendor_url = "https://access.redhat.com/support/" -+ distro = "CentOS Linux" -+ vendor = "CentOS" -+ vendor_url = "https://wiki.centos.org/" - msg = _("""\ - This command will collect diagnostic and configuration \ - information from this %(distro)s system and installed \ -@@ -262,7 +262,7 @@ No changes will be made to system config - def check(cls): - """Test to see if the running host is a RHEL installation. 
- -- Checks for the presence of the "Red Hat Enterprise Linux" -+ Checks for the presence of the "CentOS Linux" - release string at the beginning of the NAME field in the - `/etc/os-release` file and returns ``True`` if it is - found, and ``False`` otherwise. -@@ -324,7 +324,7 @@ No changes will be made to system config - - ATOMIC = "atomic" - ATOMIC_RELEASE_STR = "Atomic" --ATOMIC_DESC = "Red Hat Enterprise Linux Atomic Host" -+ATOMIC_DESC = "CentOS Linux Atomic Host" - - atomic_presets = { - ATOMIC: PresetDefaults(name=ATOMIC, desc=ATOMIC_DESC, note=NOTE_TIME, -@@ -333,7 +333,7 @@ atomic_presets = { - - - class RedHatAtomicPolicy(RHELPolicy): -- distro = "Red Hat Atomic Host" -+ distro = "CentOS Atomic Host" - msg = _("""\ - This command will collect diagnostic and configuration \ - information from this %(distro)s system. diff --git a/SOURCES/sos-bz1311129-sos-conf-disabled-plugins-manpages.patch b/SOURCES/sos-bz1311129-sos-conf-disabled-plugins-manpages.patch new file mode 100644 index 0000000..bed3bde --- /dev/null +++ b/SOURCES/sos-bz1311129-sos-conf-disabled-plugins-manpages.patch @@ -0,0 +1,110 @@ +From 4cb21e2c16b55e7506a3cefd9148ba4bf49dbce1 Mon Sep 17 00:00:00 2001 +From: Pavel Moravec <pmoravec@redhat.com> +Date: Wed, 17 Apr 2019 13:17:24 +0200 +Subject: [PATCH] [sosreport] update sos.conf manpages by [general] section + description + +Since PR #1530, sosreport supports all command line options. Man pages +should document the enhancement. + +Resolves: #1652 + +Signed-off-by: Pavel Moravec <pmoravec@redhat.com> +--- + man/en/sos.conf.5 | 38 ++++++++++++++++++++++++++++++++++---- + 1 file changed, 34 insertions(+), 4 deletions(-) + +diff --git a/man/en/sos.conf.5 b/man/en/sos.conf.5 +index b40a48e1..ad18d5f2 100644 +--- a/man/en/sos.conf.5 ++++ b/man/en/sos.conf.5 +@@ -6,19 +6,49 @@ sos.conf \- sosreport configuration + sosreport uses a configuration file at /etc/sos.conf. + .SH PARAMETERS + .sp +-There are two sections in the sosreport configuration file: +-plugins, and tunables. Options are set using 'ini'-style +-\fBname = value\fP pairs. ++There are three sections in the sosreport configuration file: ++general, plugins and tunables. Options are set using 'ini'-style ++\fBname = value\fP pairs. Disabling/enabling a boolean option ++is done the same way like on command line (e.g. process.lsof=off). + + Some options accept a comma separated list of values. + ++Using options that dont expect a value (like all-logs or no-report) ++will result in enabling those options, regardless of value set. ++ ++Sections are parsed in the ordering: ++.br ++- \fB[general]\fP ++.br ++- \fB[plugins]\fP (disable) ++.br ++- \fB[plugins]\fP (enable) ++.br ++- \fB[tunables]\fP ++ ++.TP ++\fB[general]\fP ++<option> Sets (long) option value. Short options (i.e. z=auto) ++ are not supported. + .TP + \fB[plugins]\fP +-disable Comma separated list of plugins to disable. ++disable Comma separated list of plugins to disable. ++.br ++enable Comma separated list of plugins to enable. + .TP + \fB[tunables]\fP + plugin.option Alter available options for defined plugin. 
+ .SH EXAMPLES ++To use quiet and batch mode with 10 threads: ++.LP ++[general] ++.br ++batch=yes ++.br ++build=true ++.br ++threads=10 ++.sp + To disable the 'general' and 'filesys' plugins: + .LP + [plugins] +-- +2.17.2 + +From 84822ff1bbe2d5543daa8059b0a2270c88e473d6 Mon Sep 17 00:00:00 2001 +From: Pavel Moravec <pmoravec@redhat.com> +Date: Wed, 17 Apr 2019 11:51:09 +0200 +Subject: [PATCH] [sosreport] initialize disabled plugins properly when parsing + sos.conf + +opts.noplugins is referred when parsing "tunables" section, so +the variable must be set to empty list every time. + +Resolves: #1651 + +Signed-off-by: Pavel Moravec <pmoravec@redhat.com> +--- + sos/__init__.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/sos/__init__.py b/sos/__init__.py +index dfc7ed5f..ed59025a 100644 +--- a/sos/__init__.py ++++ b/sos/__init__.py +@@ -250,8 +250,8 @@ class SoSOptions(object): + optlist.extend(SoSOptions._opt_to_args(opt, val)) + opts._merge_opts(argparser.parse_args(optlist), is_default) + ++ opts.noplugins = [] + if config.has_option("plugins", "disable"): +- opts.noplugins = [] + opts.noplugins.extend([plugin.strip() for plugin in + config.get("plugins", "disable").split(',')]) + +-- +2.17.2 + diff --git a/SOURCES/sos-bz1474976-regexp-sub.patch b/SOURCES/sos-bz1474976-regexp-sub.patch deleted file mode 100644 index 7ffcd64..0000000 --- a/SOURCES/sos-bz1474976-regexp-sub.patch +++ /dev/null @@ -1,66 +0,0 @@ -From b96bdab03f06408e162b1733b20e8ba9fbf8e012 Mon Sep 17 00:00:00 2001 -From: "Bryn M. Reeves" <bmr@redhat.com> -Date: Mon, 2 Jul 2018 12:01:04 +0100 -Subject: [PATCH] [archive] fix add_string()/do_*_sub() regression - -A change in the handling of add_string() operations in the archive -class causes the Plugin string substitution methods to fail (since -the archive was enforcing a check that the path did not already -exist - for substitutions this is always the case). - -Maintain the check for content that is being copied into the -archive anew, but make the add_string() method override this and -disable the existence checks. - -Signed-off-by: Bryn M. Reeves <bmr@redhat.com> ---- - sos/archive.py | 14 ++++++++++---- - tests/archive_tests.py | 12 ++---------- - 2 files changed, 12 insertions(+), 14 deletions(-) - -diff --git a/sos/archive.py b/sos/archive.py -index d53baf41..e153c09a 100644 ---- a/sos/archive.py -+++ b/sos/archive.py -@@ -158,7 +158,7 @@ class FileCacheArchive(Archive): - name = name.lstrip(os.sep) - return (os.path.join(self._archive_root, name)) - -- def _check_path(self, src, path_type, dest=None): -+ def _check_path(self, src, path_type, dest=None, force=False): - """Check a new destination path in the archive. - - Since it is possible for multiple plugins to collect the same -@@ -185,6 +185,7 @@ class FileCacheArchive(Archive): - :param src: the source path to be copied to the archive - :param path_type: the type of object to be copied - :param dest: an optional destination path -+ :param force: force file creation even if the path exists - :returns: An absolute destination path if the path should be - copied now or `None` otherwise - """ -@@ -208,6 +209,9 @@ class FileCacheArchive(Archive): - stat.ISSOCK(mode) - ]) - -+ if force: -+ return dest -+ - # Check destination path presence and type - if os.path.exists(dest): - # Use lstat: we care about the current object, not the referent. 
-@@ -274,9 +278,11 @@ class FileCacheArchive(Archive): - with self._path_lock: - src = dest - -- dest = self._check_path(dest, P_FILE) -- if not dest: -- return -+ # add_string() is a special case: it must always take precedence -+ # over any exixting content in the archive, since it is used by -+ # the Plugin postprocessing hooks to perform regex substitution -+ # on file content. -+ dest = self._check_path(dest, P_FILE, force=True) - - f = codecs.open(dest, 'w', encoding='utf-8') - if isinstance(content, bytes): diff --git a/SOURCES/sos-bz1594327-archive-encryption.patch b/SOURCES/sos-bz1594327-archive-encryption.patch deleted file mode 100644 index 51c419f..0000000 --- a/SOURCES/sos-bz1594327-archive-encryption.patch +++ /dev/null @@ -1,262 +0,0 @@ -From 7b475f1da0f843b20437896737be04cc1c7bbc0a Mon Sep 17 00:00:00 2001 -From: Jake Hunsaker <jhunsake@redhat.com> -Date: Fri, 25 May 2018 13:38:27 -0400 -Subject: [PATCH] [sosreport] Add mechanism to encrypt final archive - -Adds an option to encrypt the resulting archive that sos generates. -There are two methods for doing so: - - --encrypt-key Uses a key-pair for asymmetric encryption - --encrypt-pass Uses a password for symmetric encryption - -For key-pair encryption, the key-to-be-used must be imported into the -root user's keyring, as gpg does not allow for the use of keyfiles. - -If the encryption process fails, sos will not abort as the unencrypted -archive will have already been created. The assumption being that the -archive is still of use and/or the user has another means of encrypting -it. - -Resolves: #1320 - -Signed-off-by: Jake Hunsaker <jhunsake@redhat.com> -Signed-off-by: Bryn M. Reeves <bmr@redhat.com> ---- - man/en/sosreport.1 | 28 ++++++++++++++++++++++ - sos/__init__.py | 10 ++++---- - sos/archive.py | 63 ++++++++++++++++++++++++++++++++++++++++++++++---- - sos/sosreport.py | 20 ++++++++++++++-- - tests/archive_tests.py | 3 ++- - 5 files changed, 113 insertions(+), 11 deletions(-) - -diff --git a/man/en/sosreport.1 b/man/en/sosreport.1 -index b0adcd8bb..b6051edc1 100644 ---- a/man/en/sosreport.1 -+++ b/man/en/sosreport.1 -@@ -22,6 +22,8 @@ sosreport \- Collect and package diagnostic and support data - [--log-size]\fR - [--all-logs]\fR - [-z|--compression-type method]\fR -+ [--encrypt-key KEY]\fR -+ [--encrypt-pass PASS]\fR - [--experimental]\fR - [-h|--help]\fR - -@@ -120,6 +122,32 @@ increase the size of reports. - .B \-z, \--compression-type METHOD - Override the default compression type specified by the active policy. - .TP -+.B \--encrypt-key KEY -+Encrypts the resulting archive that sosreport produces using GPG. KEY must be -+an existing key in the user's keyring as GPG does not allow for keyfiles. -+KEY can be any value accepted by gpg's 'recipient' option. -+ -+Note that the user running sosreport must match the user owning the keyring -+from which keys will be obtained. In particular this means that if sudo is -+used to run sosreport, the keyring must also be set up using sudo -+(or direct shell access to the account). -+ -+Users should be aware that encrypting the final archive will result in sos -+using double the amount of temporary disk space - the encrypted archive must be -+written as a separate, rather than replacement, file within the temp directory -+that sos writes the archive to. However, since the encrypted archive will be -+the same size as the original archive, there is no additional space consumption -+once the temporary directory is removed at the end of execution. 
-+ -+This means that only the encrypted archive is present on disk after sos -+finishes running. -+ -+If encryption fails for any reason, the original unencrypted archive is -+preserved instead. -+.TP -+.B \--encrypt-pass PASS -+The same as \--encrypt-key, but use the provided PASS for symmetric encryption -+rather than key-pair encryption. - .TP - .B \--batch - Generate archive without prompting for interactive input. -diff --git a/sos/__init__.py b/sos/__init__.py -index ef4524c60..cd9779bdc 100644 ---- a/sos/__init__.py -+++ b/sos/__init__.py -@@ -45,10 +45,10 @@ def _default(msg): - _arg_names = [ - 'add_preset', 'alloptions', 'all_logs', 'batch', 'build', 'case_id', - 'chroot', 'compression_type', 'config_file', 'desc', 'debug', 'del_preset', -- 'enableplugins', 'experimental', 'label', 'list_plugins', 'list_presets', -- 'list_profiles', 'log_size', 'noplugins', 'noreport', 'note', -- 'onlyplugins', 'plugopts', 'preset', 'profiles', 'quiet', 'sysroot', -- 'threads', 'tmp_dir', 'verbosity', 'verify' -+ 'enableplugins', 'encrypt_key', 'encrypt_pass', 'experimental', 'label', -+ 'list_plugins', 'list_presets', 'list_profiles', 'log_size', 'noplugins', -+ 'noreport', 'note', 'onlyplugins', 'plugopts', 'preset', 'profiles', -+ 'quiet', 'sysroot', 'threads', 'tmp_dir', 'verbosity', 'verify' - ] - - #: Arguments with non-zero default values -@@ -84,6 +84,8 @@ class SoSOptions(object): - del_preset = "" - desc = "" - enableplugins = [] -+ encrypt_key = None -+ encrypt_pass = None - experimental = False - label = "" - list_plugins = False -diff --git a/sos/archive.py b/sos/archive.py -index e153c09ad..263e3dd3f 100644 ---- a/sos/archive.py -+++ b/sos/archive.py -@@ -142,11 +142,12 @@ class FileCacheArchive(Archive): - _archive_root = "" - _archive_name = "" - -- def __init__(self, name, tmpdir, policy, threads): -+ def __init__(self, name, tmpdir, policy, threads, enc_opts): - self._name = name - self._tmp_dir = tmpdir - self._policy = policy - self._threads = threads -+ self.enc_opts = enc_opts - self._archive_root = os.path.join(tmpdir, name) - with self._path_lock: - os.makedirs(self._archive_root, 0o700) -@@ -384,12 +385,65 @@ def finalize(self, method): - os.stat(self._archive_name).st_size)) - self.method = method - try: -- return self._compress() -+ res = self._compress() - except Exception as e: - exp_msg = "An error occurred compressing the archive: " - self.log_error("%s %s" % (exp_msg, e)) - return self.name() - -+ if self.enc_opts['encrypt']: -+ try: -+ return self._encrypt(res) -+ except Exception as e: -+ exp_msg = "An error occurred encrypting the archive:" -+ self.log_error("%s %s" % (exp_msg, e)) -+ return res -+ else: -+ return res -+ -+ def _encrypt(self, archive): -+ """Encrypts the compressed archive using GPG. -+ -+ If encryption fails for any reason, it should be logged by sos but not -+ cause execution to stop. The assumption is that the unencrypted archive -+ would still be of use to the user, and/or that the end user has another -+ means of securing the archive. -+ -+ Returns the name of the encrypted archive, or raises an exception to -+ signal that encryption failed and the unencrypted archive name should -+ be used. 
-+ """ -+ arc_name = archive.replace("sosreport-", "secured-sosreport-") -+ arc_name += ".gpg" -+ enc_cmd = "gpg --batch -o %s " % arc_name -+ env = None -+ if self.enc_opts["key"]: -+ # need to assume a trusted key here to be able to encrypt the -+ # archive non-interactively -+ enc_cmd += "--trust-model always -e -r %s " % self.enc_opts["key"] -+ enc_cmd += archive -+ if self.enc_opts["password"]: -+ # prevent change of gpg options using a long password, but also -+ # prevent the addition of quote characters to the passphrase -+ passwd = "%s" % self.enc_opts["password"].replace('\'"', '') -+ env = {"sos_gpg": passwd} -+ enc_cmd += "-c --passphrase-fd 0 " -+ enc_cmd = "/bin/bash -c \"echo $sos_gpg | %s\"" % enc_cmd -+ enc_cmd += archive -+ r = sos_get_command_output(enc_cmd, timeout=0, env=env) -+ if r["status"] == 0: -+ return arc_name -+ elif r["status"] == 2: -+ if self.enc_opts["key"]: -+ msg = "Specified key not in keyring" -+ else: -+ msg = "Could not read passphrase" -+ else: -+ # TODO: report the actual error from gpg. Currently, we cannot as -+ # sos_get_command_output() does not capture stderr -+ msg = "gpg exited with code %s" % r["status"] -+ raise Exception(msg) -+ - - # Compatibility version of the tarfile.TarFile class. This exists to allow - # compatibility with PY2 runtimes that lack the 'filter' parameter to the -@@ -468,8 +522,9 @@ class TarFileArchive(FileCacheArchive): - method = None - _with_selinux_context = False - -- def __init__(self, name, tmpdir, policy, threads): -- super(TarFileArchive, self).__init__(name, tmpdir, policy, threads) -+ def __init__(self, name, tmpdir, policy, threads, enc_opts): -+ super(TarFileArchive, self).__init__(name, tmpdir, policy, threads, -+ enc_opts) - self._suffix = "tar" - self._archive_name = os.path.join(tmpdir, self.name()) - -diff --git a/sos/sosreport.py b/sos/sosreport.py -index 60802617c..00c3e8110 100644 ---- a/sos/sosreport.py -+++ b/sos/sosreport.py -@@ -316,6 +316,13 @@ def _parse_args(args): - preset_grp.add_argument("--del-preset", type=str, action="store", - help="Delete the named command line preset") - -+ encrypt_grp = parser.add_mutually_exclusive_group() -+ encrypt_grp.add_argument("--encrypt-key", -+ help="Encrypt the final archive using a GPG " -+ "key-pair") -+ encrypt_grp.add_argument("--encrypt-pass", -+ help="Encrypt the final archive using a password") -+ - return parser.parse_args(args) - - -@@ -431,16 +438,25 @@ def get_temp_file(self): - return self.tempfile_util.new() - - def _set_archive(self): -+ enc_opts = { -+ 'encrypt': True if (self.opts.encrypt_pass or -+ self.opts.encrypt_key) else False, -+ 'key': self.opts.encrypt_key, -+ 'password': self.opts.encrypt_pass -+ } -+ - archive_name = os.path.join(self.tmpdir, - self.policy.get_archive_name()) - if self.opts.compression_type == 'auto': - auto_archive = self.policy.get_preferred_archive() - self.archive = auto_archive(archive_name, self.tmpdir, -- self.policy, self.opts.threads) -+ self.policy, self.opts.threads, -+ enc_opts) - - else: - self.archive = TarFileArchive(archive_name, self.tmpdir, -- self.policy, self.opts.threads) -+ self.policy, self.opts.threads, -+ enc_opts) - - self.archive.set_debug(True if self.opts.debug else False) - -diff --git a/tests/archive_tests.py b/tests/archive_tests.py -index b4dd8d0ff..e5b329b5f 100644 ---- a/tests/archive_tests.py -+++ b/tests/archive_tests.py -@@ -19,7 +19,8 @@ class TarFileArchiveTest(unittest.TestCase): - - def setUp(self): - self.tmpdir = tempfile.mkdtemp() -- self.tf = TarFileArchive('test', 
self.tmpdir, Policy(), 1) -+ enc = {'encrypt': False} -+ self.tf = TarFileArchive('test', self.tmpdir, Policy(), 1, enc) - - def tearDown(self): - shutil.rmtree(self.tmpdir) diff --git a/SOURCES/sos-bz1596494-cds-on-rhui3.patch b/SOURCES/sos-bz1596494-cds-on-rhui3.patch deleted file mode 100644 index 5c55040..0000000 --- a/SOURCES/sos-bz1596494-cds-on-rhui3.patch +++ /dev/null @@ -1,33 +0,0 @@ -From 62f4affbc9fb6da06dd5707e9aa659d206352e87 Mon Sep 17 00:00:00 2001 -From: Pavel Moravec <pmoravec@redhat.com> -Date: Tue, 3 Jul 2018 13:02:09 +0200 -Subject: [PATCH] [rhui] Fix detection of CDS for RHUI3 - -Detection of CDS node on RHUI 3 cant rely on deprecated pulp-cds package -but rather on rhui-mirrorlist one. - -Resolves: #1375 - -Signed-off-by: Pavel Moravec <pmoravec@redhat.com> -Signed-off-by: Bryn M. Reeves <bmr@redhat.com> ---- - sos/plugins/rhui.py | 6 +++++- - 1 file changed, 5 insertions(+), 1 deletion(-) - -diff --git a/sos/plugins/rhui.py b/sos/plugins/rhui.py -index 2b1e2baa7..459a89831 100644 ---- a/sos/plugins/rhui.py -+++ b/sos/plugins/rhui.py -@@ -22,7 +22,11 @@ class Rhui(Plugin, RedHatPlugin): - files = [rhui_debug_path] - - def setup(self): -- if self.is_installed("pulp-cds"): -+ cds_installed = [ -+ self.is_installed("pulp-cds"), -+ self.is_installed("rhui-mirrorlist") -+ ] -+ if any(cds_installed): - cds = "--cds" - else: - cds = "" diff --git a/SOURCES/sos-bz1597532-stat-isblk.patch b/SOURCES/sos-bz1597532-stat-isblk.patch deleted file mode 100644 index 6200ffd..0000000 --- a/SOURCES/sos-bz1597532-stat-isblk.patch +++ /dev/null @@ -1,36 +0,0 @@ -From 4127d02f00561b458398ce2b5ced7ae853b23227 Mon Sep 17 00:00:00 2001 -From: Bryan Quigley <bryan.quigley@canonical.com> -Date: Mon, 2 Jul 2018 16:48:21 -0400 -Subject: [PATCH] [archive] fix stat typo - -They're just missing the S_ in front of them so if that code gets -reached it fails. - -Fixes: #1373 -Resolves: #1374 - -Signed-off-by: Bryan Quigley <bryan.quigley@canonical.com> -Signed-off-by: Bryn M. 
Reeves <bmr@redhat.com> ---- - sos/archive.py | 8 ++++---- - 1 file changed, 4 insertions(+), 4 deletions(-) - -diff --git a/sos/archive.py b/sos/archive.py -index 263e3dd3f..fdf6f9a80 100644 ---- a/sos/archive.py -+++ b/sos/archive.py -@@ -204,10 +204,10 @@ def _check_path(self, src, path_type, dest=None, force=False): - - def is_special(mode): - return any([ -- stat.ISBLK(mode), -- stat.ISCHR(mode), -- stat.ISFIFO(mode), -- stat.ISSOCK(mode) -+ stat.S_ISBLK(mode), -+ stat.S_ISCHR(mode), -+ stat.S_ISFIFO(mode), -+ stat.S_ISSOCK(mode) - ]) - - if force: diff --git a/SOURCES/sos-bz1600158-rhv-log-collector-analyzer.patch b/SOURCES/sos-bz1600158-rhv-log-collector-analyzer.patch deleted file mode 100644 index 6930786..0000000 --- a/SOURCES/sos-bz1600158-rhv-log-collector-analyzer.patch +++ /dev/null @@ -1,66 +0,0 @@ -From d297b2116fd864c65dba76b343f5101466c0eeb7 Mon Sep 17 00:00:00 2001 -From: Douglas Schilling Landgraf <dougsland@gmail.com> -Date: Tue, 10 Jul 2018 12:03:41 -0400 -Subject: [PATCH] [rhv-log-collector-analyzer] Add new plugin for RHV - -This commit adds the plugin rhv-log-collector-analyzer, it will -collect: - -- Output of rhv-log-collector-analyer --json -- Generated HTML file from --live - -Signed-off-by: Douglas Schilling Landgraf <dougsland@redhat.com> ---- - sos/plugins/rhv_analyzer.py | 40 +++++++++++++++++++++++++++++++++++++ - 1 file changed, 40 insertions(+) - create mode 100644 sos/plugins/rhv_analyzer.py - -diff --git a/sos/plugins/rhv_analyzer.py b/sos/plugins/rhv_analyzer.py -new file mode 100644 -index 00000000..7c233a0b ---- /dev/null -+++ b/sos/plugins/rhv_analyzer.py -@@ -0,0 +1,40 @@ -+# Copyright (C) 2018 Red Hat, Inc. -+# -+# This file is part of the sos project: https://github.com/sosreport/sos -+# -+# This copyrighted material is made available to anyone wishing to use, -+# modify, copy, or redistribute it subject to the terms and conditions of -+# version 2 of the GNU General Public License. -+# -+# See the LICENSE file in the source distribution for further information. -+ -+from sos.plugins import Plugin, RedHatPlugin -+ -+ -+class RhvLogCollectorAnalyzer(Plugin, RedHatPlugin): -+ """RHV Log Collector Analyzer""" -+ -+ packages = ('rhv-log-collector-analyzer',) -+ -+ plugin_name = 'RhvLogCollectorAnalyzer' -+ profiles = ('virt',) -+ -+ def setup(self): -+ tool_name = 'rhv-log-collector-analyzer' -+ report = "{dircmd}/analyzer-report.html".format( -+ dircmd=self.get_cmd_output_path() -+ ) -+ -+ self.add_cmd_output( -+ "{tool_name}" -+ " --live" -+ " --html={report}".format( -+ report=report, tool_name=tool_name) -+ ) -+ -+ self.add_cmd_output( -+ "{tool_name}" -+ " --json".format(tool_name=tool_name) -+ ) -+ -+# vim: expandtab tabstop=4 shiftwidth=4 --- -2.17.1 - diff --git a/SOURCES/sos-bz1608384-archive-name-sanitize.patch b/SOURCES/sos-bz1608384-archive-name-sanitize.patch deleted file mode 100644 index 4c48384..0000000 --- a/SOURCES/sos-bz1608384-archive-name-sanitize.patch +++ /dev/null @@ -1,52 +0,0 @@ -From bc650cd161548159e551ddc201596bf19b1865d0 Mon Sep 17 00:00:00 2001 -From: Pavel Moravec <pmoravec@redhat.com> -Date: Fri, 27 Jul 2018 08:56:37 +0200 -Subject: [PATCH] [policies] sanitize report label - -similarly like we sanitize case id, we should sanitize report label -to e.g. exclude spaces from final tarball name. 
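For context, a minimal sketch of the regex-based sanitization that the sos-bz1608384 patch below generalises from case IDs to report labels. The regular expression is the one used in sos/policies/__init__.py; the standalone helper and the sample label are illustrative only.

    import re

    def sanitize_filename(name):
        # Keep only alphanumerics, '-', ',' and '.', mirroring the
        # sanitize_filename() helper introduced in the patch below.
        return re.sub(r"[^-a-z,A-Z.0-9]", "", name)

    # Hypothetical label containing spaces and a slash:
    print(sanitize_filename("my lab / case 01"))   # -> "mylabcase01"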
- -Resolves: #1389 - -Signed-off-by: Pavel Moravec <pmoravec@redhat.com> ---- - sos/policies/__init__.py | 9 +++------ - 1 file changed, 3 insertions(+), 6 deletions(-) - -diff --git a/sos/policies/__init__.py b/sos/policies/__init__.py -index 7b301dec..65d8aac6 100644 ---- a/sos/policies/__init__.py -+++ b/sos/policies/__init__.py -@@ -408,7 +408,7 @@ No changes will be made to system configuration. - date=date, - rand=rand - ) -- return time.strftime(nstr) -+ return self.sanitize_filename(time.strftime(nstr)) - - # for some specific binaries like "xz", we need to determine package - # providing it; that is policy specific. By default return the binary -@@ -726,8 +726,8 @@ class LinuxPolicy(Policy): - """Returns the name usd in the pre_work step""" - return self.host_name() - -- def sanitize_case_id(self, case_id): -- return re.sub(r"[^-a-z,A-Z.0-9]", "", case_id) -+ def sanitize_filename(self, name): -+ return re.sub(r"[^-a-z,A-Z.0-9]", "", name) - - def lsmod(self): - """Return a list of kernel module names as strings. -@@ -755,9 +755,6 @@ class LinuxPolicy(Policy): - if cmdline_opts.case_id: - self.case_id = cmdline_opts.case_id - -- if self.case_id: -- self.case_id = self.sanitize_case_id(self.case_id) -- - return - - --- -2.17.1 - diff --git a/SOURCES/sos-bz1609135-ceph-dont-collect-tmp-mnt.patch b/SOURCES/sos-bz1609135-ceph-dont-collect-tmp-mnt.patch deleted file mode 100644 index 400c654..0000000 --- a/SOURCES/sos-bz1609135-ceph-dont-collect-tmp-mnt.patch +++ /dev/null @@ -1,44 +0,0 @@ -From dfed1abf3cac691cfc669bbf4e07e58e2e637776 Mon Sep 17 00:00:00 2001 -From: Pavel Moravec <pmoravec@redhat.com> -Date: Fri, 27 Jul 2018 08:27:45 +0200 -Subject: [PATCH] [apparmor,ceph] fix typo in add_forbidden_path - -commit 29a40b7 removed leading '/' from two forbidden paths - -Resolves: #1388 - -Signed-off-by: Pavel Moravec <pmoravec@redhat.com> ---- - sos/plugins/apparmor.py | 2 +- - sos/plugins/ceph.py | 2 +- - 2 files changed, 2 insertions(+), 2 deletions(-) - -diff --git a/sos/plugins/apparmor.py b/sos/plugins/apparmor.py -index c4c64baf..e239c0b5 100644 ---- a/sos/plugins/apparmor.py -+++ b/sos/plugins/apparmor.py -@@ -26,7 +26,7 @@ class Apparmor(Plugin, UbuntuPlugin): - self.add_forbidden_path([ - "/etc/apparmor.d/cache", - "/etc/apparmor.d/libvirt/libvirt*", -- "etc/apparmor.d/abstractions" -+ "/etc/apparmor.d/abstractions" - ]) - - self.add_cmd_output([ -diff --git a/sos/plugins/ceph.py b/sos/plugins/ceph.py -index 10e48b62..ed6816b2 100644 ---- a/sos/plugins/ceph.py -+++ b/sos/plugins/ceph.py -@@ -77,7 +77,7 @@ class Ceph(Plugin, RedHatPlugin, UbuntuPlugin): - "/var/lib/ceph/mon/*", - # Excludes temporary ceph-osd mount location like - # /var/lib/ceph/tmp/mnt.XXXX from sos collection. -- "var/lib/ceph/tmp/*mnt*", -+ "/var/lib/ceph/tmp/*mnt*", - "/etc/ceph/*bindpass*" - ]) - --- -2.17.1 - diff --git a/SOURCES/sos-bz1613806-rhosp-lsof-optional.patch b/SOURCES/sos-bz1613806-rhosp-lsof-optional.patch deleted file mode 100644 index 9a555bb..0000000 --- a/SOURCES/sos-bz1613806-rhosp-lsof-optional.patch +++ /dev/null @@ -1,113 +0,0 @@ -From a55680e6c8ac87fdf4ee3100717001c1f6f6a08b Mon Sep 17 00:00:00 2001 -From: Pavel Moravec <pmoravec@redhat.com> -Date: Thu, 9 Aug 2018 08:59:53 +0200 -Subject: [PATCH 1/3] [process] make lsof execution optional - -Make calling of lsof command optional (but enabled by default). - -Also remove "collect lsof-threads when --all-logs" as all-logs -has nothing in common. 
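Before the patch body below, a minimal sketch (not sos's real SoSOptions or Plugin code) of how a plugopts entry such as process.lsof=off, which the RHOSP preset below sets, maps onto a disabled boolean plugin option. The parsing helper is illustrative only.

    def parse_plugopts(plugopts):
        # Turn entries like 'process.lsof=off' into {('process', 'lsof'): False}
        parsed = {}
        for entry in plugopts:
            name, _, value = entry.partition("=")
            plugin, _, option = name.partition(".")
            # Boolean options take on/off style values, as on the command line
            parsed[(plugin, option)] = value.lower() not in ("off", "false", "0")
        return parsed

    print(parse_plugopts(["process.lsof=off"]))   # {('process', 'lsof'): False}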
- -Resolves: #1394 - -Signed-off-by: Pavel Moravec <pmoravec@redhat.com> ---- - sos/plugins/process.py | 6 ++++-- - 1 file changed, 4 insertions(+), 2 deletions(-) - -diff --git a/sos/plugins/process.py b/sos/plugins/process.py -index 755eec8d..d1c455a5 100644 ---- a/sos/plugins/process.py -+++ b/sos/plugins/process.py -@@ -17,6 +17,7 @@ class Process(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin): - profiles = ('system',) - - option_list = [ -+ ("lsof", "gathers information on all open files", "slow", True), - ("lsof-threads", "gathers threads' open file info if supported", - "slow", False) - ] -@@ -35,9 +36,10 @@ class Process(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin): - - self.add_cmd_output("ps auxwww", root_symlink="ps") - self.add_cmd_output("pstree", root_symlink="pstree") -- self.add_cmd_output("lsof -b +M -n -l -c ''", root_symlink="lsof") -+ if self.get_option("lsof"): -+ self.add_cmd_output("lsof -b +M -n -l -c ''", root_symlink="lsof") - -- if self.get_option("lsof-threads") or self.get_option("all_logs"): -+ if self.get_option("lsof-threads"): - self.add_cmd_output("lsof -b +M -n -l") - - self.add_cmd_output([ --- -2.17.1 - -From 48a1a00685c680ba9fbd5c9b10377e8d0551a926 Mon Sep 17 00:00:00 2001 -From: Pavel Moravec <pmoravec@redhat.com> -Date: Thu, 9 Aug 2018 18:11:38 +0200 -Subject: [PATCH 2/3] [policies] RHOSP preset with -k process.lsof=off - -Make lsof calls on OSP systems disabled by default. - -Relevant to: #1395 - -Signed-off-by: Pavel Moravec <pmoravec@redhat.com> ---- - sos/policies/redhat.py | 4 +++- - 1 file changed, 3 insertions(+), 1 deletion(-) - -diff --git a/sos/policies/redhat.py b/sos/policies/redhat.py -index cfbf7808..ee687d46 100644 ---- a/sos/policies/redhat.py -+++ b/sos/policies/redhat.py -@@ -192,6 +192,8 @@ ENV_HOST_SYSROOT = 'HOST' - _opts_verify = SoSOptions(verify=True) - _opts_all_logs = SoSOptions(all_logs=True) - _opts_all_logs_verify = SoSOptions(all_logs=True, verify=True) -+_opts_all_logs_no_lsof = SoSOptions(all_logs=True, -+ plugopts=['process.lsof=off']) - - RHEL_RELEASE_STR = "Red Hat Enterprise Linux" - -@@ -219,7 +221,7 @@ rhel_presets = { - opts=_opts_verify), - RHEL: PresetDefaults(name=RHEL, desc=RHEL_DESC), - RHOSP: PresetDefaults(name=RHOSP, desc=RHOSP_DESC, note=NOTE_SIZE, -- opts=_opts_all_logs), -+ opts=_opts_all_logs_no_lsof), - RHOCP: PresetDefaults(name=RHOCP, desc=RHOCP_DESC, note=NOTE_SIZE_TIME, - opts=_opts_all_logs_verify), - RH_SATELLITE: PresetDefaults(name=RH_SATELLITE, desc=RH_SATELLITE_DESC, --- -2.17.1 - -From 84c30742254a536f70bb4217756416bcf0e8a51b Mon Sep 17 00:00:00 2001 -From: Pavel Moravec <pmoravec@redhat.com> -Date: Thu, 9 Aug 2018 18:14:56 +0200 -Subject: [PATCH 3/3] [policies] enable RHOSP preset by presence of - rhosp-release package - -Resolves: #1395 - -Signed-off-by: Pavel Moravec <pmoravec@redhat.com> ---- - sos/policies/redhat.py | 2 ++ - 1 file changed, 2 insertions(+) - -diff --git a/sos/policies/redhat.py b/sos/policies/redhat.py -index ee687d46..5bfbade2 100644 ---- a/sos/policies/redhat.py -+++ b/sos/policies/redhat.py -@@ -315,6 +315,8 @@ No changes will be made to system configuration. 
- # Package based checks - if self.pkg_by_name("satellite-common") is not None: - return self.find_preset(RH_SATELLITE) -+ if self.pkg_by_name("rhosp-release") is not None: -+ return self.find_preset(RHOSP) - - # Vanilla RHEL is default - return self.find_preset(RHEL) --- -2.17.1 - diff --git a/SOURCES/sos-bz1616030-etcd-kube-osp-3-10.patch b/SOURCES/sos-bz1616030-etcd-kube-osp-3-10.patch deleted file mode 100644 index b08251c..0000000 --- a/SOURCES/sos-bz1616030-etcd-kube-osp-3-10.patch +++ /dev/null @@ -1,325 +0,0 @@ -From 6372a7f7f09511d864aa6bd894109d937f4fda65 Mon Sep 17 00:00:00 2001 -From: Jake Hunsaker <jhunsake@redhat.com> -Date: Thu, 12 Jul 2018 12:36:25 -0400 -Subject: [PATCH 1/3] [kubernetes|etcd] Support OpenShift 3.10 deployments - -The 3.10 version of OCP changes the deployment configurations for etcd -and kubernetes components, and additionally changes the way the etcdctl -command is called when running in a static pod. Update these plugins to -support this new deployment style. - -Signed-off-by: Jake Hunsaker <jhunsake@redhat.com> ---- - sos/plugins/etcd.py | 11 ++- - sos/plugins/kubernetes.py | 148 +++++++++++++++++++------------------- - 2 files changed, 83 insertions(+), 76 deletions(-) - -diff --git a/sos/plugins/etcd.py b/sos/plugins/etcd.py -index c343f750..c8ee3849 100644 ---- a/sos/plugins/etcd.py -+++ b/sos/plugins/etcd.py -@@ -10,6 +10,7 @@ - # See the LICENSE file in the source distribution for further information. - - from sos.plugins import Plugin, RedHatPlugin -+from os import path - - - class etcd(Plugin, RedHatPlugin): -@@ -19,10 +20,14 @@ class etcd(Plugin, RedHatPlugin): - plugin_name = 'etcd' - packages = ('etcd',) - profiles = ('container', 'system', 'services', 'cluster') -- -- cmd = 'etcdctl' -+ files = ('/etc/origin/node/pods/etcd.yaml',) - - def setup(self): -+ if path.exists('/etc/origin/node/pods/etcd.yaml'): -+ etcd_cmd = 'master-exec etcd etcd etcdctl' -+ else: -+ etcd_cmd = 'etcdctl' -+ - etcd_url = self.get_etcd_url() - - self.add_forbidden_path('/etc/etcd/ca') -@@ -35,7 +40,7 @@ class etcd(Plugin, RedHatPlugin): - 'ls --recursive', - ] - -- self.add_cmd_output(['%s %s' % (self.cmd, sub) for sub in subcmds]) -+ self.add_cmd_output(['%s %s' % (etcd_cmd, sub) for sub in subcmds]) - - urls = [ - '/v2/stats/leader', -diff --git a/sos/plugins/kubernetes.py b/sos/plugins/kubernetes.py -index e75c7a37..21cb51df 100644 ---- a/sos/plugins/kubernetes.py -+++ b/sos/plugins/kubernetes.py -@@ -18,11 +18,16 @@ class kubernetes(Plugin, RedHatPlugin): - """Kubernetes plugin - """ - -- # Red Hat Atomic Platform and OpenShift Enterprise use the -- # atomic-openshift-master package to provide kubernetes -+ # OpenShift Container Platform uses the atomic-openshift-master package -+ # to provide kubernetes - packages = ('kubernetes', 'kubernetes-master', 'atomic-openshift-master') - profiles = ('container',) -- files = ("/etc/origin/master/master-config.yaml",) -+ # use files only for masters, rely on package list for nodes -+ files = ( -+ "/var/run/kubernetes/apiserver.key", -+ "/etc/origin/master/", -+ "/etc/origin/node/pods/master-config.yaml" -+ ) - - option_list = [ - ("all", "also collect all namespaces output separately", -@@ -33,12 +38,7 @@ class kubernetes(Plugin, RedHatPlugin): - ] - - def check_is_master(self): -- if any([ -- path.exists("/var/run/kubernetes/apiserver.key"), -- path.exists("/etc/origin/master/master-config.yaml") -- ]): -- return True -- return False -+ return any([path.exists(f) for f in self.files]) - - def setup(self): - 
self.add_copy_spec("/etc/kubernetes") -@@ -56,74 +56,76 @@ class kubernetes(Plugin, RedHatPlugin): - self.add_journal(units=svc) - - # We can only grab kubectl output from the master -- if self.check_is_master(): -- kube_cmd = "kubectl " -- if path.exists('/etc/origin/master/admin.kubeconfig'): -- kube_cmd += "--config=/etc/origin/master/admin.kubeconfig" -- -- kube_get_cmd = "get -o json " -- for subcmd in ['version', 'config view']: -- self.add_cmd_output('%s %s' % (kube_cmd, subcmd)) -- -- # get all namespaces in use -- kn = self.get_command_output('%s get namespaces' % kube_cmd) -- knsps = [n.split()[0] for n in kn['output'].splitlines()[1:] if n] -- -- resources = [ -- 'limitrange', -- 'pods', -- 'pvc', -- 'rc', -- 'resourcequota', -- 'services' -- ] -- -- # nodes and pvs are not namespaced, must pull separately. -- # Also collect master metrics -- self.add_cmd_output([ -- "{} get -o json nodes".format(kube_cmd), -- "{} get -o json pv".format(kube_cmd), -- "{} get --raw /metrics".format(kube_cmd) -- ]) -- -- for n in knsps: -- knsp = '--namespace=%s' % n -- if self.get_option('all'): -- k_cmd = '%s %s %s' % (kube_cmd, kube_get_cmd, knsp) -- -- self.add_cmd_output('%s events' % k_cmd) -+ if not self.check_is_master(): -+ return -+ -+ kube_cmd = "kubectl " -+ if path.exists('/etc/origin/master/admin.kubeconfig'): -+ kube_cmd += "--config=/etc/origin/master/admin.kubeconfig" -+ -+ kube_get_cmd = "get -o json " -+ for subcmd in ['version', 'config view']: -+ self.add_cmd_output('%s %s' % (kube_cmd, subcmd)) -+ -+ # get all namespaces in use -+ kn = self.get_command_output('%s get namespaces' % kube_cmd) -+ knsps = [n.split()[0] for n in kn['output'].splitlines()[1:] if n] -+ -+ resources = [ -+ 'limitrange', -+ 'pods', -+ 'pvc', -+ 'rc', -+ 'resourcequota', -+ 'services' -+ ] -+ -+ # nodes and pvs are not namespaced, must pull separately. 
-+ # Also collect master metrics -+ self.add_cmd_output([ -+ "{} get -o json nodes".format(kube_cmd), -+ "{} get -o json pv".format(kube_cmd), -+ "{} get --raw /metrics".format(kube_cmd) -+ ]) -+ -+ for n in knsps: -+ knsp = '--namespace=%s' % n -+ if self.get_option('all'): -+ k_cmd = '%s %s %s' % (kube_cmd, kube_get_cmd, knsp) -+ -+ self.add_cmd_output('%s events' % k_cmd) - -- for res in resources: -- self.add_cmd_output('%s %s' % (k_cmd, res)) -- -- if self.get_option('describe'): -- # need to drop json formatting for this -- k_cmd = '%s get %s' % (kube_cmd, knsp) -- for res in resources: -- r = self.get_command_output( -- '%s %s' % (k_cmd, res)) -- if r['status'] == 0: -- k_list = [k.split()[0] for k in -- r['output'].splitlines()[1:]] -- for k in k_list: -- k_cmd = '%s %s' % (kube_cmd, knsp) -- self.add_cmd_output( -- '%s describe %s %s' % (k_cmd, res, k)) -- -- if self.get_option('podlogs'): -- k_cmd = '%s %s' % (kube_cmd, knsp) -- r = self.get_command_output('%s get pods' % k_cmd) -- if r['status'] == 0: -- pods = [p.split()[0] for p in -- r['output'].splitlines()[1:]] -- for pod in pods: -- self.add_cmd_output('%s logs %s' % (k_cmd, pod)) -- -- if not self.get_option('all'): -- k_cmd = '%s get --all-namespaces=true' % kube_cmd - for res in resources: - self.add_cmd_output('%s %s' % (k_cmd, res)) - -+ if self.get_option('describe'): -+ # need to drop json formatting for this -+ k_cmd = '%s get %s' % (kube_cmd, knsp) -+ for res in resources: -+ r = self.get_command_output( -+ '%s %s' % (k_cmd, res)) -+ if r['status'] == 0: -+ k_list = [k.split()[0] for k in -+ r['output'].splitlines()[1:]] -+ for k in k_list: -+ k_cmd = '%s %s' % (kube_cmd, knsp) -+ self.add_cmd_output( -+ '%s describe %s %s' % (k_cmd, res, k)) -+ -+ if self.get_option('podlogs'): -+ k_cmd = '%s %s' % (kube_cmd, knsp) -+ r = self.get_command_output('%s get pods' % k_cmd) -+ if r['status'] == 0: -+ pods = [p.split()[0] for p in -+ r['output'].splitlines()[1:]] -+ for pod in pods: -+ self.add_cmd_output('%s logs %s' % (k_cmd, pod)) -+ -+ if not self.get_option('all'): -+ k_cmd = '%s get --all-namespaces=true' % kube_cmd -+ for res in resources: -+ self.add_cmd_output('%s %s' % (k_cmd, res)) -+ - def postproc(self): - # First, clear sensitive data from the json output collected. - # This will mask values when the "name" looks susceptible of --- -2.17.1 - - -From 63ad6c251ab88ab2f0e07ae9e3f1b2771d5e90ca Mon Sep 17 00:00:00 2001 -From: Jake Hunsaker <jhunsake@redhat.com> -Date: Thu, 12 Jul 2018 13:07:34 -0400 -Subject: [PATCH 2/3] [kubernetes] Correct config option syntax - -Versions of kubernetes after 1.5 use --kubeconfig instead of --config to -specify a configuration file to use for kubectl commands. Update the -kubernetes plugin to use the proper syntax. 
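As a quick illustration of the syntax change in the hunk below (kubectl versions after 1.5 expect --kubeconfig rather than --config), a minimal sketch of building such a command line. The kubeconfig path is the one referenced by the plugin; everything else is illustrative.

    import os

    ADMIN_KUBECONFIG = "/etc/origin/master/admin.kubeconfig"

    def build_kubectl_cmd():
        cmd = "kubectl"
        # Only point kubectl at the admin kubeconfig when it exists on disk
        if os.path.exists(ADMIN_KUBECONFIG):
            cmd += " --kubeconfig=%s" % ADMIN_KUBECONFIG
        return cmd

    # e.g. build_kubectl_cmd() + " get -o json nodes"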
- -Signed-off-by: Jake Hunsaker <jhunsake@redhat.com> ---- - sos/plugins/kubernetes.py | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/sos/plugins/kubernetes.py b/sos/plugins/kubernetes.py -index 21cb51df..c14e078e 100644 ---- a/sos/plugins/kubernetes.py -+++ b/sos/plugins/kubernetes.py -@@ -61,7 +61,7 @@ class kubernetes(Plugin, RedHatPlugin): - - kube_cmd = "kubectl " - if path.exists('/etc/origin/master/admin.kubeconfig'): -- kube_cmd += "--config=/etc/origin/master/admin.kubeconfig" -+ kube_cmd += "--kubeconfig=/etc/origin/master/admin.kubeconfig" - - kube_get_cmd = "get -o json " - for subcmd in ['version', 'config view']: --- -2.17.1 - - -From 46fffd469f4f3d07337dc335cfc24341e836f23b Mon Sep 17 00:00:00 2001 -From: Jake Hunsaker <jhunsake@redhat.com> -Date: Thu, 12 Jul 2018 13:11:44 -0400 -Subject: [PATCH 3/3] [origin] Collect statistics information - -Adds collection of 'oc adm top' output for images and imagestreams. - -Resolves: #1165 -Closes: #1383 - -Signed-off-by: Jake Hunsaker <jhunsake@redhat.com> ---- - sos/plugins/origin.py | 26 ++++++++++++++++++++------ - 1 file changed, 20 insertions(+), 6 deletions(-) - -diff --git a/sos/plugins/origin.py b/sos/plugins/origin.py -index 02bc047a..0e384117 100644 ---- a/sos/plugins/origin.py -+++ b/sos/plugins/origin.py -@@ -124,14 +124,28 @@ class OpenShiftOrigin(Plugin): - # - # Note: Information about nodes, events, pods, and services - # is already collected by the Kubernetes plugin -+ -+ subcmds = [ -+ "describe projects", -+ "adm top images", -+ "adm top imagestreams" -+ ] -+ - self.add_cmd_output([ -- "%s describe projects" % oc_cmd_admin, -- "%s get -o json hostsubnet" % oc_cmd_admin, -- "%s get -o json clusternetwork" % oc_cmd_admin, -- "%s get -o json netnamespaces" % oc_cmd_admin, -- # Registry and router configs are typically here -- "%s get -o json dc -n default" % oc_cmd_admin, -+ '%s %s' % (oc_cmd_admin, subcmd) for subcmd in subcmds - ]) -+ -+ jcmds = [ -+ "hostsubnet", -+ "clusternetwork", -+ "netnamespaces", -+ "dc -n default" -+ ] -+ -+ self.add_cmd_output([ -+ '%s get -o json %s' % (oc_cmd_admin, jcmd) for jcmd in jcmds -+ ]) -+ - if self.get_option('diag'): - diag_cmd = "%s adm diagnostics -l 0" % oc_cmd_admin - if self.get_option('diag-prevent'): --- -2.17.1 - diff --git a/SOURCES/sos-bz1623070-pipe-returncode.patch b/SOURCES/sos-bz1623070-pipe-returncode.patch deleted file mode 100644 index 66c7c95..0000000 --- a/SOURCES/sos-bz1623070-pipe-returncode.patch +++ /dev/null @@ -1,37 +0,0 @@ -From 17bcd2bcdb8de4818b361582ac4d833ff324f4ff Mon Sep 17 00:00:00 2001 -From: "Bryn M. Reeves" <bmr@redhat.com> -Date: Mon, 10 Sep 2018 18:06:00 +0100 -Subject: [PATCH] [utilities] wait until AsyncReader p.poll() returns None - -On some systems the pipe used by the AsyncReader() class and the -sos_get_command_output() function may still be open at the time -the p.poll() call returns. At this time the command exit status -is undefined, leading to errors and collection failures for code -that tests the command's exit code. - -Wait explicitly until poll() returns None to avoid this. - -Resolves: #1417 - -Signed-off-by: Bryn M. 
Reeves <bmr@redhat.com> ---- - sos/utilities.py | 3 ++- - 1 file changed, 2 insertions(+), 1 deletion(-) - -diff --git a/sos/utilities.py b/sos/utilities.py -index d112e15a..25e10429 100644 ---- a/sos/utilities.py -+++ b/sos/utilities.py -@@ -155,7 +155,8 @@ def sos_get_command_output(command, timeout=300, stderr=False, - - reader = AsyncReader(p.stdout, sizelimit, binary) - stdout = reader.get_contents() -- p.poll() -+ while p.poll() is None: -+ pass - - except OSError as e: - if e.errno == errno.ENOENT: --- -2.17.1 - diff --git a/SOURCES/sos-bz1624043-symlinks-not-copied.patch b/SOURCES/sos-bz1624043-symlinks-not-copied.patch deleted file mode 100644 index 8246ec8..0000000 --- a/SOURCES/sos-bz1624043-symlinks-not-copied.patch +++ /dev/null @@ -1,948 +0,0 @@ -From 2e07f7c4778145d4366476ecc4383d491458b541 Mon Sep 17 00:00:00 2001 -From: "Bryn M. Reeves" <bmr@redhat.com> -Date: Fri, 31 Aug 2018 12:50:24 +0100 -Subject: [PATCH 1/4] [sosreport] properly raise exceptions when --debug is - given - -OSError and IOError exceptions were not raised to the terminal -when --debug is in effect since they were silently caught in the -generic exception handler. - -Signed-off-by: Bryn M. Reeves <bmr@redhat.com> ---- - sos/sosreport.py | 5 ++++- - 1 file changed, 4 insertions(+), 1 deletion(-) - -diff --git a/sos/sosreport.py b/sos/sosreport.py -index 00c3e811..80633966 100644 ---- a/sos/sosreport.py -+++ b/sos/sosreport.py -@@ -995,7 +995,8 @@ class SoSReport(object): - print(" %s while setting up archive" % e.strerror) - print("") - else: -- raise e -+ print("Error setting up archive: %s" % e) -+ raise - except Exception as e: - self.ui_log.error("") - self.ui_log.error(" Unexpected exception setting up archive:") -@@ -1467,6 +1468,8 @@ class SoSReport(object): - return self.final_work() - - except (OSError): -+ if self.opts.debug: -+ raise - self._cleanup() - except (KeyboardInterrupt): - self.ui_log.error("\nExiting on user cancel") --- -2.17.1 - - -From c496d2bec8cae175faf986567e73d16d401d8564 Mon Sep 17 00:00:00 2001 -From: "Bryn M. Reeves" <bmr@redhat.com> -Date: Fri, 31 Aug 2018 12:52:38 +0100 -Subject: [PATCH 2/4] [archive] simplify FileCacheArchive.makedirs() - -Simplify the makedirs() method of FileCacheArchive and have it -bypass _check_path() and directly call os.makedirs(): a subsequent -patch will restrict the use of the method to setting up the sos_* -directories in the archive root. - -File, directory and other object type add_* methods will use a -new method that correctly handles symbolic links in intermediate -path components. - -Signed-off-by: Bryn M. Reeves <bmr@redhat.com> ---- - sos/archive.py | 8 ++++---- - 1 file changed, 4 insertions(+), 4 deletions(-) - -diff --git a/sos/archive.py b/sos/archive.py -index 5d99170f..ffa54036 100644 ---- a/sos/archive.py -+++ b/sos/archive.py -@@ -361,11 +361,11 @@ class FileCacheArchive(Archive): - return self._archive_root - - def makedirs(self, path, mode=0o700): -- dest = self._check_path(path, P_DIR) -- if not dest: -- return -+ """Create path, including leading components. - -- self._makedirs(self.dest_path(path)) -+ Used by sos.sosreport to set up sos_* directories. -+ """ -+ os.makedirs(os.path.join(self._archive_root, path), mode=mode) - self.log_debug("created directory at '%s' in FileCacheArchive '%s'" - % (path, self._archive_root)) - --- -2.17.1 - - -From ca422720b74181b2433473428e29e90af59b3cf8 Mon Sep 17 00:00:00 2001 -From: "Bryn M. 
Reeves" <bmr@redhat.com> -Date: Fri, 31 Aug 2018 12:55:51 +0100 -Subject: [PATCH 3/4] [archive] normalise dest_dir in - FileCacheArchive._check_path() - -Always set a valid dest_dir in _check_path() and do not assume -that it can be obtained by splitting the path: in the case of -a directory it is the unmodified 'dest' value. - -Signed-off-by: Bryn M. Reeves <bmr@redhat.com> ---- - sos/archive.py | 5 ++++- - 1 file changed, 4 insertions(+), 1 deletion(-) - -diff --git a/sos/archive.py b/sos/archive.py -index ffa54036..903cc672 100644 ---- a/sos/archive.py -+++ b/sos/archive.py -@@ -191,7 +191,10 @@ class FileCacheArchive(Archive): - copied now or `None` otherwise - """ - dest = dest or self.dest_path(src) -- dest_dir = os.path.split(dest)[0] -+ if path_type == P_DIR: -+ dest_dir = dest -+ else: -+ dest_dir = os.path.split(dest)[0] - if not dest_dir: - return dest - --- -2.17.1 - - -From 75d759066e8ee0a469abc37f48f7bfcdfe8182b5 Mon Sep 17 00:00:00 2001 -From: "Bryn M. Reeves" <bmr@redhat.com> -Date: Fri, 31 Aug 2018 12:58:01 +0100 -Subject: [PATCH 4/4] [archive] replace FileCacheArchive._makedirs() - -The Python os.makedirs() implementation is inadequate for sos's -needs: it will create leading directories given an intended path -destination, but it is not able to reflect cases where some of -the intermediate paths are actually symbolic links. - -Replace the use of os.makedirs() with a method that walks over -the path, and either creates directories, or symbolic links (and -their directory target) to better correspond with the content of -the host file system. - -This fixes a situation where two plugins can race in the archive, -leading to an exception in the plugin that runs last: - - - /foo/bar exists and is a link to /foo/bar.qux - - One plugin attempts to collect /foo/bar - - Another plugin attempts to collect a link /foo/qux -> /foo/bar/baz - -If the 2nd plugin happens to run first it will create the path -"/foo/bar" as a _directory_ (via _makedirs()). Since the archive -now checks for matching object types when a path collision occurs, -the first plugin will arrive at add_dir(), note that "/foo/bar" is -present and is not a symbolic link, and will raise an exception. - -Correct this by ensuring that whichever plugin executes first, the -correct link/directory path structure will be set up. - -Signed-off-by: Bryn M. Reeves <bmr@redhat.com> ---- - sos/archive.py | 72 ++++++++++++++++++++++++++++++++++++++++++++------ - 1 file changed, 64 insertions(+), 8 deletions(-) - -diff --git a/sos/archive.py b/sos/archive.py -index 903cc672..11afa7aa 100644 ---- a/sos/archive.py -+++ b/sos/archive.py -@@ -159,6 +159,67 @@ class FileCacheArchive(Archive): - name = name.lstrip(os.sep) - return (os.path.join(self._archive_root, name)) - -+ def _make_leading_paths(self, src, mode=0o700): -+ """Create leading path components -+ -+ The standard python `os.makedirs` is insufficient for our -+ needs: it will only create directories, and ignores the fact -+ that some path components may be symbolic links. -+ """ -+ self.log_debug("Making leading paths for %s" % src) -+ root = self._archive_root -+ -+ def in_archive(path): -+ """Test whether path ``path`` is inside the archive. -+ """ -+ return path.startswith(os.path.join(root, "")) -+ -+ if not src.startswith("/"): -+ # Sos archive path (sos_commands, sos_logs etc.) -+ src_dir = src -+ else: -+ # Host file path -+ src_dir = src if os.path.isdir(src) else os.path.split(src)[0] -+ -+ # Build a list of path components in root-to-leaf order. 
-+ path = src_dir -+ path_comps = [] -+ while path != '/' and path != '': -+ head, tail = os.path.split(path) -+ path_comps.append(tail) -+ path = head -+ path_comps.reverse() -+ -+ abs_path = root -+ rel_path = "" -+ -+ # Check and create components as needed -+ for comp in path_comps: -+ abs_path = os.path.join(abs_path, comp) -+ -+ if not in_archive(abs_path): -+ continue -+ -+ rel_path = os.path.join(rel_path, comp) -+ src_path = os.path.join("/", rel_path) -+ -+ if not os.path.exists(abs_path): -+ self.log_debug("Making path %s" % abs_path) -+ if os.path.islink(src_path) and os.path.isdir(src_path): -+ target = os.readlink(src_path) -+ abs_target = os.path.join(root, target) -+ -+ # Recursively create leading components of target -+ self._make_leading_paths(abs_target, mode=mode) -+ -+ self.log_debug("Making symlink '%s' -> '%s'" % -+ (abs_path, target)) -+ target = os.path.relpath(target) -+ os.symlink(target, abs_path) -+ else: -+ self.log_debug("Making directory %s" % abs_path) -+ os.mkdir(abs_path, mode) -+ - def _check_path(self, src, path_type, dest=None, force=False): - """Check a new destination path in the archive. - -@@ -203,7 +264,8 @@ class FileCacheArchive(Archive): - raise ValueError("path '%s' exists and is not a directory" % - dest_dir) - elif not os.path.exists(dest_dir): -- self._makedirs(dest_dir) -+ src_dir = src if path_type == P_DIR else os.path.split(src)[0] -+ self._make_leading_paths(src_dir) - - def is_special(mode): - return any([ -@@ -326,10 +388,7 @@ class FileCacheArchive(Archive): - - def add_dir(self, path): - with self._path_lock: -- dest = self._check_path(path, P_DIR) -- if not dest: -- return -- self.makedirs(path) -+ self._check_path(path, P_DIR) - - def add_node(self, path, mode, device): - dest = self._check_path(path, P_NODE) -@@ -347,9 +406,6 @@ class FileCacheArchive(Archive): - raise e - shutil.copystat(path, dest) - -- def _makedirs(self, path, mode=0o700): -- os.makedirs(path, mode) -- - def name_max(self): - if 'PC_NAME_MAX' in os.pathconf_names: - pc_name_max = os.pathconf_names['PC_NAME_MAX'] --- -2.17.1 - -From 5d6228b85e174dee8abcc4c206a1e9034242c6c6 Mon Sep 17 00:00:00 2001 -From: "Bryn M. Reeves" <bmr@redhat.com> -Date: Fri, 7 Sep 2018 12:06:34 -0400 -Subject: [PATCH 1/6] [sosreport] ensure ThreadPool exceptions are raised - -The ThreadPoolExecutor does not raise exceptions to the parent -thread immediately: it stores them in-line in the pool's results -list, and raises them to the caller on acccess to that slot in -the results iterator. - -Make sure that these exceptions are handled by iterating over all -results and asserting that they are non-None (in practice, this -code is never executed since the resulting raise will trap to an -exception handler, but it is less confusing than a bare 'pass'). - -Signed-off-by: Bryn M. 
Reeves <bmr@redhat.com> ---- - sos/sosreport.py | 8 ++++++-- - 1 file changed, 6 insertions(+), 2 deletions(-) - -diff --git a/sos/sosreport.py b/sos/sosreport.py -index 80633966..44be75a1 100644 ---- a/sos/sosreport.py -+++ b/sos/sosreport.py -@@ -1065,9 +1065,13 @@ class SoSReport(object): - try: - self.plugpool = ThreadPoolExecutor(self.opts.threads) - # Pass the plugpool its own private copy of self.pluglist -- self.plugpool.map(self._collect_plugin, list(self.pluglist), -- chunksize=1) -+ results = self.plugpool.map(self._collect_plugin, -+ list(self.pluglist), chunksize=1) - self.plugpool.shutdown(wait=True) -+ for res in results: -+ if not res: -+ self.soslog.debug("Unexpected plugin task result: %s" % -+ res) - self.ui_log.info("") - except KeyboardInterrupt: - # We may not be at a newline when the user issues Ctrl-C --- -2.17.1 - - -From 9aaba972bf6a42c33ea9bca80f07bfb880ba45a1 Mon Sep 17 00:00:00 2001 -From: "Bryn M. Reeves" <bmr@redhat.com> -Date: Fri, 7 Sep 2018 12:15:10 -0400 -Subject: [PATCH 2/6] [sosreport] trap directly to PDB in handle_exception() - -Now that plugins are run in a threadpool, it is not possible to -defer the call to pdb.post_mortem() to the top-level exception -handler in the main thread: this is due to the fact that in a pool, -exceptions are caught and saved to be re-raised to thread calling -the pool when results are returned. When the saved exception is -raised to the top-level handler the execution context it relates -to is gone: the backtrace and stack frame have been torn down and -only very limited information is available from the exception -frame. - -Instead, catch these exceptions _inside_ the thread pool context, -and directly trap to the Python debugger. This allows plugin code -to be debugged interactively with the full backtrace and with all -access to local variables and the execution stack. In addition, -this means that after the debugger has handled the exception it is -possible to return to the run and continue until report completion. - -One side effect of this change is that the *-plugin-errors.txt -file containng the backtrace is now written into the archive -whether or not --debug is given. - -Signed-off-by: Bryn M. Reeves <bmr@redhat.com> ---- - sos/sosreport.py | 9 ++++++++- - 1 file changed, 8 insertions(+), 1 deletion(-) - -diff --git a/sos/sosreport.py b/sos/sosreport.py -index 44be75a1..77ae7161 100644 ---- a/sos/sosreport.py -+++ b/sos/sosreport.py -@@ -30,6 +30,7 @@ from shutil import rmtree - import tempfile - import hashlib - from concurrent.futures import ThreadPoolExecutor, TimeoutError -+import pdb - - from sos import _sos as _ - from sos import __version__ -@@ -504,7 +505,13 @@ class SoSReport(object): - - def handle_exception(self, plugname=None, func=None): - if self.raise_plugins or self.exit_process: -- raise -+ # retrieve exception info for the current thread and stack. -+ (etype, val, tb) = sys.exc_info() -+ # we are NOT in interactive mode, print the exception... -+ traceback.print_exception(etype, val, tb, file=sys.stdout) -+ print_() -+ # ...then start the debugger in post-mortem mode. -+ pdb.post_mortem(tb) - if plugname and func: - self._log_plugin_exception(plugname, func) - --- -2.17.1 - - -From 0ea62d1ea57f41c1b75ccb83e69fdda386a7d280 Mon Sep 17 00:00:00 2001 -From: "Bryn M. 
Reeves" <bmr@redhat.com> -Date: Fri, 7 Sep 2018 13:00:52 -0400 -Subject: [PATCH 3/6] [Plugin] fix exception raise in Plugin._copy_dir() - -Use a naked 'raise' statement rather than raising the already caught -exception in _copy_dir(), so that the original stack and backtrace -are avaialable. - -Signed-off-by: Bryn M. Reeves <bmr@redhat.com> ---- - sos/plugins/__init__.py | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/sos/plugins/__init__.py b/sos/plugins/__init__.py -index 252de4d0..ac2c0bc8 100644 ---- a/sos/plugins/__init__.py -+++ b/sos/plugins/__init__.py -@@ -401,7 +401,7 @@ class Plugin(object): - msg = "Too many levels of symbolic links copying" - self._log_error("_copy_dir: %s '%s'" % (msg, srcpath)) - return -- raise e -+ raise - - def _get_dest_for_srcpath(self, srcpath): - if self.use_sysroot(): --- -2.17.1 - - -From d84c1cd6dedf51a8ed7b1a511585c0ac2db0f083 Mon Sep 17 00:00:00 2001 -From: "Bryn M. Reeves" <bmr@redhat.com> -Date: Wed, 5 Sep 2018 12:46:16 +0100 -Subject: [PATCH 4/6] [archive] fix leading path creation - -Fix the creation of leading path components for both paths that -contain intermediate components that are symbolic links (with both -absolute and relative targets), and those that contain only -directory components. - -Since symlinks may link to other files, and other symlinks, it is -necessary to handle these paths recursively and to include any -intermediate symlinked directories, or symlink targets in the set -of paths added to the archive. - -Related: #1404 - -Signed-off-by: Bryn M. Reeves <bmr@redhat.com> ---- - sos/archive.py | 41 ++++++++++++++++++++++++++++++++++------- - 1 file changed, 34 insertions(+), 7 deletions(-) - -diff --git a/sos/archive.py b/sos/archive.py -index 11afa7aa..c256a01f 100644 ---- a/sos/archive.py -+++ b/sos/archive.py -@@ -165,9 +165,24 @@ class FileCacheArchive(Archive): - The standard python `os.makedirs` is insufficient for our - needs: it will only create directories, and ignores the fact - that some path components may be symbolic links. -+ -+ :param src: The source path in the host file system for which -+ leading components should be created, or the path -+ to an sos_* virtual directory inside the archive. -+ -+ Host paths must be absolute (initial '/'), and -+ sos_* directory paths must be a path relative to -+ the root of the archive. -+ -+ :param mode: An optional mode to be used when creating path -+ components. -+ :returns: A rewritten destination path in the case that one -+ or more symbolic links in intermediate components -+ of the path have altered the path destination. - """ - self.log_debug("Making leading paths for %s" % src) - root = self._archive_root -+ dest = src - - def in_archive(path): - """Test whether path ``path`` is inside the archive. -@@ -191,34 +206,42 @@ class FileCacheArchive(Archive): - path_comps.reverse() - - abs_path = root -- rel_path = "" -+ src_path = "/" - - # Check and create components as needed - for comp in path_comps: - abs_path = os.path.join(abs_path, comp) - -+ # Do not create components that are above the archive root. 
- if not in_archive(abs_path): - continue - -- rel_path = os.path.join(rel_path, comp) -- src_path = os.path.join("/", rel_path) -+ src_path = os.path.join(src_path, comp) - - if not os.path.exists(abs_path): - self.log_debug("Making path %s" % abs_path) - if os.path.islink(src_path) and os.path.isdir(src_path): - target = os.readlink(src_path) -- abs_target = os.path.join(root, target) -+ -+ # The directory containing the source in the host fs, -+ # adjusted for the current level of path creation. -+ target_dir = os.path.split(src_path)[0] -+ -+ # The source path of the target in the host fs to be -+ # recursively copied. -+ target_src = os.path.join(target_dir, target) - - # Recursively create leading components of target -- self._make_leading_paths(abs_target, mode=mode) -+ dest = self._make_leading_paths(target_src, mode=mode) -+ dest = os.path.normpath(dest) - - self.log_debug("Making symlink '%s' -> '%s'" % - (abs_path, target)) -- target = os.path.relpath(target) - os.symlink(target, abs_path) - else: - self.log_debug("Making directory %s" % abs_path) - os.mkdir(abs_path, mode) -+ return dest - - def _check_path(self, src, path_type, dest=None, force=False): - """Check a new destination path in the archive. -@@ -259,13 +282,17 @@ class FileCacheArchive(Archive): - if not dest_dir: - return dest - -+ # Preserve destination basename for rewritten dest_dir -+ dest_name = os.path.split(src)[1] -+ - # Check containing directory presence and path type - if os.path.exists(dest_dir) and not os.path.isdir(dest_dir): - raise ValueError("path '%s' exists and is not a directory" % - dest_dir) - elif not os.path.exists(dest_dir): - src_dir = src if path_type == P_DIR else os.path.split(src)[0] -- self._make_leading_paths(src_dir) -+ src_dir = self._make_leading_paths(src_dir) -+ dest = self.dest_path(os.path.join(src_dir, dest_name)) - - def is_special(mode): - return any([ --- -2.17.1 - - -From 322f4a517ae336cc1443f9a399a0d15d45ec48b9 Mon Sep 17 00:00:00 2001 -From: "Bryn M. Reeves" <bmr@redhat.com> -Date: Fri, 7 Sep 2018 13:11:03 -0400 -Subject: [PATCH 5/6] [archive] add link follow-up to - FileCacheArchive.add_link() - -Creating a link may trigger further actions in the archive: if the -link target is a regular file, we must copy that file into the -archive, and if the target is a symbolic link, then we must create -that link, and copy in the link target. - -Handle this by calling add_file() or (recursively) add_link() in -order to create the missing pieces of the symlink chain. - -These operations must take place outside of the path lock since -they do not modify the archive namespace and will call methods of -the Archive object that will attempt to re-acquire this lock. - -Resolves: #1404 - -Signed-off-by: Bryn M. 
Reeves <bmr@redhat.com> ---- - sos/archive.py | 38 +++++++++++++++++++++++++++++++++++--- - 1 file changed, 35 insertions(+), 3 deletions(-) - -diff --git a/sos/archive.py b/sos/archive.py -index c256a01f..6db398fc 100644 ---- a/sos/archive.py -+++ b/sos/archive.py -@@ -403,6 +403,7 @@ class FileCacheArchive(Archive): - % (dest, self._archive_root)) - - def add_link(self, source, link_name): -+ self.log_debug("adding symlink at '%s' -> '%s'" % (link_name, source)) - with self._path_lock: - dest = self._check_path(link_name, P_LINK) - if not dest: -@@ -410,10 +411,41 @@ class FileCacheArchive(Archive): - - if not os.path.lexists(dest): - os.symlink(source, dest) -- self.log_debug("added symlink at '%s' to '%s' in archive '%s'" -- % (dest, source, self._archive_root)) -+ self.log_debug("added symlink at '%s' to '%s' in archive '%s'" -+ % (dest, source, self._archive_root)) -+ -+ # Follow-up must be outside the path lock: we recurse into -+ # other monitor methods that will attempt to reacquire it. -+ -+ source_dir = os.path.dirname(link_name) -+ host_source = os.path.join(source_dir, source) -+ if not os.path.exists(self.dest_path(host_source)): -+ if os.path.islink(host_source): -+ link_dir = os.path.dirname(link_name) -+ link_name = os.path.normpath(os.path.join(link_dir, source)) -+ dest_dir = os.path.dirname(link_name) -+ source = os.path.join(dest_dir, os.readlink(link_name)) -+ source = os.path.relpath(source) -+ self.log_debug("Adding link %s -> %s for link follow up" % -+ (link_name, source)) -+ self.add_link(source, link_name) -+ elif os.path.isdir(host_source): -+ self.log_debug("Adding dir %s for link follow up" % source) -+ self.add_dir(host_source) -+ elif os.path.isfile(host_source): -+ self.log_debug("Adding file %s for link follow up" % source) -+ self.add_file(host_source) -+ else: -+ self.log_debug("No link follow up: source=%s link_name=%s" % -+ (source, link_name)) - -- def add_dir(self, path): -+ -+ def add_dir(self, path, copy=False): -+ """Create a directory in the archive. -+ -+ :param path: the path in the host file system to add -+ """ -+ # Establish path structure - with self._path_lock: - self._check_path(path, P_DIR) - --- -2.17.1 - - -From 6e79c4b4a4f32fa549708dbb8c8b9af73ab8ff61 Mon Sep 17 00:00:00 2001 -From: "Bryn M. Reeves" <bmr@redhat.com> -Date: Mon, 10 Sep 2018 16:33:33 +0100 -Subject: [PATCH 6/6] [archive] remove unused 'copy' arg from - FileCacheArchive.add_dir() - -Signed-off-by: Bryn M. Reeves <bmr@redhat.com> ---- - sos/archive.py | 3 +-- - 1 file changed, 1 insertion(+), 2 deletions(-) - -diff --git a/sos/archive.py b/sos/archive.py -index 6db398fc..4b30630b 100644 ---- a/sos/archive.py -+++ b/sos/archive.py -@@ -439,8 +439,7 @@ class FileCacheArchive(Archive): - self.log_debug("No link follow up: source=%s link_name=%s" % - (source, link_name)) - -- -- def add_dir(self, path, copy=False): -+ def add_dir(self, path): - """Create a directory in the archive. - - :param path: the path in the host file system to add --- -2.17.1 - -From 919e8671a6ab9684d59525eb7f3607b3aab08ee1 Mon Sep 17 00:00:00 2001 -From: "Bryn M. Reeves" <bmr@redhat.com> -Date: Tue, 11 Sep 2018 12:16:57 -0400 -Subject: [PATCH] [archive] fix link rewriting logic in - FileCacheArchive.add_link() - -When processing link follow up for an original symbolic link, the -add_link() logic incorrectly used the _original_ host link name, -rather than the to-be-created name when calculating relative path -structures. 
If the prior link is at a greater or lesser level of -directory nesting this will lead to broken relative links in the -archive (one level too high or too low). - -In some cases (systemd) this behaviour was masked due to the fact -that identically named links exist at multiple levels of the path -hierarchy. - -Signed-off-by: Bryn M. Reeves <bmr@redhat.com> ---- - sos/archive.py | 30 +++++++++++++++++++----------- - 1 file changed, 19 insertions(+), 11 deletions(-) - -diff --git a/sos/archive.py b/sos/archive.py -index 528cfa576..7a7717de7 100644 ---- a/sos/archive.py -+++ b/sos/archive.py -@@ -417,27 +417,35 @@ def add_link(self, source, link_name): - # Follow-up must be outside the path lock: we recurse into - # other monitor methods that will attempt to reacquire it. - -+ self.log_debug("Link follow up: source=%s link_name=%s dest=%s" % -+ (source, link_name, dest)) -+ - source_dir = os.path.dirname(link_name) -- host_source = os.path.join(source_dir, source) -- if not os.path.exists(self.dest_path(host_source)): -- if os.path.islink(host_source): -- link_dir = os.path.dirname(link_name) -- link_name = os.path.normpath(os.path.join(link_dir, source)) -+ host_path_name = os.path.normpath(os.path.join(source_dir, source)) -+ dest_path_name = self.dest_path(host_path_name) -+ -+ if not os.path.exists(dest_path_name): -+ if os.path.islink(host_path_name): -+ # Normalised path for the new link_name -+ link_name = host_path_name -+ # Containing directory for the new link - dest_dir = os.path.dirname(link_name) -- source = os.path.join(dest_dir, os.readlink(link_name)) -- source = os.path.relpath(source) -+ # Relative source path of the new link -+ source = os.path.join(dest_dir, os.readlink(host_path_name)) -+ source = os.path.relpath(source, dest_dir) - self.log_debug("Adding link %s -> %s for link follow up" % - (link_name, source)) - self.add_link(source, link_name) -- elif os.path.isdir(host_source): -+ elif os.path.isdir(host_path_name): - self.log_debug("Adding dir %s for link follow up" % source) -- self.add_dir(host_source) -- elif os.path.isfile(host_source): -+ self.add_dir(host_path_name) -+ elif os.path.isfile(host_path_name): - self.log_debug("Adding file %s for link follow up" % source) -- self.add_file(host_source) -+ self.add_file(host_path_name) - else: - self.log_debug("No link follow up: source=%s link_name=%s" % - (source, link_name)) -+ self.log_debug("leaving add_link()") - - def add_dir(self, path): - """Create a directory in the archive. -From c065be9715dc845b6411a9a0b2d6171bbeb1c390 Mon Sep 17 00:00:00 2001 -From: "Bryn M. Reeves" <bmr@redhat.com> -Date: Wed, 12 Sep 2018 12:02:33 +0100 -Subject: [PATCH] [plugin] canonicalize link target path in - Plugin._copy_symlink() - -Since we may be dealing with paths that contain intermediate -symlinked directories, it is necessary to canonicalize the path -for the link target in order to eliminate additional levels of -symbolic links, and to calculate the correct relative path to -use within the archive. - -Related: #1404 - -Signed-off-by: Bryn M. 
Reeves <bmr@redhat.com> ---- - sos/plugins/__init__.py | 5 ++++- - 1 file changed, 4 insertions(+), 1 deletion(-) - -diff --git a/sos/plugins/__init__.py b/sos/plugins/__init__.py -index ac2c0bc8c..7d011a02c 100644 ---- a/sos/plugins/__init__.py -+++ b/sos/plugins/__init__.py -@@ -353,7 +353,10 @@ def _copy_symlink(self, srcpath): - absdest = os.path.normpath(dest) - # adjust the target used inside the report to always be relative - if os.path.isabs(linkdest): -- reldest = os.path.relpath(linkdest, os.path.dirname(srcpath)) -+ # Canonicalize the link target path to avoid additional levels -+ # of symbolic links (that would affect the path nesting level). -+ realdir = os.path.realpath(os.path.dirname(srcpath)) -+ reldest = os.path.relpath(linkdest, start=realdir) - # trim leading /sysroot - if self.use_sysroot(): - reldest = reldest[len(os.sep + os.pardir):] -From 868966cd9dbb96ce3635d884e67e738b18658140 Mon Sep 17 00:00:00 2001 -From: "Bryn M. Reeves" <bmr@redhat.com> -Date: Wed, 12 Sep 2018 16:11:07 +0100 -Subject: [PATCH] [archive] canonicalise paths for link follow up - -Ensure that the canonical path is used when processing link follow -up actions: the actual link path may contain one or more levels of -symbolic links, leading to broken links if the link target path is -assumed to be relative to the containing directory. - -Signed-off-by: Bryn M. Reeves <bmr@redhat.com> ---- - sos/archive.py | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/sos/archive.py b/sos/archive.py -index 7a7717de7..483d66f4f 100644 ---- a/sos/archive.py -+++ b/sos/archive.py -@@ -421,7 +421,7 @@ def add_link(self, source, link_name): - (source, link_name, dest)) - - source_dir = os.path.dirname(link_name) -- host_path_name = os.path.normpath(os.path.join(source_dir, source)) -+ host_path_name = os.path.realpath(os.path.join(source_dir, source)) - dest_path_name = self.dest_path(host_path_name) - - if not os.path.exists(dest_path_name): -From 8e60e299cdfb0027d6b6ea845234ef54ae785186 Mon Sep 17 00:00:00 2001 -From: "Bryn M. Reeves" <bmr@redhat.com> -Date: Thu, 13 Sep 2018 16:14:12 +0100 -Subject: [PATCH 1/2] [archive, plugin] avoid recursing on symbolic link loops - -It's possible that symlink loops exist in the host file system, -either 'simple' ('a'->'a'), or indirect ('a'->'b'->'a'). We need -to avoid recursing on these loops, to avoid exceeding the maximum -link or recursion depths, but we should still represent these -inodes as accurately as possible in the resulting archive. - -Detect loops in both the Plugin link handling code and in the new -Archive link follow-up code by creating the first requested level -of loop, and then skipping the recursive follow-up. This means -that the looping links are still created in the archive so long -as they are referenced in a copy spec but that we do not attempt -to indefinitely recurse while collecting them. - -Resolves: #1430 - -Signed-off-by: Bryn M. 
Reeves <bmr@redhat.com> ---- - sos/archive.py | 27 +++++++++++++++++++++++++++ - sos/plugins/__init__.py | 20 +++++++++++++++----- - 2 files changed, 42 insertions(+), 5 deletions(-) - -diff --git a/sos/archive.py b/sos/archive.py -index 483d66f4..e5819432 100644 ---- a/sos/archive.py -+++ b/sos/archive.py -@@ -424,6 +424,29 @@ class FileCacheArchive(Archive): - host_path_name = os.path.realpath(os.path.join(source_dir, source)) - dest_path_name = self.dest_path(host_path_name) - -+ def is_loop(link_name, source): -+ """Return ``True`` if the symbolic link ``link_name`` is part -+ of a file system loop, or ``False`` otherwise. -+ """ -+ link_dir = os.path.dirname(link_name) -+ if not os.path.isabs(source): -+ source = os.path.realpath(os.path.join(link_dir, source)) -+ link_name = os.path.realpath(link_name) -+ -+ # Simple a -> a loop -+ if link_name == source: -+ return True -+ -+ # Find indirect loops (a->b-a) by stat()ing the first step -+ # in the symlink chain -+ try: -+ os.stat(link_name) -+ except OSError as e: -+ if e.errno == 40: -+ return True -+ raise -+ return False -+ - if not os.path.exists(dest_path_name): - if os.path.islink(host_path_name): - # Normalised path for the new link_name -@@ -433,6 +456,10 @@ class FileCacheArchive(Archive): - # Relative source path of the new link - source = os.path.join(dest_dir, os.readlink(host_path_name)) - source = os.path.relpath(source, dest_dir) -+ if is_loop(link_name, source): -+ self.log_debug("Link '%s' - '%s' loops: skipping..." % -+ (link_name, source)) -+ return - self.log_debug("Adding link %s -> %s for link follow up" % - (link_name, source)) - self.add_link(source, link_name) -diff --git a/sos/plugins/__init__.py b/sos/plugins/__init__.py -index 7d011a02..7d2a8b2d 100644 ---- a/sos/plugins/__init__.py -+++ b/sos/plugins/__init__.py -@@ -376,6 +376,21 @@ class Plugin(object): - self._log_debug("link '%s' is a directory, skipping..." % linkdest) - return - -+ self.copied_files.append({'srcpath': srcpath, -+ 'dstpath': dstpath, -+ 'symlink': "yes", -+ 'pointsto': linkdest}) -+ -+ # Check for indirect symlink loops by stat()ing the next step -+ # in the link chain. -+ try: -+ os.stat(absdest) -+ except OSError as e: -+ if e.errno == 40: -+ self._log_debug("link '%s' is part of a file system " -+ "loop, skipping target..." % dstpath) -+ return -+ - # copy the symlink target translating relative targets - # to absolute paths to pass to _do_copy_path. - self._log_debug("normalized link target '%s' as '%s'" -@@ -388,11 +403,6 @@ class Plugin(object): - self._log_debug("link '%s' points to itself, skipping target..." - % linkdest) - -- self.copied_files.append({'srcpath': srcpath, -- 'dstpath': dstpath, -- 'symlink': "yes", -- 'pointsto': linkdest}) -- - def _copy_dir(self, srcpath): - try: - for afile in os.listdir(srcpath): --- -2.17.1 - - -From e108d7c03834446f8dac66ad69f5eade4f2c5fce Mon Sep 17 00:00:00 2001 -From: Pavel Moravec <pmoravec@redhat.com> -Date: Fri, 14 Sep 2018 10:42:07 +0200 -Subject: [PATCH 2/2] [archive] fix and simplify directory destination - rewriting - -Rewriting of the destination path by _make_leading_paths() only -applies when creating intermediate path components that are a -symbolic link. The final level of path creation must always be -a directory, and the destination is always the absolute path to -that directory. 
- -Always return the directory path when creating a new directory, -and do not attempt to rewrite the destination at the top level -in FileCacheArchive._check_path() since all intermediate links -have already been handled inside _make_leading_paths() (i.e. -the returned/rewritten destination is always equal to the path -that was passed into the function). - -Resolves: #1432 - -Signed-off-by: Pavel Moravec <pmoravec@redhat.com> -Signed-off-by: Bryn M. Reeves <bmr@redhat.com> ---- - sos/archive.py | 8 +++----- - 1 file changed, 3 insertions(+), 5 deletions(-) - -diff --git a/sos/archive.py b/sos/archive.py -index e5819432..b02b75f7 100644 ---- a/sos/archive.py -+++ b/sos/archive.py -@@ -241,6 +241,8 @@ class FileCacheArchive(Archive): - else: - self.log_debug("Making directory %s" % abs_path) - os.mkdir(abs_path, mode) -+ dest = src_path -+ - return dest - - def _check_path(self, src, path_type, dest=None, force=False): -@@ -282,17 +284,13 @@ class FileCacheArchive(Archive): - if not dest_dir: - return dest - -- # Preserve destination basename for rewritten dest_dir -- dest_name = os.path.split(src)[1] -- - # Check containing directory presence and path type - if os.path.exists(dest_dir) and not os.path.isdir(dest_dir): - raise ValueError("path '%s' exists and is not a directory" % - dest_dir) - elif not os.path.exists(dest_dir): - src_dir = src if path_type == P_DIR else os.path.split(src)[0] -- src_dir = self._make_leading_paths(src_dir) -- dest = self.dest_path(os.path.join(src_dir, dest_name)) -+ self._make_leading_paths(src_dir) - - def is_special(mode): - return any([ --- -2.17.1 - diff --git a/SOURCES/sos-bz1626159-atomic-attribute-error.patch b/SOURCES/sos-bz1626159-atomic-attribute-error.patch deleted file mode 100644 index 035c892..0000000 --- a/SOURCES/sos-bz1626159-atomic-attribute-error.patch +++ /dev/null @@ -1,60 +0,0 @@ -From 4440c9094d853a452cbff6f9801fc7d47352e9b4 Mon Sep 17 00:00:00 2001 -From: Jake Hunsaker <jhunsake@redhat.com> -Date: Thu, 6 Sep 2018 13:56:20 -0400 -Subject: [PATCH] [atomic] Define valid preset for RHEL Atomic - -Defines an 'atomic' preset for use with the RedHatAtomic policy for RHEL -Atomic Host. Fixes sos being unable to run due to the preset probe -returning a string rather than a preset. - -Resolves: #1418 - -Signed-off-by: Jake Hunsaker <jhunsake@redhat.com> -Signed-off-by: Bryn M. Reeves <bmr@redhat.com> ---- - sos/policies/redhat.py | 15 ++++++++++++++- - 1 file changed, 14 insertions(+), 1 deletion(-) - -diff --git a/sos/policies/redhat.py b/sos/policies/redhat.py -index b494de3c..e1e417f3 100644 ---- a/sos/policies/redhat.py -+++ b/sos/policies/redhat.py -@@ -325,6 +325,12 @@ No changes will be made to system configuration. - - ATOMIC = "atomic" - ATOMIC_RELEASE_STR = "Atomic" -+ATOMIC_DESC = "Red Hat Enterprise Linux Atomic Host" -+ -+atomic_presets = { -+ ATOMIC: PresetDefaults(name=ATOMIC, desc=ATOMIC_DESC, note=NOTE_TIME, -+ opts=_opts_verify) -+} - - - class RedHatAtomicPolicy(RHELPolicy): -@@ -347,6 +353,10 @@ organization before being passed to any third party. - %(vendor_text)s - """) - -+ def __init__(self, sysroot=None): -+ super(RedHatAtomicPolicy, self).__init__(sysroot=sysroot) -+ self.register_presets(atomic_presets) -+ - @classmethod - def check(cls): - atomic = False -@@ -363,7 +373,10 @@ organization before being passed to any third party. 
- return atomic - - def probe_preset(self): -- return ATOMIC -+ if self.pkg_by_name('atomic-openshift'): -+ return self.find_preset(RHOCP) -+ -+ return self.find_preset(ATOMIC) - - - class FedoraPolicy(RedHatPolicy): --- -2.17.1 - diff --git a/SOURCES/sos-bz1636093-openstack-relax-enabling-plugins.patch b/SOURCES/sos-bz1636093-openstack-relax-enabling-plugins.patch deleted file mode 100644 index 283f844..0000000 --- a/SOURCES/sos-bz1636093-openstack-relax-enabling-plugins.patch +++ /dev/null @@ -1,424 +0,0 @@ -From 9b3d0b7d8732f53dbbd5e02182a9b0a0e1d6d249 Mon Sep 17 00:00:00 2001 -From: Pavel Moravec <pmoravec@redhat.com> -Date: Fri, 31 Aug 2018 17:19:32 +0200 -Subject: [PATCH 1/2] [openstack_nova] remove too restrictive check_enabled - -Enable the plugin just based on package presence. - -Resolves: #1411 - -Signed-off-by: Pavel Moravec <pmoravec@redhat.com> ---- - sos/plugins/openstack_nova.py | 8 -------- - 1 file changed, 8 deletions(-) - -diff --git a/sos/plugins/openstack_nova.py b/sos/plugins/openstack_nova.py -index b041a59a..77c3b49a 100644 ---- a/sos/plugins/openstack_nova.py -+++ b/sos/plugins/openstack_nova.py -@@ -200,10 +200,6 @@ class DebianNova(OpenStackNova, DebianPlugin, UbuntuPlugin): - 'python-novnc' - ) - -- def check_enabled(self): -- self.nova = self.is_installed("nova-common") -- return self.nova -- - def setup(self): - super(DebianNova, self).setup() - self.add_copy_spec([ -@@ -233,10 +229,6 @@ class RedHatNova(OpenStackNova, RedHatPlugin): - 'novnc' - ) - -- def check_enabled(self): -- self.nova = self.is_installed("openstack-nova-common") -- return self.nova -- - def setup(self): - super(RedHatNova, self).setup() - self.add_copy_spec([ --- -2.17.1 - - -From f8ee9c4b87c6c3b8aa2bda3425f0e53499515363 Mon Sep 17 00:00:00 2001 -From: Pavel Moravec <pmoravec@redhat.com> -Date: Fri, 31 Aug 2018 20:04:47 +0200 -Subject: [PATCH 2/2] [openstack_*] relax enabling of OSP RedHat plugins - -Allow automatic enabling of OSP packages also on containerized -environment. - -Relevant to: #1411 - -Signed-off-by: Pavel Moravec <pmoravec@redhat.com> -Signed-off-by: Bryn M. 
Reeves <bmr@redhat.com> ---- - sos/plugins/openstack_aodh.py | 8 +------- - sos/plugins/openstack_ceilometer.py | 10 +--------- - sos/plugins/openstack_cinder.py | 12 +----------- - sos/plugins/openstack_glance.py | 5 +---- - sos/plugins/openstack_heat.py | 10 +--------- - sos/plugins/openstack_horizon.py | 5 +---- - sos/plugins/openstack_instack.py | 14 +------------- - sos/plugins/openstack_ironic.py | 6 +----- - sos/plugins/openstack_keystone.py | 7 +------ - sos/plugins/openstack_manila.py | 9 +-------- - sos/plugins/openstack_neutron.py | 21 +-------------------- - sos/plugins/openstack_nova.py | 18 +----------------- - sos/plugins/openstack_octavia.py | 13 +++++++++++-- - sos/plugins/openstack_sahara.py | 7 +------ - sos/plugins/openstack_swift.py | 10 +--------- - sos/plugins/openstack_trove.py | 2 +- - 16 files changed, 26 insertions(+), 131 deletions(-) - -diff --git a/sos/plugins/openstack_aodh.py b/sos/plugins/openstack_aodh.py -index 9fcdf932..2c9057a6 100644 ---- a/sos/plugins/openstack_aodh.py -+++ b/sos/plugins/openstack_aodh.py -@@ -18,13 +18,7 @@ class OpenStackAodh(Plugin, RedHatPlugin): - plugin_name = "openstack_aodh" - profiles = ('openstack', 'openstack_controller') - -- packages = ( -- 'openstack-aodh-api', -- 'openstack-aodh-listener', -- 'openstack-aodh-notifier', -- 'openstack-aodh-evaluator,' -- 'openstack-aodh-common' -- ) -+ packages = ('openstack-selinux',) - - requires_root = False - -diff --git a/sos/plugins/openstack_ceilometer.py b/sos/plugins/openstack_ceilometer.py -index 3bdd74c8..bb89fa68 100644 ---- a/sos/plugins/openstack_ceilometer.py -+++ b/sos/plugins/openstack_ceilometer.py -@@ -86,15 +86,7 @@ class DebianCeilometer(OpenStackCeilometer, DebianPlugin, - - class RedHatCeilometer(OpenStackCeilometer, RedHatPlugin): - -- packages = ( -- 'openstack-ceilometer', -- 'openstack-ceilometer-api', -- 'openstack-ceilometer-central', -- 'openstack-ceilometer-collector', -- 'openstack-ceilometer-common', -- 'openstack-ceilometer-compute', -- 'python-ceilometerclient' -- ) -+ packages = ('openstack-selinux',) - - def setup(self): - super(RedHatCeilometer, self).setup() -diff --git a/sos/plugins/openstack_cinder.py b/sos/plugins/openstack_cinder.py -index f097fd5b..4fa753c4 100644 ---- a/sos/plugins/openstack_cinder.py -+++ b/sos/plugins/openstack_cinder.py -@@ -130,10 +130,6 @@ class DebianCinder(OpenStackCinder, DebianPlugin, UbuntuPlugin): - 'python-cinderclient' - ) - -- def check_enabled(self): -- self.cinder = self.is_installed("cinder-common") -- return self.cinder -- - def setup(self): - super(DebianCinder, self).setup() - -@@ -141,13 +137,7 @@ class DebianCinder(OpenStackCinder, DebianPlugin, UbuntuPlugin): - class RedHatCinder(OpenStackCinder, RedHatPlugin): - - cinder = False -- packages = ('openstack-cinder', -- 'python-cinder', -- 'python-cinderclient') -- -- def check_enabled(self): -- self.cinder = self.is_installed("openstack-cinder") -- return self.cinder -+ packages = ('openstack-selinux',) - - def setup(self): - super(RedHatCinder, self).setup() -diff --git a/sos/plugins/openstack_glance.py b/sos/plugins/openstack_glance.py -index fa68dd8e..bfb5f9fe 100644 ---- a/sos/plugins/openstack_glance.py -+++ b/sos/plugins/openstack_glance.py -@@ -130,9 +130,6 @@ class DebianGlance(OpenStackGlance, DebianPlugin, UbuntuPlugin): - - class RedHatGlance(OpenStackGlance, RedHatPlugin): - -- packages = ( -- 'openstack-glance', -- 'python-glanceclient' -- ) -+ packages = ('openstack-selinux',) - - # vim: set et ts=4 sw=4 : -diff --git 
a/sos/plugins/openstack_heat.py b/sos/plugins/openstack_heat.py -index 26f3f511..1dab72d0 100644 ---- a/sos/plugins/openstack_heat.py -+++ b/sos/plugins/openstack_heat.py -@@ -152,14 +152,6 @@ class DebianHeat(OpenStackHeat, DebianPlugin, UbuntuPlugin): - - class RedHatHeat(OpenStackHeat, RedHatPlugin): - -- packages = ( -- 'openstack-heat-api', -- 'openstack-heat-api-cfn', -- 'openstack-heat-api-cloudwatch', -- 'openstack-heat-cli', -- 'openstack-heat-common', -- 'openstack-heat-engine', -- 'python-heatclient' -- ) -+ packages = ('openstack-selinux',) - - # vim: set et ts=4 sw=4 : -diff --git a/sos/plugins/openstack_horizon.py b/sos/plugins/openstack_horizon.py -index 677a7c28..4299d8db 100644 ---- a/sos/plugins/openstack_horizon.py -+++ b/sos/plugins/openstack_horizon.py -@@ -103,10 +103,7 @@ class UbuntuHorizon(OpenStackHorizon, UbuntuPlugin): - - class RedHatHorizon(OpenStackHorizon, RedHatPlugin): - -- packages = ( -- 'python-django-horizon', -- 'openstack-dashboard' -- ) -+ packages = ('openstack-selinux',) - - def setup(self): - super(RedHatHorizon, self).setup() -diff --git a/sos/plugins/openstack_instack.py b/sos/plugins/openstack_instack.py -index cf90003e..37a75e02 100644 ---- a/sos/plugins/openstack_instack.py -+++ b/sos/plugins/openstack_instack.py -@@ -125,19 +125,7 @@ class OpenStackInstack(Plugin): - - class RedHatRDOManager(OpenStackInstack, RedHatPlugin): - -- packages = [ -- 'instack', -- 'instack-undercloud', -- 'openstack-tripleo', -- 'openstack-tripleo-common', -- 'openstack-tripleo-heat-templates', -- 'openstack-tripleo-image-elements', -- 'openstack-tripleo-puppet-elements', -- 'openstack-tripleo-ui', -- 'openstack-tripleo-validations', -- 'puppet-tripleo', -- 'python-tripleoclient' -- ] -+ packages = ('openstack-selinux',) - - def setup(self): - super(RedHatRDOManager, self).setup() -diff --git a/sos/plugins/openstack_ironic.py b/sos/plugins/openstack_ironic.py -index b4cdee6d..84055b67 100644 ---- a/sos/plugins/openstack_ironic.py -+++ b/sos/plugins/openstack_ironic.py -@@ -118,11 +118,7 @@ class DebianIronic(OpenStackIronic, DebianPlugin, UbuntuPlugin): - - class RedHatIronic(OpenStackIronic, RedHatPlugin): - -- packages = [ -- 'openstack-ironic-api', -- 'openstack-ironic-common', -- 'openstack-ironic-conductor', -- ] -+ packages = ('openstack-selinux',) - - discoverd_packages = [ - 'openstack-ironic-discoverd', -diff --git a/sos/plugins/openstack_keystone.py b/sos/plugins/openstack_keystone.py -index a6b1360f..76e4b380 100644 ---- a/sos/plugins/openstack_keystone.py -+++ b/sos/plugins/openstack_keystone.py -@@ -118,12 +118,7 @@ class DebianKeystone(OpenStackKeystone, DebianPlugin, UbuntuPlugin): - - class RedHatKeystone(OpenStackKeystone, RedHatPlugin): - -- packages = ( -- 'openstack-keystone', -- 'python-keystone', -- 'python-django-openstack-auth', -- 'python-keystoneclient' -- ) -+ packages = ('openstack-selinux',) - - def setup(self): - super(RedHatKeystone, self).setup() -diff --git a/sos/plugins/openstack_manila.py b/sos/plugins/openstack_manila.py -index ef926cda..e6409d00 100644 ---- a/sos/plugins/openstack_manila.py -+++ b/sos/plugins/openstack_manila.py -@@ -85,14 +85,7 @@ class DebianManila(OpenStackManila, DebianPlugin, UbuntuPlugin): - class RedHatManila(OpenStackManila, RedHatPlugin): - """OpenStackManila related information for Red Hat distributions.""" - -- packages = ( -- 'puppet-manila', -- 'openstack-manila', -- 'openstack-manila-share', -- 'python-manila', -- 'python-manilaclient', -- 'python-manila-tests' -- ) -+ packages = 
('openstack-selinux',) - - def setup(self): - super(RedHatManila, self).setup() -diff --git a/sos/plugins/openstack_neutron.py b/sos/plugins/openstack_neutron.py -index a5134c9f..9ae741f3 100644 ---- a/sos/plugins/openstack_neutron.py -+++ b/sos/plugins/openstack_neutron.py -@@ -120,26 +120,7 @@ class DebianNeutron(OpenStackNeutron, DebianPlugin, UbuntuPlugin): - - class RedHatNeutron(OpenStackNeutron, RedHatPlugin): - -- packages = [ -- 'openstack-neutron', -- 'openstack-neutron-linuxbridge' -- 'openstack-neutron-metaplugin', -- 'openstack-neutron-openvswitch', -- 'openstack-neutron-bigswitch', -- 'openstack-neutron-brocade', -- 'openstack-neutron-cisco', -- 'openstack-neutron-hyperv', -- 'openstack-neutron-midonet', -- 'openstack-neutron-nec' -- 'openstack-neutron-nicira', -- 'openstack-neutron-plumgrid', -- 'openstack-neutron-ryu', -- 'python-neutron', -- 'python-neutronclient' -- ] -- -- def check_enabled(self): -- return self.is_installed("openstack-neutron") -+ packages = ('openstack-selinux',) - - def setup(self): - super(RedHatNeutron, self).setup() -diff --git a/sos/plugins/openstack_nova.py b/sos/plugins/openstack_nova.py -index 77c3b49a..4fde7565 100644 ---- a/sos/plugins/openstack_nova.py -+++ b/sos/plugins/openstack_nova.py -@@ -211,23 +211,7 @@ class DebianNova(OpenStackNova, DebianPlugin, UbuntuPlugin): - class RedHatNova(OpenStackNova, RedHatPlugin): - - nova = False -- packages = ( -- 'openstack-nova-common', -- 'openstack-nova-network', -- 'openstack-nova-conductor', -- 'openstack-nova-conductor', -- 'openstack-nova-scheduler', -- 'openstack-nova-console', -- 'openstack-nova-novncproxy', -- 'openstack-nova-compute', -- 'openstack-nova-api', -- 'openstack-nova-cert', -- 'openstack-nova-cells', -- 'openstack-nova-objectstore', -- 'python-nova', -- 'python-novaclient', -- 'novnc' -- ) -+ packages = ('openstack-selinux',) - - def setup(self): - super(RedHatNova, self).setup() -diff --git a/sos/plugins/openstack_octavia.py b/sos/plugins/openstack_octavia.py -index 46a943a5..86a91dc1 100644 ---- a/sos/plugins/openstack_octavia.py -+++ b/sos/plugins/openstack_octavia.py -@@ -9,12 +9,11 @@ - from sos.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin - - --class OpenStackOctavia(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin): -+class OpenStackOctavia(Plugin): - """Openstack Octavia""" - - plugin_name = "openstack_octavia" - profiles = ('openstack', 'openstack_controller') -- packages = ('openstack-octavia-common',) - - var_puppet_gen = "/var/lib/config-data/puppet-generated/octavia" - -@@ -101,4 +100,14 @@ class OpenStackOctavia(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin): - regexp, r"\1*********" - ) - -+ -+class DebianOctavia(OpenStackOctavia, DebianPlugin, UbuntuPlugin): -+ -+ packages = ('openstack-octavia-common',) -+ -+ -+class RedHatOctavia(OpenStackOctavia, RedHatPlugin): -+ -+ packages = ('openstack-selinux',) -+ - # vim: set et ts=4 sw=4 : -diff --git a/sos/plugins/openstack_sahara.py b/sos/plugins/openstack_sahara.py -index cdb4b02d..83661b0f 100644 ---- a/sos/plugins/openstack_sahara.py -+++ b/sos/plugins/openstack_sahara.py -@@ -86,12 +86,7 @@ class DebianSahara(OpenStackSahara, DebianPlugin, UbuntuPlugin): - class RedHatSahara(OpenStackSahara, RedHatPlugin): - """OpenStack sahara related information for Red Hat distributions.""" - -- packages = ( -- 'openstack-sahara', -- 'openstack-sahara-api', -- 'openstack-sahara-engine', -- 'python-saharaclient' -- ) -+ packages = ('openstack-selinux',) - - def setup(self): - super(RedHatSahara, 
self).setup() -diff --git a/sos/plugins/openstack_swift.py b/sos/plugins/openstack_swift.py -index fdf101a9..6637bfa5 100644 ---- a/sos/plugins/openstack_swift.py -+++ b/sos/plugins/openstack_swift.py -@@ -91,14 +91,6 @@ class DebianSwift(OpenStackSwift, DebianPlugin, UbuntuPlugin): - - class RedHatSwift(OpenStackSwift, RedHatPlugin): - -- packages = ( -- 'openstack-swift', -- 'openstack-swift-account', -- 'openstack-swift-container', -- 'openstack-swift-object', -- 'openstack-swift-proxy', -- 'swift', -- 'python-swiftclient' -- ) -+ packages = ('openstack-selinux',) - - # vim: set et ts=4 sw=4 : -diff --git a/sos/plugins/openstack_trove.py b/sos/plugins/openstack_trove.py -index 6ec8aff8..699ae43d 100644 ---- a/sos/plugins/openstack_trove.py -+++ b/sos/plugins/openstack_trove.py -@@ -83,7 +83,7 @@ class DebianTrove(OpenStackTrove, DebianPlugin, UbuntuPlugin): - - class RedHatTrove(OpenStackTrove, RedHatPlugin): - -- packages = ['openstack-trove'] -+ packages = ('openstack-selinux',) - - def setup(self): - super(RedHatTrove, self).setup() --- -2.17.1 - diff --git a/SOURCES/sos-bz1637632-kernel-dont-collect-tracing-instance.patch b/SOURCES/sos-bz1637632-kernel-dont-collect-tracing-instance.patch deleted file mode 100644 index 058732d..0000000 --- a/SOURCES/sos-bz1637632-kernel-dont-collect-tracing-instance.patch +++ /dev/null @@ -1,33 +0,0 @@ -From d6379b5ba0f381ea8ec2403b9985100a946a5866 Mon Sep 17 00:00:00 2001 -From: Pavel Moravec <pmoravec@redhat.com> -Date: Mon, 8 Oct 2018 10:45:04 +0200 -Subject: [PATCH] [kernel] dont collect some tracing instance files - -As copying of them hangs. - -Resolves: #1445 - -Signed-off-by: Pavel Moravec <pmoravec@redhat.com> ---- - sos/plugins/kernel.py | 5 ++++- - 1 file changed, 4 insertions(+), 1 deletion(-) - -diff --git a/sos/plugins/kernel.py b/sos/plugins/kernel.py -index 73109326..558e7143 100644 ---- a/sos/plugins/kernel.py -+++ b/sos/plugins/kernel.py -@@ -93,7 +93,10 @@ class Kernel(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin): - '/sys/kernel/debug/tracing/events/*', - '/sys/kernel/debug/tracing/free_buffer', - '/sys/kernel/debug/tracing/trace_marker', -- '/sys/kernel/debug/tracing/trace_marker_raw' -+ '/sys/kernel/debug/tracing/trace_marker_raw', -+ '/sys/kernel/debug/tracing/instances/*/per_cpu/*/snapshot_raw', -+ '/sys/kernel/debug/tracing/instances/*/per_cpu/*/trace_pipe*', -+ '/sys/kernel/debug/tracing/instances/*/trace_pipe' - ]) - - self.add_copy_spec([ --- -2.17.1 - diff --git a/SOURCES/sos-bz1639166-pcp-pmlogger-no-limit.patch b/SOURCES/sos-bz1639166-pcp-pmlogger-no-limit.patch new file mode 100644 index 0000000..8c12a1b --- /dev/null +++ b/SOURCES/sos-bz1639166-pcp-pmlogger-no-limit.patch @@ -0,0 +1,30 @@ +From 380737d0bf4021434db4d5e479f0b8a2aece6ec9 Mon Sep 17 00:00:00 2001 +From: Pavel Moravec <pmoravec@redhat.com> +Date: Thu, 4 Apr 2019 10:43:24 +0200 +Subject: [PATCH] [pcp] collect pmlogger without a sizelimit + +sizelimit=None defaults to --log-size, use sizelimit=0 instead + +Resolves: #1632 + +Signed-off-by: Pavel Moravec <pmoravec@redhat.com> +--- + sos/plugins/pcp.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/sos/plugins/pcp.py b/sos/plugins/pcp.py +index da7158a6..da2a6611 100644 +--- a/sos/plugins/pcp.py ++++ b/sos/plugins/pcp.py +@@ -130,7 +130,7 @@ class Pcp(Plugin, RedHatPlugin, DebianPlugin): + pmlogger_ls = self.get_cmd_output_now("ls -t1 %s" % path) + if pmlogger_ls: + for line in open(pmlogger_ls).read().splitlines(): +- self.add_copy_spec(line, sizelimit=None) ++ 
self.add_copy_spec(line, sizelimit=0) + files_collected = files_collected + 1 + if self.countlimit and files_collected == self.countlimit: + break +-- +2.17.2 + diff --git a/SOURCES/sos-bz1656732-ovirt_node-plugin.patch b/SOURCES/sos-bz1656732-ovirt_node-plugin.patch deleted file mode 100644 index 8b2e3d8..0000000 --- a/SOURCES/sos-bz1656732-ovirt_node-plugin.patch +++ /dev/null @@ -1,68 +0,0 @@ -From 0cddb4c820d39cae8bf6681c644fa353b0c20800 Mon Sep 17 00:00:00 2001 -From: Nijin Ashok <nashok@redhat.com> -Date: Mon, 16 Jul 2018 14:42:43 +0530 -Subject: [PATCH] [ovirt_node] New plugin for oVirt Node - -oVirt Node is a small scaled down version used for hosting virtual -machines. The plugin collects node specific information like -upgrade log, the layer structure etc. - -Resolves: #1381 - -Signed-off-by: Nijin Ashok nashok@redhat.com -Signed-off-by: Bryn M. Reeves bmr@redhat.com ---- - sos/plugins/ovirt_node.py | 41 +++++++++++++++++++++++++++++++++++++++ - 1 file changed, 41 insertions(+) - create mode 100644 sos/plugins/ovirt_node.py - -diff --git a/sos/plugins/ovirt_node.py b/sos/plugins/ovirt_node.py -new file mode 100644 -index 00000000..ccb5d3c6 ---- /dev/null -+++ b/sos/plugins/ovirt_node.py -@@ -0,0 +1,41 @@ -+# Copyright (C) 2018 Red Hat, Inc., -+# This file is part of the sos project: https://github.com/sosreport/sos -+# -+# This copyrighted material is made available to anyone wishing to use, -+# modify, copy, or redistribute it subject to the terms and conditions of -+# version 2 of the GNU General Public License. -+# -+# See the LICENSE file in the source distribution for further information. -+ -+from sos.plugins import Plugin, RedHatPlugin -+ -+ -+class OvirtNode(Plugin, RedHatPlugin): -+ """oVirt Node specific information""" -+ -+ packages = ( -+ 'imgbased', -+ 'ovirt-node-ng-nodectl', -+ ) -+ -+ plugin_name = 'ovirt_node' -+ profiles = ('virt',) -+ -+ def setup(self): -+ -+ # Add log files -+ self.add_copy_spec([ -+ '/var/log/imgbased.log', -+ # Required for node versions < 4.2 -+ '/tmp/imgbased.log', -+ ]) -+ -+ # Collect runtime info -+ self.add_cmd_output([ -+ 'imgbase layout', -+ 'nodectl --machine-readable check', -+ 'nodectl info', -+ ]) -+ -+ -+# vim: expandtab tabstop=4 shiftwidth=4 --- -2.17.2 - diff --git a/SOURCES/sos-bz1656812-bump-release.patch b/SOURCES/sos-bz1656812-bump-release.patch new file mode 100644 index 0000000..0fead48 --- /dev/null +++ b/SOURCES/sos-bz1656812-bump-release.patch @@ -0,0 +1,40 @@ +From 9b1f9472e7b0b8a993f635c8d4c757b59c46ed0f Mon Sep 17 00:00:00 2001 +From: "Bryn M. Reeves" <bmr@redhat.com> +Date: Wed, 27 Mar 2019 21:00:36 +0000 +Subject: [PATCH] [sos] bump release + +Signed-off-by: Bryn M. 
Reeves <bmr@redhat.com> +--- + sos.spec | 2 +- + sos/__init__.py | 2 +- + 2 files changed, 2 insertions(+), 2 deletions(-) + +diff --git a/sos.spec b/sos.spec +index 95249670..68aedcfd 100644 +--- a/sos.spec ++++ b/sos.spec +@@ -2,7 +2,7 @@ + + Summary: A set of tools to gather troubleshooting information from a system + Name: sos +-Version: 3.6 ++Version: 3.7 + Release: 1%{?dist} + Group: Applications/System + Source0: http://people.redhat.com/breeves/sos/releases/sos-%{version}.tar.gz +diff --git a/sos/__init__.py b/sos/__init__.py +index c436bd20..dfc7ed5f 100644 +--- a/sos/__init__.py ++++ b/sos/__init__.py +@@ -25,7 +25,7 @@ if six.PY3: + else: + from ConfigParser import ConfigParser, ParsingError, Error + +-__version__ = "3.6" ++__version__ = "3.7" + + gettext_dir = "/usr/share/locale" + gettext_app = "sos" +-- +2.17.2 + diff --git a/SOURCES/sos-bz1658570-docker-podman-containers.patch b/SOURCES/sos-bz1658570-docker-podman-containers.patch deleted file mode 100644 index dd29950..0000000 --- a/SOURCES/sos-bz1658570-docker-podman-containers.patch +++ /dev/null @@ -1,186 +0,0 @@ -From 77c72b415feccd828fd7bc13caebf9841afc40c2 Mon Sep 17 00:00:00 2001 -From: "Bryn M. Reeves" <bmr@redhat.com> -Date: Mon, 3 Sep 2018 17:11:06 +0100 -Subject: [PATCH] [docker] combine docker 'inspect' and 'logs' loops - -We're iterating over all the containers: might as well only do it -one time. - -Related: #1406, #1407 - -Signed-off-by: Bryn M. Reeves <bmr@redhat.com> ---- - sos/plugins/docker.py | 4 +--- - 1 file changed, 1 insertion(+), 3 deletions(-) - -diff --git a/sos/plugins/docker.py b/sos/plugins/docker.py -index a44264a4..5b2acff5 100644 ---- a/sos/plugins/docker.py -+++ b/sos/plugins/docker.py -@@ -80,9 +80,7 @@ class Docker(Plugin): - if insp: - for container in insp: - self.add_cmd_output("docker inspect %s" % container) -- -- if self.get_option('logs'): -- for container in insp: -+ if self.get_option('logs'): - self.add_cmd_output("docker logs -t %s" % container) - - --- -2.17.2 - -From e3cfb1428592390166237e715471bb62d9bd9db6 Mon Sep 17 00:00:00 2001 -From: Daniel J Walsh <dwalsh@redhat.com> -Date: Wed, 29 Aug 2018 06:50:10 -0400 -Subject: [PATCH] [podman] Add support for gathering information on podman - containers - -Resolves: #1407. - -Signed-off-by: Daniel J Walsh <dwalsh@redhat.com> -Signed-off-by: Bryn M. Reeves <bmr@redhat.com> ---- - sos/plugins/podman.py | 79 +++++++++++++++++++++++++++++++++++++++++++ - 1 file changed, 79 insertions(+) - create mode 100644 sos/plugins/podman.py - -diff --git a/sos/plugins/podman.py b/sos/plugins/podman.py -new file mode 100644 -index 00000000..c43246fc ---- /dev/null -+++ b/sos/plugins/podman.py -@@ -0,0 +1,79 @@ -+# Copyright (C) 2018 Red Hat, Inc. Daniel Walsh <dwalsh@redhat.com> -+ -+# This file is part of the sos project: https://github.com/sosreport/sos -+# -+# This copyrighted material is made available to anyone wishing to use, -+# modify, copy, or redistribute it subject to the terms and conditions of -+# version 2 of the GNU General Public License. -+# -+# See the LICENSE file in the source distribution for further information. 
-+ -+from sos.plugins import Plugin, RedHatPlugin, UbuntuPlugin -+ -+ -+class Podman(Plugin): -+ -+ """Podman containers -+ """ -+ -+ plugin_name = 'podman' -+ profiles = ('container',) -+ packages = ('podman') -+ -+ option_list = [ -+ ("all", "enable capture for all containers, even containers " -+ "that have terminated", 'fast', False), -+ ("logs", "capture logs for running containers", -+ 'fast', False), -+ ("size", "capture image sizes for podman ps", 'slow', False) -+ ] -+ -+ def setup(self): -+ self.add_copy_spec([ -+ "/etc/containers/registries.conf", -+ "/etc/containers/storage.conf", -+ "/etc/containers/mounts.conf", -+ "/etc/containers/policy.json", -+ ]) -+ -+ subcmds = [ -+ 'info', -+ 'images', -+ 'pod ps', -+ 'pod ps -a', -+ 'ps', -+ 'ps -a', -+ 'stats --no-stream', -+ 'version', -+ ] -+ -+ self.add_cmd_output(["podman %s" % s for s in subcmds]) -+ -+ # separately grab ps -s as this can take a *very* long time -+ if self.get_option('size'): -+ self.add_cmd_output('podman ps -as') -+ -+ self.add_journal(units="podman") -+ self.add_cmd_output("ls -alhR /etc/cni") -+ -+ ps_cmd = 'podman ps -q' -+ if self.get_option('all'): -+ ps_cmd = "%s -a" % ps_cmd -+ -+ img_cmd = 'podman images -q' -+ insp = set() -+ -+ for icmd in [ps_cmd, img_cmd]: -+ result = self.get_command_output(icmd) -+ if result['status'] == 0: -+ for con in result['output'].splitlines(): -+ insp.add(con) -+ -+ if insp: -+ for container in insp: -+ self.add_cmd_output("podman inspect %s" % container) -+ if self.get_option('logs'): -+ self.add_cmd_output("podman logs -t %s" % container) -+ -+ -+# vim: set et ts=4 sw=4 : --- -2.17.2 - -From 1401c7153dda9bd0558035ba0692cf05a93ca419 Mon Sep 17 00:00:00 2001 -From: Pavel Moravec <pmoravec@redhat.com> -Date: Tue, 6 Nov 2018 08:13:52 +0100 -Subject: [PATCH] [podman] allow the plugin for RedHatPlugin and UbuntuPlugin - -Until Podman inherits RedHatPlugin and/or UbuntuPlugin, the plugin -can not be executed on underlying distros. - -Further, remove one redundant test as "for container in insp" will -work properly also for empty "insp". 
- -Resolves: #1473 - -Signed-off-by: Pavel Moravec <pmoravec@redhat.com> ---- - sos/plugins/podman.py | 11 +++++------ - 1 file changed, 5 insertions(+), 6 deletions(-) - -diff --git a/sos/plugins/podman.py b/sos/plugins/podman.py -index c43246fc..72e22558 100644 ---- a/sos/plugins/podman.py -+++ b/sos/plugins/podman.py -@@ -11,7 +11,7 @@ - from sos.plugins import Plugin, RedHatPlugin, UbuntuPlugin - - --class Podman(Plugin): -+class Podman(Plugin, RedHatPlugin, UbuntuPlugin): - - """Podman containers - """ -@@ -69,11 +69,10 @@ class Podman(Plugin): - for con in result['output'].splitlines(): - insp.add(con) - -- if insp: -- for container in insp: -- self.add_cmd_output("podman inspect %s" % container) -- if self.get_option('logs'): -- self.add_cmd_output("podman logs -t %s" % container) -+ for container in insp: -+ self.add_cmd_output("podman inspect %s" % container) -+ if self.get_option('logs'): -+ self.add_cmd_output("podman logs -t %s" % container) - - - # vim: set et ts=4 sw=4 : --- -2.17.2 - diff --git a/SOURCES/sos-bz1658571-postgresql-collect-full-dump.patch b/SOURCES/sos-bz1658571-postgresql-collect-full-dump.patch deleted file mode 100644 index f2a06ab..0000000 --- a/SOURCES/sos-bz1658571-postgresql-collect-full-dump.patch +++ /dev/null @@ -1,76 +0,0 @@ -From 47e6b3d92c8a13560b248e6f0e2ffb334b547d07 Mon Sep 17 00:00:00 2001 -From: Yedidyah Bar David <didi@redhat.com> -Date: Tue, 4 Dec 2018 13:08:44 +0200 -Subject: [PATCH] [Plugin] Obey sizelimit=0 - -If sizelimit is 0, do not limit. Only use the default if it's None. - -Bug-Url: https://bugzilla.redhat.com/1654068 -Signed-off-by: Yedidyah Bar David <didi@redhat.com> ---- - sos/plugins/__init__.py | 6 ++++-- - 1 file changed, 4 insertions(+), 2 deletions(-) - -diff --git a/sos/plugins/__init__.py b/sos/plugins/__init__.py -index 7d2a8b2d..97f3cc59 100644 ---- a/sos/plugins/__init__.py -+++ b/sos/plugins/__init__.py -@@ -569,7 +569,8 @@ class Plugin(object): - a single file the file will be tailed to meet sizelimit. If the first - file in a glob is too large it will be tailed to meet the sizelimit. - """ -- sizelimit = sizelimit or self.get_option("log_size") -+ if sizelimit is None: -+ sizelimit = self.get_option("log_size") - - if self.get_option('all_logs'): - sizelimit = None -@@ -703,7 +704,8 @@ class Plugin(object): - cmds = [cmds] - if len(cmds) > 1 and (suggest_filename or root_symlink): - self._log_warn("ambiguous filename or symlink for command list") -- sizelimit = sizelimit or self.get_option("log_size") -+ if sizelimit is None: -+ sizelimit = self.get_option("log_size") - for cmd in cmds: - self._add_cmd_output(cmd, suggest_filename=suggest_filename, - root_symlink=root_symlink, timeout=timeout, --- -2.17.2 - -From 254d93499d64acaff5103e15c25649d418004737 Mon Sep 17 00:00:00 2001 -From: Yedidyah Bar David <didi@redhat.com> -Date: Tue, 4 Dec 2018 13:10:32 +0200 -Subject: [PATCH] [postgresql] Do not limit dump size - -In principle, this might be risky - if a database is huge, we might not -want to collect all of it. But there is no sense in collecting only its -tail. If this turns out problematic, a future patch might check db size -and do not collect it at all if it's too large. - -Bug-Url: https://bugzilla.redhat.com/1654068 - -Resolves: #1497 - -Signed-off-by: Yedidyah Bar David <didi@redhat.com> -Signed-off-by: Bryn M. 
Reeves <bmr@redhat.com> ---- - sos/plugins/postgresql.py | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/sos/plugins/postgresql.py b/sos/plugins/postgresql.py -index d47f7e8b..aef431f8 100644 ---- a/sos/plugins/postgresql.py -+++ b/sos/plugins/postgresql.py -@@ -64,7 +64,7 @@ class PostgreSQL(Plugin): - if scl is not None: - cmd = self.convert_cmd_scl(scl, cmd) - self.add_cmd_output(cmd, suggest_filename=filename, -- binary=True) -+ binary=True, sizelimit=0) - - else: # no password in env or options - self.soslog.warning( --- -2.17.2 - diff --git a/SOURCES/sos-bz1669045-rhcos-policy-and-plugins.patch b/SOURCES/sos-bz1669045-rhcos-policy-and-plugins.patch deleted file mode 100644 index 6e008df..0000000 --- a/SOURCES/sos-bz1669045-rhcos-policy-and-plugins.patch +++ /dev/null @@ -1,302 +0,0 @@ -From fa06bc09c95c52565e29173535c7422608e9a29b Mon Sep 17 00:00:00 2001 -From: Jake Hunsaker <jhunsake@redhat.com> -Date: Fri, 4 Jan 2019 13:35:34 -0500 -Subject: [PATCH 1/4] [redhat] Add RHCOS policy - -Adds a policy for Red Hat CoreOS. - -Signed-off-by: Jake Hunsaker <jhunsake@redhat.com> ---- - sos/policies/redhat.py | 42 ++++++++++++++++++++++++++++++++++++++++++ - 1 file changed, 42 insertions(+) - -diff --git a/sos/policies/redhat.py b/sos/policies/redhat.py -index e1e417f3..ea80704f 100644 ---- a/sos/policies/redhat.py -+++ b/sos/policies/redhat.py -@@ -379,6 +379,48 @@ organization before being passed to any third party. - return self.find_preset(ATOMIC) - - -+class RedHatCoreOSPolicy(RHELPolicy): -+ distro = "Red Hat CoreOS" -+ msg = _("""\ -+This command will collect diagnostic and configuration \ -+information from this %(distro)s system. -+ -+An archive containing the collected information will be \ -+generated in %(tmpdir)s and may be provided to a %(vendor)s \ -+support representative. -+ -+Any information provided to %(vendor)s will be treated in \ -+accordance with the published support policies at:\n -+ %(vendor_url)s -+ -+The generated archive may contain data considered sensitive \ -+and its content should be reviewed by the originating \ -+organization before being passed to any third party. -+%(vendor_text)s -+""") -+ -+ def __init__(self, sysroot=None): -+ super(RedHatCoreOSPolicy, self).__init__(sysroot=sysroot) -+ -+ @classmethod -+ def check(cls): -+ coreos = False -+ if ENV_HOST_SYSROOT not in os.environ: -+ return coreos -+ host_release = os.environ[ENV_HOST_SYSROOT] + cls._redhat_release -+ try: -+ for line in open(host_release, 'r').read().splitlines(): -+ coreos |= 'Red Hat CoreOS' in line -+ except IOError: -+ pass -+ return coreos -+ -+ def probe_preset(self): -+ # As of the creation of this policy, RHCOS is only available for -+ # RH OCP environments. -+ return self.find_preset(RHOCP) -+ -+ - class FedoraPolicy(RedHatPolicy): - - distro = "Fedora" --- -2.17.2 - - -From 3335f265213d7457d17139ee172bf21f1a66c229 Mon Sep 17 00:00:00 2001 -From: "Bryn M. Reeves" <bmr@redhat.com> -Date: Fri, 18 Jan 2019 18:03:21 +0000 -Subject: [PATCH 2/4] [policies] factor out Red Hat disclaimer text - -Rather than repeating the same boilerplate disclaimer text in each -Red Hat policy class, define it once as a string, and then cat it -into each policy that requires a distinct preamble. - -Signed-off-by: Bryn M. 
Reeves <bmr@redhat.com> ---- - sos/policies/redhat.py | 48 ++++++++++++++---------------------------- - 1 file changed, 16 insertions(+), 32 deletions(-) - -diff --git a/sos/policies/redhat.py b/sos/policies/redhat.py -index ea80704f..1d1606b6 100644 ---- a/sos/policies/redhat.py -+++ b/sos/policies/redhat.py -@@ -229,6 +229,19 @@ rhel_presets = { - note=NOTE_TIME, opts=_opts_verify), - } - -+# Legal disclaimer text for Red Hat products -+disclaimer_text = """ -+Any information provided to %(vendor)s will be treated in \ -+accordance with the published support policies at:\n -+ %(vendor_url)s -+ -+The generated archive may contain data considered sensitive \ -+and its content should be reviewed by the originating \ -+organization before being passed to any third party. -+ -+No changes will be made to system configuration. -+""" -+ - - class RHELPolicy(RedHatPolicy): - distro = RHEL_RELEASE_STR -@@ -242,18 +255,7 @@ applications. - An archive containing the collected information will be \ - generated in %(tmpdir)s and may be provided to a %(vendor)s \ - support representative. -- --Any information provided to %(vendor)s will be treated in \ --accordance with the published support policies at:\n -- %(vendor_url)s -- --The generated archive may contain data considered sensitive \ --and its content should be reviewed by the originating \ --organization before being passed to any third party. -- --No changes will be made to system configuration. --%(vendor_text)s --""") -+""" + disclaimer_text + "%(vendor_text)s\n") - - def __init__(self, sysroot=None): - super(RHELPolicy, self).__init__(sysroot=sysroot) -@@ -342,16 +344,7 @@ information from this %(distro)s system. - An archive containing the collected information will be \ - generated in %(tmpdir)s and may be provided to a %(vendor)s \ - support representative. -- --Any information provided to %(vendor)s will be treated in \ --accordance with the published support policies at:\n -- %(vendor_url)s -- --The generated archive may contain data considered sensitive \ --and its content should be reviewed by the originating \ --organization before being passed to any third party. --%(vendor_text)s --""") -+""" + disclaimer_text + "%(vendor_text)s\n") - - def __init__(self, sysroot=None): - super(RedHatAtomicPolicy, self).__init__(sysroot=sysroot) -@@ -388,16 +381,7 @@ information from this %(distro)s system. - An archive containing the collected information will be \ - generated in %(tmpdir)s and may be provided to a %(vendor)s \ - support representative. -- --Any information provided to %(vendor)s will be treated in \ --accordance with the published support policies at:\n -- %(vendor_url)s -- --The generated archive may contain data considered sensitive \ --and its content should be reviewed by the originating \ --organization before being passed to any third party. --%(vendor_text)s --""") -+""" + disclaimer_text + "%(vendor_text)s\n") - - def __init__(self, sysroot=None): - super(RedHatCoreOSPolicy, self).__init__(sysroot=sysroot) --- -2.17.2 - - -From ff9b64ffb383b5b57cbba6de665d2b7794849be7 Mon Sep 17 00:00:00 2001 -From: Jake Hunsaker <jhunsake@redhat.com> -Date: Fri, 4 Jan 2019 14:43:05 -0500 -Subject: [PATCH 3/4] [rhcos] Add new plugin - -Adds a new plugin for Red Hat CoreOS - -Resolves: #1528 - -Signed-off-by: Jake Hunsaker <jhunsake@redhat.com> -Signed-off-by: Bryn M. 
Reeves <bmr@redhat.com> ---- - sos/plugins/rhcos.py | 30 ++++++++++++++++++++++++++++++ - 1 file changed, 30 insertions(+) - create mode 100644 sos/plugins/rhcos.py - -diff --git a/sos/plugins/rhcos.py b/sos/plugins/rhcos.py -new file mode 100644 -index 00000000..de9af9df ---- /dev/null -+++ b/sos/plugins/rhcos.py -@@ -0,0 +1,30 @@ -+# Copyright (C) 2019 Red Hat, Inc., Jake Hunsaker <jhunsake@redhat.com> -+ -+# This file is part of the sos project: https://github.com/sosreport/sos -+# -+# This copyrighted material is made available to anyone wishing to use, -+# modify, copy, or redistribute it subject to the terms and conditions of -+# version 2 of the GNU General Public License. -+# -+# See the LICENSE file in the source distribution for further information. -+ -+from sos.plugins import Plugin, RedHatPlugin -+ -+ -+class RHCoreOS(Plugin, RedHatPlugin): -+ """Red Hat CoreOS""" -+ -+ plugin_name = 'rhcos' -+ packages = ('redhat-release-coreos', 'coreos-metadata') -+ -+ def setup(self): -+ units = ['coreos-growpart', 'coreos-firstboot-complete'] -+ for unit in units: -+ self.add_journal(unit) -+ -+ self.add_cmd_output( -+ 'coreos-metadata --cmdline --attributes /dev/stdout', -+ timeout=60 -+ ) -+ -+# vim: set et ts=4 sw=4 : --- -2.17.2 - - -From 12f12d490866587b254cdf182585529714b7e5bc Mon Sep 17 00:00:00 2001 -From: Jake Hunsaker <jhunsake@redhat.com> -Date: Fri, 4 Jan 2019 15:02:55 -0500 -Subject: [PATCH 4/4] [rpmostree] Add new plugin - -Adds a new plugin for rpm-ostree, which is no longer limited to use in -Atomic Host. - -Resolves: #1529 - -Signed-off-by: Jake Hunsaker <jhunsake@redhat.com> -Signed-off-by: Bryn M. Reeves <bmr@redhat.com> ---- - sos/plugins/atomichost.py | 1 - - sos/plugins/rpmostree.py | 40 +++++++++++++++++++++++++++++++++++++++ - 2 files changed, 40 insertions(+), 1 deletion(-) - create mode 100644 sos/plugins/rpmostree.py - -diff --git a/sos/plugins/atomichost.py b/sos/plugins/atomichost.py -index deecba87..0c1f4026 100644 ---- a/sos/plugins/atomichost.py -+++ b/sos/plugins/atomichost.py -@@ -25,7 +25,6 @@ class AtomicHost(Plugin, RedHatPlugin): - return self.policy.in_container() - - def setup(self): -- self.add_copy_spec("/etc/ostree/remotes.d") - self.add_cmd_output("atomic host status") - - if self.get_option('info'): -diff --git a/sos/plugins/rpmostree.py b/sos/plugins/rpmostree.py -new file mode 100644 -index 00000000..3c6872c2 ---- /dev/null -+++ b/sos/plugins/rpmostree.py -@@ -0,0 +1,40 @@ -+# Copyright (C) 2019 Red Hat, Inc., Jake Hunsaker <jhunsake@redhat.com> -+ -+# This file is part of the sos project: https://github.com/sosreport/sos -+# -+# This copyrighted material is made available to anyone wishing to use, -+# modify, copy, or redistribute it subject to the terms and conditions of -+# version 2 of the GNU General Public License. -+# -+# See the LICENSE file in the source distribution for further information. 
-+ -+from sos.plugins import Plugin, RedHatPlugin -+ -+ -+class Rpmostree(Plugin, RedHatPlugin): -+ """rpm-ostree image/package system""" -+ -+ plugin_name = 'rpmostree' -+ packages = ('rpm-ostree',) -+ -+ def setup(self): -+ self.add_copy_spec('/etc/ostree/remotes.d/') -+ -+ subcmds = [ -+ 'status', -+ 'db list', -+ 'db diff', -+ '--version' -+ ] -+ -+ self.add_cmd_output(["rpm-ostree %s" % subcmd for subcmd in subcmds]) -+ -+ units = [ -+ 'rpm-ostreed', -+ 'rpm-ostreed-automatic', -+ 'rpm-ostree-bootstatus' -+ ] -+ for unit in units: -+ self.add_journal(unit) -+ -+# vim: set et ts=4 sw=4 : --- -2.17.2 - diff --git a/SOURCES/sos-bz1679238-crio-plugin.patch b/SOURCES/sos-bz1679238-crio-plugin.patch deleted file mode 100644 index 71dd4f7..0000000 --- a/SOURCES/sos-bz1679238-crio-plugin.patch +++ /dev/null @@ -1,126 +0,0 @@ -From 64d7cccf48aee9a07e2e4c5237034638cae30391 Mon Sep 17 00:00:00 2001 -From: Daniel J Walsh <dwalsh@redhat.com> -Date: Wed, 29 Aug 2018 06:51:21 -0400 -Subject: [PATCH] [crio] Add support for gathering information on cri-o - containers - -Signed-off-by: Daniel J Walsh <dwalsh@redhat.com> ---- - sos/plugins/crio.py | 74 +++++++++++++++++++++++++++++++++++++++++++++ - 1 file changed, 74 insertions(+) - create mode 100644 sos/plugins/crio.py - -diff --git a/sos/plugins/crio.py b/sos/plugins/crio.py -new file mode 100644 -index 00000000..f3e9d842 ---- /dev/null -+++ b/sos/plugins/crio.py -@@ -0,0 +1,74 @@ -+# Copyright (C) 2018 Red Hat, Inc. Daniel Walsh <dwalsh@redhat.com> -+ -+# This file is part of the sos project: https://github.com/sosreport/sos -+# -+# This copyrighted material is made available to anyone wishing to use, -+# modify, copy, or redistribute it subject to the terms and conditions of -+# version 2 of the GNU General Public License. -+# -+# See the LICENSE file in the source distribution for further information. 
-+ -+from sos.plugins import Plugin, RedHatPlugin, UbuntuPlugin -+ -+ -+class CRIO(Plugin): -+ -+ """CRI-O containers -+ """ -+ -+ plugin_name = 'crio' -+ profiles = ('container',) -+ packages = ('cri-o', "cri-tools") -+ -+ option_list = [ -+ ("all", "enable capture for all containers, even containers " -+ "that have terminated", 'fast', False), -+ ("logs", "capture logs for running containers", -+ 'fast', False), -+ ] -+ -+ def setup(self): -+ self.add_copy_spec([ -+ "/etc/containers/registries.conf", -+ "/etc/containers/storage.conf", -+ "/etc/containers/mounts.conf", -+ "/etc/containers/policy.json", -+ "/etc/crio/crio.conf", -+ "/etc/crio/seccomp.json", -+ "/etc/systemd/system/cri-o.service", -+ ]) -+ -+ subcmds = [ -+ 'info', -+ 'images', -+ 'pods', -+ 'ps', -+ 'ps -a', -+ 'stats', -+ 'version', -+ ] -+ -+ self.add_cmd_output(["crictl %s" % s for s in subcmds]) -+ self.add_journal(units="cri-o") -+ self.add_cmd_output("ls -alhR /etc/cni") -+ -+ ps_cmd = 'crictl ps --quiet' -+ if self.get_option('all'): -+ ps_cmd = "%s -a" % ps_cmd -+ -+ img_cmd = 'cri-o images --quiet' -+ insp = set() -+ -+ for icmd in [ps_cmd, img_cmd]: -+ result = self.get_command_output(icmd) -+ if result['status'] == 0: -+ for con in result['output'].splitlines(): -+ insp.add(con) -+ -+ if insp: -+ for container in insp: -+ self.add_cmd_output("crictl inspect %s" % container) -+ if self.get_option('logs'): -+ self.add_cmd_output("crictl logs -t %s" % container) -+ -+# vim: set et ts=4 sw=4 : --- -2.17.2 - -From 36a82723dfc2734b18eede4b75708595345c9d7a Mon Sep 17 00:00:00 2001 -From: Jake Hunsaker <jhunsake@redhat.com> -Date: Mon, 25 Feb 2019 11:50:50 -0500 -Subject: [PATCH] [crio] Add tagging classes - -Adds tagging classes so plugin will run on Red Hat and Ubuntu based -systems. - -Resolves: #1578 - -Signed-off-by: Jake Hunsaker <jhunsake@redhat.com> ---- - sos/plugins/crio.py | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/sos/plugins/crio.py b/sos/plugins/crio.py -index f3e9d842..7afdf047 100644 ---- a/sos/plugins/crio.py -+++ b/sos/plugins/crio.py -@@ -11,7 +11,7 @@ - from sos.plugins import Plugin, RedHatPlugin, UbuntuPlugin - - --class CRIO(Plugin): -+class CRIO(Plugin, RedHatPlugin, UbuntuPlugin): - - """CRI-O containers - """ --- -2.17.2 - diff --git a/SOURCES/sos-bz1690999-docker-skip-system-df.patch b/SOURCES/sos-bz1690999-docker-skip-system-df.patch deleted file mode 100644 index a49443b..0000000 --- a/SOURCES/sos-bz1690999-docker-skip-system-df.patch +++ /dev/null @@ -1,43 +0,0 @@ -From 372e9389e83befbb9d48ae9bebd83c7dde87d95c Mon Sep 17 00:00:00 2001 -From: Jake Hunsaker <jhunsake@redhat.com> -Date: Wed, 20 Mar 2019 14:48:21 +0000 -Subject: [PATCH] [docker] do not collect 'system df' by default - -Gates collection of 'docker system df' behind the docker.size option -as this command can take a long time to run just like 'ps -as'. - -Related: #1580 - -Signed-off-by: Jake Hunsaker <jhunsake@redhat.com> -Signed-off-by: Bryn M. 
Reeves <bmr@redhat.com> ---- - sos/plugins/docker.py | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - -diff --git a/sos/plugins/docker.py b/sos/plugins/docker.py -index d8c854f9..80c4946f 100644 ---- a/sos/plugins/docker.py -+++ b/sos/plugins/docker.py -@@ -49,7 +49,6 @@ class Docker(Plugin): - 'ps', - 'ps -a', - 'stats --no-stream', -- 'system df', - 'version', - 'volume ls' - ] -@@ -48,9 +48,10 @@ class Docker(Plugin): - for subcmd in subcmds: - self.add_cmd_output("docker %s" % subcmd) - -- # separately grab ps -s as this can take a *very* long time -+ # separately grab these separately as they can take a *very* long time - if self.get_option('size'): - self.add_cmd_output('docker ps -as') -+ self.add_cmd_output('docker system df') - - self.add_journal(units="docker") - self.add_cmd_output("ls -alhR /etc/docker") --- -2.17.2 - diff --git a/SOURCES/sos-bz1697813-plugin-vs-command-timeouts.patch b/SOURCES/sos-bz1697813-plugin-vs-command-timeouts.patch new file mode 100644 index 0000000..00e896e --- /dev/null +++ b/SOURCES/sos-bz1697813-plugin-vs-command-timeouts.patch @@ -0,0 +1,45 @@ +From 7c8c45dad3481cfaae3d3af9c188218aa14a3a6a Mon Sep 17 00:00:00 2001 +From: Pavel Moravec <pmoravec@redhat.com> +Date: Tue, 9 Apr 2019 09:50:34 +0200 +Subject: [PATCH] [foreman,satellite] increase plugin default timeouts + +Those two plugins call commands with bigger timeouts than the default +plugin timeout is. That can unexpectedly kill the plugin execution when +the commands execution took longer than the plugin timeout (but within +cmd timeout). + +Resolves: #1642 + +Signed-off-by: Pavel Moravec <pmoravec@redhat.com> +--- + sos/plugins/foreman.py | 1 + + sos/plugins/satellite.py | 1 + + 2 files changed, 2 insertions(+) + +diff --git a/sos/plugins/foreman.py b/sos/plugins/foreman.py +index a1e937f3..8bcd26bd 100644 +--- a/sos/plugins/foreman.py ++++ b/sos/plugins/foreman.py +@@ -19,6 +19,7 @@ class Foreman(Plugin): + """ + + plugin_name = 'foreman' ++ plugin_timeout = 1800 + profiles = ('sysmgmt',) + packages = ('foreman', 'foreman-proxy') + option_list = [ +diff --git a/sos/plugins/satellite.py b/sos/plugins/satellite.py +index c50c2ec7..83733076 100644 +--- a/sos/plugins/satellite.py ++++ b/sos/plugins/satellite.py +@@ -14,6 +14,7 @@ class Satellite(Plugin, RedHatPlugin): + """ + + plugin_name = 'satellite' ++ plugin_timeout = 1200 + profiles = ('sysmgmt',) + verify_packages = ('spacewalk.*',) + satellite = False +-- +2.17.2 + diff --git a/SOURCES/sos-bz1697854-plugopts-default-datatypes.patch b/SOURCES/sos-bz1697854-plugopts-default-datatypes.patch new file mode 100644 index 0000000..a5d1036 --- /dev/null +++ b/SOURCES/sos-bz1697854-plugopts-default-datatypes.patch @@ -0,0 +1,122 @@ +From c71b41547442d23daf5c3bf88450151d13903214 Mon Sep 17 00:00:00 2001 +From: Pavel Moravec <pmoravec@redhat.com> +Date: Thu, 4 Apr 2019 13:54:18 +0200 +Subject: [PATCH] [maas,mysql,npm,pacemaker,postgresql] fix plugopts data types + +With new enforcement of implicit data type for plugin options, the +plugopts must have proper data types of default values and plugins must work +with them accordingly (in tests or so). 
+ +Resolves: #1635 + +Signed-off-by: Pavel Moravec <pmoravec@redhat.com> +--- + sos/plugins/maas.py | 6 +++--- + sos/plugins/mysql.py | 2 +- + sos/plugins/npm.py | 4 ++-- + sos/plugins/pacemaker.py | 4 ++-- + sos/plugins/postgresql.py | 6 +++--- + 5 files changed, 11 insertions(+), 11 deletions(-) + +diff --git a/sos/plugins/maas.py b/sos/plugins/maas.py +index f8305406..ea038e86 100644 +--- a/sos/plugins/maas.py ++++ b/sos/plugins/maas.py +@@ -21,10 +21,10 @@ class Maas(Plugin, UbuntuPlugin): + + option_list = [ + ('profile-name', +- 'The name with which you will later refer to this remote', '', False), +- ('url', 'The URL of the remote API', '', False), ++ 'The name with which you will later refer to this remote', '', ''), ++ ('url', 'The URL of the remote API', '', ''), + ('credentials', +- 'The credentials, also known as the API key', '', False) ++ 'The credentials, also known as the API key', '', '') + ] + + def _has_login_options(self): +diff --git a/sos/plugins/mysql.py b/sos/plugins/mysql.py +index 49bc4168..411d90b8 100644 +--- a/sos/plugins/mysql.py ++++ b/sos/plugins/mysql.py +@@ -22,7 +22,7 @@ class Mysql(Plugin): + + option_list = [ + ("dbuser", "username for database dumps", "", "mysql"), +- ("dbpass", "password for database dumps" + pw_warn_text, "", False), ++ ("dbpass", "password for database dumps" + pw_warn_text, "", ""), + ("dbdump", "collect a database dump", "", False) + ] + +diff --git a/sos/plugins/npm.py b/sos/plugins/npm.py +index 0fc95801..ca00d0c0 100644 +--- a/sos/plugins/npm.py ++++ b/sos/plugins/npm.py +@@ -25,7 +25,7 @@ class Npm(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin, SuSEPlugin): + option_list = [("project_path", + 'List npm modules of a project specified by path', + 'fast', +- 0)] ++ '')] + + # in Fedora, Debian, Ubuntu and Suse the package is called npm + packages = ('npm',) +@@ -79,7 +79,7 @@ class Npm(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin, SuSEPlugin): + self.add_string_as_file(json.dumps(output), outfn) + + def setup(self): +- if self.get_option("project_path") != 0: ++ if self.get_option("project_path"): + project_path = os.path.abspath(os.path.expanduser( + self.get_option("project_path"))) + self._get_npm_output("npm ls --json", "npm_ls_project", +diff --git a/sos/plugins/pacemaker.py b/sos/plugins/pacemaker.py +index a1b64ea5..940389ee 100644 +--- a/sos/plugins/pacemaker.py ++++ b/sos/plugins/pacemaker.py +@@ -25,7 +25,7 @@ class Pacemaker(Plugin): + ) + + option_list = [ +- ("crm_from", "specify the start time for crm_report", "fast", False), ++ ("crm_from", "specify the start time for crm_report", "fast", ''), + ("crm_scrub", "enable password scrubbing for crm_report", "", True), + ] + +@@ -87,7 +87,7 @@ class Pacemaker(Plugin): + # time in order to collect data. 
+ crm_from = (datetime.today() - + timedelta(hours=72)).strftime("%Y-%m-%d %H:%m:%S") +- if self.get_option("crm_from") is not False: ++ if self.get_option("crm_from"): + if re.match(r'\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}', + str(self.get_option("crm_from"))): + crm_from = self.get_option("crm_from") +diff --git a/sos/plugins/postgresql.py b/sos/plugins/postgresql.py +index 1698b62f..a04dca8f 100644 +--- a/sos/plugins/postgresql.py ++++ b/sos/plugins/postgresql.py +@@ -31,7 +31,7 @@ class PostgreSQL(Plugin): + option_list = [ + ('pghome', 'PostgreSQL server home directory.', '', '/var/lib/pgsql'), + ('username', 'username for pg_dump', '', 'postgres'), +- ('password', 'password for pg_dump' + password_warn_text, '', False), ++ ('password', 'password for pg_dump' + password_warn_text, '', ''), + ('dbname', 'database name to dump for pg_dump', '', ''), + ('dbhost', 'database hostname/IP (do not use unix socket)', '', ''), + ('dbport', 'database server port number', '', '5432') +@@ -43,8 +43,8 @@ class PostgreSQL(Plugin): + # We're only modifying this for ourself and our children so + # there is no need to save and restore environment variables if + # the user decided to pass the password on the command line. +- if self.get_option("password") is not False: +- os.environ["PGPASSWORD"] = str(self.get_option("password")) ++ if self.get_option("password"): ++ os.environ["PGPASSWORD"] = self.get_option("password") + + if self.get_option("dbhost"): + cmd = "pg_dump -U %s -h %s -p %s -w -F t %s" % ( +-- +2.17.2 + diff --git a/SOURCES/sos-bz1702802-openstack_instack-ansible-log.patch b/SOURCES/sos-bz1702802-openstack_instack-ansible-log.patch new file mode 100644 index 0000000..80a0bbf --- /dev/null +++ b/SOURCES/sos-bz1702802-openstack_instack-ansible-log.patch @@ -0,0 +1,43 @@ +From 1b9c2032149488a2372d188a8ed3251e364f41cf Mon Sep 17 00:00:00 2001 +From: Emilien Macchi <emilien@redhat.com> +Date: Wed, 24 Apr 2019 20:54:05 -0400 +Subject: [PATCH] [openstack_instack] add ansible.log + +Collect /var/lib/mistral/config-download-latest/ansible.log which is an +important log to be able to debug issues with Ansible playbooks. + +/var/lib/mistral/config-download-latest is a directory that exists +anyway on the undercloud and is the place where the ansible logs is +stored. + +Note: we don't want to collect the whole /var/lib/mistral because it +contains sensitive informations like username/passwords/endpoints. + +rhbz#1702806 +rhbz#1702802 + +Resolves: #1661 + +Signed-off-by: Emilien Macchi <emilien@redhat.com> +Signed-off-by: Bryn M. 
Reeves <bmr@redhat.com> +--- + sos/plugins/openstack_instack.py | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/sos/plugins/openstack_instack.py b/sos/plugins/openstack_instack.py +index e3470f5a..15e6c384 100644 +--- a/sos/plugins/openstack_instack.py ++++ b/sos/plugins/openstack_instack.py +@@ -22,7 +22,8 @@ NON_CONTAINERIZED_DEPLOY = [ + CONTAINERIZED_DEPLOY = [ + '/var/log/heat-launcher/', + '/home/stack/install-undercloud.log', +- '/home/stack/undercloud-install-*.tar.bzip2' ++ '/home/stack/undercloud-install-*.tar.bzip2', ++ '/var/lib/mistral/config-download-latest/ansible.log' + ] + + +-- +2.17.2 + diff --git a/SOURCES/sos-bz1706060-vdsm-plugin.patch b/SOURCES/sos-bz1706060-vdsm-plugin.patch new file mode 100644 index 0000000..2ae0c5a --- /dev/null +++ b/SOURCES/sos-bz1706060-vdsm-plugin.patch @@ -0,0 +1,335 @@ +From 1b4f8dfb8ac85708441faa3b2c2b9c2624dfa155 Mon Sep 17 00:00:00 2001 +From: "irit.go" <igoihman@redhat.com> +Date: Tue, 24 Jul 2018 11:01:55 +0300 +Subject: [PATCH 1/2] [Plugin] add get_process_pids() to return PIDs by process + name + +Signed-off-by: Irit Goihman igoihman@redhat.com +Signed-off-by: Bryn M. Reeves <bmr@redhat.com> +--- + sos/plugins/__init__.py | 16 ++++++++++++++++ + 1 file changed, 16 insertions(+) + +diff --git a/sos/plugins/__init__.py b/sos/plugins/__init__.py +index 4c8822b7..cdeda77a 100644 +--- a/sos/plugins/__init__.py ++++ b/sos/plugins/__init__.py +@@ -1389,6 +1389,22 @@ class Plugin(object): + return False + return status + ++ def get_process_pids(self, process): ++ """Returns PIDs of all processes with process name. ++ If the process doesn't exist, returns an empty list""" ++ pids = [] ++ cmd_line_glob = "/proc/[0-9]*/cmdline" ++ cmd_line_paths = glob.glob(cmd_line_glob) ++ for path in cmd_line_paths: ++ try: ++ with open(path, 'r') as f: ++ cmd_line = f.read().strip() ++ if process in cmd_line: ++ pids.append(path.split("/")[2]) ++ except IOError as e: ++ continue ++ return pids ++ + + class RedHatPlugin(object): + """Tagging class for Red Hat's Linux distributions""" +-- +2.17.2 + + +From 0618db904dadb05fde70c181a5940989ac127fe2 Mon Sep 17 00:00:00 2001 +From: Irit Goihman <igoihman@redhat.com> +Date: Thu, 1 Feb 2018 16:44:32 +0200 +Subject: [PATCH 2/2] [plugins] add vdsm plugin + +Add a plugin for vdsm + +Resolves: #1205 + +Signed-off-by: Irit Goihman <igoihman@redhat.com> +Signed-off-by: Bryn M. Reeves <bmr@redhat.com> +--- + sos/plugins/vdsm.py | 146 ++++++++++++++++++++++++++++++++++++++++++++ + 1 file changed, 146 insertions(+) + create mode 100644 sos/plugins/vdsm.py + +diff --git a/sos/plugins/vdsm.py b/sos/plugins/vdsm.py +new file mode 100644 +index 00000000..c648abbf +--- /dev/null ++++ b/sos/plugins/vdsm.py +@@ -0,0 +1,146 @@ ++# Copyright (C) 2018 Red Hat, Inc. ++ ++# This file is part of the sos project: https://github.com/sosreport/sos ++# ++# This copyrighted material is made available to anyone wishing to use, ++# modify, copy, or redistribute it subject to the terms and conditions of ++# version 2 of the GNU General Public License. ++# ++# See the LICENSE file in the source distribution for further information. ++ ++from sos.plugins import Plugin, RedHatPlugin ++ ++import glob ++import json ++import re ++ ++ ++# This configuration is based on vdsm.storage.lvm.LVM_CONF_TEMPLATE. ++# ++# locking_type is set to 0 in order to match lvm sos commands. With this ++# configuration we don't take any locks, so we will never block because ++# there is a stuck lvm command. 
++# locking_type=0 ++# ++# use_lvmetad is set to 0 in order not to show cached, old lvm metadata. ++# use_lvmetad=0 ++# ++# preferred_names and filter config values are set to capture Vdsm devices. ++# preferred_names=[ '^/dev/mapper/' ] ++# filter=[ 'a|^/dev/mapper/.*|', 'r|.*|' ] ++LVM_CONFIG = """ ++global { ++ locking_type=0 ++ use_lvmetad=0 ++} ++devices { ++ preferred_names=["^/dev/mapper/"] ++ ignore_suspended_devices=1 ++ write_cache_state=0 ++ disable_after_error_count=3 ++ filter=["a|^/dev/mapper/.*|", "r|.*|"] ++} ++""" ++LVM_CONFIG = re.sub(r"\s+", " ", LVM_CONFIG).strip() ++ ++ ++class Vdsm(Plugin, RedHatPlugin): ++ """vdsm Plugin""" ++ ++ packages = ( ++ 'vdsm', ++ 'vdsm-client', ++ ) ++ ++ plugin_name = 'vdsm' ++ ++ def setup(self): ++ self.add_forbidden_path('/etc/pki/vdsm/keys/*') ++ self.add_forbidden_path('/etc/pki/vdsm/libvirt-spice/*-key.*') ++ self.add_forbidden_path('/etc/pki/libvirt/private/*') ++ ++ self.add_cmd_output('service vdsmd status') ++ ++ self.add_copy_spec([ ++ '/tmp/vds_installer*', ++ '/tmp/vds_bootstrap*', ++ '/etc/vdsm/*' ++ ]) ++ ++ limit = self.get_option('log_size') ++ ++ self.add_copy_spec('/var/log/vdsm/*', sizelimit=limit) ++ ++ self._add_vdsm_forbidden_paths() ++ self.add_copy_spec([ ++ '/var/run/vdsm/*', ++ '/usr/libexec/vdsm/hooks', ++ '/var/lib/vdsm' ++ ]) ++ ++ qemu_pids = self.get_process_pids('qemu-kvm') ++ if qemu_pids: ++ files = ["cmdline", "status", "mountstats"] ++ self.add_copy_spec([ ++ "/proc/%s/%s" % (pid, name) ++ for pid in qemu_pids ++ for name in files ++ ]) ++ self.add_cmd_output([ ++ "ls -ldZ /etc/vdsm", ++ "su vdsm -s sh -c 'tree -l /rhev/data-center'", ++ "su vdsm -s sh -c 'ls -lR /rhev/data-center'" ++ ]) ++ self.add_cmd_output([ ++ "lvm vgs -v -o +tags --config \'%s\'" % LVM_CONFIG, ++ "lvm lvs -v -o +tags --config \'%s\'" % LVM_CONFIG, ++ "lvm pvs -v -o +all --config \'%s\'" % LVM_CONFIG ++ ]) ++ ++ self.add_cmd_output([ ++ 'vdsm-client Host getCapabilities', ++ 'vdsm-client Host getStats', ++ 'vdsm-client Host getAllVmStats', ++ 'vdsm-client Host getVMFullList', ++ 'vdsm-client Host getDeviceList', ++ 'vdsm-client Host hostdevListByCaps', ++ 'vdsm-client Host getAllTasksInfo', ++ 'vdsm-client Host getAllTasksStatuses' ++ ]) ++ ++ try: ++ res = self.call_ext_prog( ++ 'vdsm-client Host getConnectedStoragePools' ++ ) ++ if res['status'] == 0: ++ pools = json.loads(res['output']) ++ for pool in pools: ++ self.add_cmd_output( ++ 'vdsm-client StoragePool getSpmStatus' ++ ' storagepoolID={}'.format(pool) ++ ) ++ except ValueError as e: ++ self._log_error( ++ 'vdsm-client Host getConnectedStoragePools: %s' % (e) ++ ) ++ ++ try: ++ res = self.call_ext_prog('vdsm-client Host getStorageDomains') ++ if res['status'] == 0: ++ sd_uuids = json.loads(res['output']) ++ dump_volume_chains_cmd = 'vdsm-tool dump-volume-chains %s' ++ self.add_cmd_output([ ++ dump_volume_chains_cmd % uuid for uuid in sd_uuids ++ ]) ++ except ValueError as e: ++ self._log_error( ++ 'vdsm-client Host getStorageDomains: %s' % (e) ++ ) ++ ++ def _add_vdsm_forbidden_paths(self): ++ """Add confidential sysprep vfds under /var/run/vdsm to ++ forbidden paths """ ++ ++ for file_path in glob.glob("/var/run/vdsm/*"): ++ if file_path.endswith(('.vfd', '/isoUploader', '/storage')): ++ self.add_forbidden_path(file_path) +-- +2.17.2 + +From 7141ebf3b2071c84286ced29154c33502c4da934 Mon Sep 17 00:00:00 2001 +From: Irit goihman <igoihman@redhat.com> +Date: Sun, 7 Apr 2019 14:03:55 +0300 +Subject: [PATCH] [vdsm] fix plugin docstring capitalisation + +--- + 
sos/plugins/vdsm.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/sos/plugins/vdsm.py b/sos/plugins/vdsm.py +index c648abbf9..4549c372e 100644 +--- a/sos/plugins/vdsm.py ++++ b/sos/plugins/vdsm.py +@@ -45,7 +45,7 @@ + + + class Vdsm(Plugin, RedHatPlugin): +- """vdsm Plugin""" ++ """vdsm plugin""" + + packages = ( + 'vdsm', +From 208a1d9622dfa13d923882793cd19e9e6cf1e488 Mon Sep 17 00:00:00 2001 +From: Irit goihman <igoihman@redhat.com> +Date: Sun, 7 Apr 2019 14:04:48 +0300 +Subject: [PATCH] [vdsm] use metadata_read_only=1 for LVM2 commands + +--- + sos/plugins/vdsm.py | 5 +++++ + 1 file changed, 5 insertions(+) + +diff --git a/sos/plugins/vdsm.py b/sos/plugins/vdsm.py +index 4549c372e..913d49a53 100644 +--- a/sos/plugins/vdsm.py ++++ b/sos/plugins/vdsm.py +@@ -22,6 +22,10 @@ + # there is a stuck lvm command. + # locking_type=0 + # ++# To prevent modifications to volume group metadata (for e.g. due to a ++# automatically detected inconsistency), metadata_read_only is set to 1. ++# metadata_read_only=1 ++# + # use_lvmetad is set to 0 in order not to show cached, old lvm metadata. + # use_lvmetad=0 + # +@@ -31,6 +35,7 @@ + LVM_CONFIG = """ + global { + locking_type=0 ++ metadata_read_only=1 + use_lvmetad=0 + } + devices { +From 97c21901ddb6f7d5e3169d1777983f784b103bc4 Mon Sep 17 00:00:00 2001 +From: Irit goihman <igoihman@redhat.com> +Date: Sun, 7 Apr 2019 14:05:30 +0300 +Subject: [PATCH] [vdsm] drop explicit size limiting + +--- + sos/plugins/vdsm.py | 4 +--- + 1 file changed, 1 insertion(+), 3 deletions(-) + +diff --git a/sos/plugins/vdsm.py b/sos/plugins/vdsm.py +index 913d49a53..2dc4b6bea 100644 +--- a/sos/plugins/vdsm.py ++++ b/sos/plugins/vdsm.py +@@ -72,9 +72,7 @@ def setup(self): + '/etc/vdsm/*' + ]) + +- limit = self.get_option('log_size') +- +- self.add_copy_spec('/var/log/vdsm/*', sizelimit=limit) ++ self.add_copy_spec('/var/log/vdsm/*') + + self._add_vdsm_forbidden_paths() + self.add_copy_spec([ +From cfaf930e58f4996919d0da6c356135cfce26dacb Mon Sep 17 00:00:00 2001 +From: Irit goihman <igoihman@redhat.com> +Date: Sun, 7 Apr 2019 14:13:59 +0300 +Subject: [PATCH] [vdsm] change filter + +--- + sos/plugins/vdsm.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/sos/plugins/vdsm.py b/sos/plugins/vdsm.py +index 2dc4b6bea..ab5c6130b 100644 +--- a/sos/plugins/vdsm.py ++++ b/sos/plugins/vdsm.py +@@ -43,7 +43,7 @@ + ignore_suspended_devices=1 + write_cache_state=0 + disable_after_error_count=3 +- filter=["a|^/dev/mapper/.*|", "r|.*|"] ++ filter=["a|^/dev/disk/by-id/dm-uuid-mpath-|", "r|.+|"] + } + """ + LVM_CONFIG = re.sub(r"\s+", " ", LVM_CONFIG).strip() +From 2ebc04da53dc871c8dd5243567afa4f8592dca29 Mon Sep 17 00:00:00 2001 +From: Irit goihman <igoihman@redhat.com> +Date: Sun, 7 Apr 2019 14:14:32 +0300 +Subject: [PATCH] [vdsm] capture supervdsmd status + +--- + sos/plugins/vdsm.py | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/sos/plugins/vdsm.py b/sos/plugins/vdsm.py +index ab5c6130b..ae9c17c96 100644 +--- a/sos/plugins/vdsm.py ++++ b/sos/plugins/vdsm.py +@@ -65,6 +65,7 @@ def setup(self): + self.add_forbidden_path('/etc/pki/libvirt/private/*') + + self.add_cmd_output('service vdsmd status') ++ self.add_cmd_output('service supervdsmd status') + + self.add_copy_spec([ + '/tmp/vds_installer*', diff --git a/SOURCES/sos-bz1711305-katello-qpid-certificate.patch b/SOURCES/sos-bz1711305-katello-qpid-certificate.patch new file mode 100644 index 0000000..3ca559a --- /dev/null +++ b/SOURCES/sos-bz1711305-katello-qpid-certificate.patch @@ -0,0 
+1,48 @@ +From 166f712eb447f54f0e2c5396ea25f5bc11e3f519 Mon Sep 17 00:00:00 2001 +From: Pavel Moravec <pmoravec@redhat.com> +Date: Fri, 17 May 2019 15:55:05 +0200 +Subject: [PATCH] [katello] support both locations of qpid SSL certs + +Newer katello versions deploy certs for qpid to +/etc/pki/pulp/qpid/client.crt certs instead of +/etc/pki/katello/qpid_client_striped.crt . + +Sosreport should use either of the location that exists, to successfully +run few qpid-stat commands. + +Resolves: #1680 + +Signed-off-by: Pavel Moravec <pmoravec@redhat.com> +--- + sos/plugins/katello.py | 8 +++++++- + 1 file changed, 7 insertions(+), 1 deletion(-) + +diff --git a/sos/plugins/katello.py b/sos/plugins/katello.py +index 0794fb4c..1ea52da8 100644 +--- a/sos/plugins/katello.py ++++ b/sos/plugins/katello.py +@@ -10,6 +10,7 @@ + + from sos.plugins import Plugin, RedHatPlugin + from pipes import quote ++import os.path + + + class Katello(Plugin, RedHatPlugin): +@@ -24,7 +25,12 @@ class Katello(Plugin, RedHatPlugin): + "/var/log/httpd/katello-reverse-proxy_error_ssl.log*" + ]) + +- cert = "/etc/pki/katello/qpid_client_striped.crt" ++ # certificate file location relies on katello version, it can be either ++ # /etc/pki/katello/qpid_client_striped.crt (for older versions) or ++ # /etc/pki/pulp/qpid/client.crt (for newer versions) ++ cert = "/etc/pki/pulp/qpid/client.crt" ++ if not os.path.isfile(cert): ++ cert = "/etc/pki/katello/qpid_client_striped.crt" + self.add_cmd_output([ + "qpid-stat -%s --ssl-certificate=%s -b amqps://localhost:5671" % + (opt, cert) for opt in "quc" +-- +2.17.2 + diff --git a/SOURCES/sos-bz1715470-rhv-postgres-from-scl.patch b/SOURCES/sos-bz1715470-rhv-postgres-from-scl.patch deleted file mode 100644 index 48fd560..0000000 --- a/SOURCES/sos-bz1715470-rhv-postgres-from-scl.patch +++ /dev/null @@ -1,489 +0,0 @@ -From 6db459e2b21a798d93cc79e705e8e02f1bbd24c1 Mon Sep 17 00:00:00 2001 -From: Jake Hunsaker <jhunsake@redhat.com> -Date: Tue, 24 Jul 2018 17:40:25 -0400 -Subject: [PATCH] [Policies|Plugins] Add services member - -Adds a services member to facilitate plugin enablement. This is tied to -a new InitSystem class that gets attached to policies. The InitSystem -class is used to determine services that are present on the system and -what those service statuses currently are (e.g. enabled/disable). - -Plugins can now specify a set of services to enable the plugin on if -that service exists on the system, similar to the file, command, and -package checks. - -Additionally, the Plugin class now has methods to check on service -states, and make decisions based off of. For example: - - def setup(self): - if self.is_service('foobar'): - self.add_cmd_output('barfoo') - -Currently, only systemd has actual functionality for this. The base -InitSystem inherited by policies by default will always return False for -service checks, thus resulting in the same behavior as before this -change. - -The Red Hat family of distributions has been set to systemd, as all -current versions of those distributions use systemd. - -Closes: #83 -Resolves: #1387 - -Signed-off-by: Jake Hunsaker <jhunsake@redhat.com> -Signed-off-by: Bryn M. 
Reeves <bmr@redhat.com> ---- - sos/plugins/__init__.py | 31 +++++++++-- - sos/policies/__init__.py | 115 ++++++++++++++++++++++++++++++++++++++- - sos/policies/redhat.py | 1 + - 3 files changed, 142 insertions(+), 5 deletions(-) - -diff --git a/sos/plugins/__init__.py b/sos/plugins/__init__.py -index 82fef18e5..252de4d05 100644 ---- a/sos/plugins/__init__.py -+++ b/sos/plugins/__init__.py -@@ -123,6 +123,7 @@ class Plugin(object): - files = () - commands = () - kernel_mods = () -+ services = () - archive = None - profiles = () - sysroot = '/' -@@ -202,6 +203,22 @@ def is_installed(self, package_name): - '''Is the package $package_name installed?''' - return self.policy.pkg_by_name(package_name) is not None - -+ def is_service(self, name): -+ '''Does the service $name exist on the system?''' -+ return self.policy.init_system.is_service(name) -+ -+ def service_is_enabled(self, name): -+ '''Is the service $name enabled?''' -+ return self.policy.init_system.is_enabled(name) -+ -+ def service_is_disabled(self, name): -+ '''Is the service $name disabled?''' -+ return self.policy.init_system.is_disabled(name) -+ -+ def get_service_status(self, name): -+ '''Return the reported status for service $name''' -+ return self.policy.init_system.get_service_status(name) -+ - def do_cmd_private_sub(self, cmd): - '''Remove certificate and key output archived by sosreport. cmd - is the command name from which output is collected (i.e. exlcuding -@@ -977,7 +994,8 @@ def check_enabled(self): - overridden. - """ - # some files or packages have been specified for this package -- if any([self.files, self.packages, self.commands, self.kernel_mods]): -+ if any([self.files, self.packages, self.commands, self.kernel_mods, -+ self.services]): - if isinstance(self.files, six.string_types): - self.files = [self.files] - -@@ -990,6 +1008,9 @@ def check_enabled(self): - if isinstance(self.kernel_mods, six.string_types): - self.kernel_mods = [self.kernel_mods] - -+ if isinstance(self.services, six.string_types): -+ self.services = [self.services] -+ - if isinstance(self, SCLPlugin): - # save SCLs that match files or packages - type(self)._scls_matched = [] -@@ -1005,7 +1026,8 @@ def check_enabled(self): - - return self._files_pkgs_or_cmds_present(self.files, - self.packages, -- self.commands) -+ self.commands, -+ self.services) - - if isinstance(self, SCLPlugin): - # if files and packages weren't specified, we take all SCLs -@@ -1013,7 +1035,7 @@ def check_enabled(self): - - return True - -- def _files_pkgs_or_cmds_present(self, files, packages, commands): -+ def _files_pkgs_or_cmds_present(self, files, packages, commands, services): - kernel_mods = self.policy.lsmod() - - def have_kmod(kmod): -@@ -1022,7 +1044,8 @@ def have_kmod(kmod): - return (any(os.path.exists(fname) for fname in files) or - any(self.is_installed(pkg) for pkg in packages) or - any(is_executable(cmd) for cmd in commands) or -- any(have_kmod(kmod) for kmod in self.kernel_mods)) -+ any(have_kmod(kmod) for kmod in self.kernel_mods) or -+ any(self.is_service(svc) for svc in services)) - - def default_enabled(self): - """This decides whether a plugin should be automatically loaded or -diff --git a/sos/policies/__init__.py b/sos/policies/__init__.py -index 65d8aac63..d6255d3ee 100644 ---- a/sos/policies/__init__.py -+++ b/sos/policies/__init__.py -@@ -13,7 +13,8 @@ - - from sos.utilities import (ImporterHelper, - import_module, -- shell_out) -+ shell_out, -+ sos_get_command_output) - from sos.plugins import IndependentPlugin, ExperimentalPlugin - from sos 
import _sos as _ - from sos import SoSOptions, _arg_names -@@ -49,6 +50,113 @@ def load(cache={}, sysroot=None): - return cache['policy'] - - -+class InitSystem(object): -+ """Encapsulates an init system to provide service-oriented functions to -+ sos. -+ -+ This should be used to query the status of services, such as if they are -+ enabled or disabled on boot, or if the service is currently running. -+ """ -+ -+ def __init__(self, init_cmd=None, list_cmd=None, query_cmd=None): -+ -+ self.services = {} -+ -+ self.init_cmd = init_cmd -+ self.list_cmd = "%s %s" % (self.init_cmd, list_cmd) or None -+ self.query_cmd = "%s %s" % (self.init_cmd, query_cmd) or None -+ -+ self.load_all_services() -+ -+ def is_enabled(self, name): -+ """Check if given service name is enabled """ -+ if self.services and name in self.services: -+ return self.services[name]['config'] == 'enabled' -+ return False -+ -+ def is_disabled(self, name): -+ """Check if a given service name is disabled """ -+ if self.services and name in self.services: -+ return self.services[name]['config'] == 'disabled' -+ return False -+ -+ def is_service(self, name): -+ """Checks if the given service name exists on the system at all, this -+ does not check for the service status -+ """ -+ return name in self.services -+ -+ def load_all_services(self): -+ """This loads all services known to the init system into a dict. -+ The dict should be keyed by the service name, and contain a dict of the -+ name and service status -+ """ -+ pass -+ -+ def _query_service(self, name): -+ """Query an individual service""" -+ if self.query_cmd: -+ res = sos_get_command_output("%s %s" % (self.query_cmd, name)) -+ if res['status'] == 0: -+ return res -+ else: -+ return None -+ return None -+ -+ def parse_query(self, output): -+ """Parses the output returned by the query command to make a -+ determination of what the state of the service is -+ -+ This should be overriden by anything that subclasses InitSystem -+ """ -+ return output -+ -+ def get_service_status(self, name): -+ """Returns the status for the given service name along with the output -+ of the query command -+ """ -+ svc = self._query_service(name) -+ if svc is not None: -+ return {'name': name, -+ 'status': self.parse_query(svc['output']), -+ 'output': svc['output'] -+ } -+ else: -+ return {'name': name, -+ 'status': 'missing', -+ 'output': '' -+ } -+ -+ -+class SystemdInit(InitSystem): -+ -+ def __init__(self): -+ super(SystemdInit, self).__init__( -+ init_cmd='systemctl', -+ list_cmd='list-unit-files --type=service', -+ query_cmd='status' -+ ) -+ -+ def parse_query(self, output): -+ for line in output.splitlines(): -+ if line.strip().startswith('Active:'): -+ return line.split()[1] -+ return 'unknown' -+ -+ def load_all_services(self): -+ svcs = shell_out(self.list_cmd).splitlines() -+ for line in svcs: -+ try: -+ name = line.split('.service')[0] -+ config = line.split()[1] -+ self.services[name] = { -+ 'name': name, -+ 'config': config -+ } -+ except IndexError: -+ pass -+ -+ - class PackageManager(object): - """Encapsulates a package manager. 
If you provide a query_command to the - constructor it should print each package on the system in the following -@@ -676,11 +784,16 @@ class LinuxPolicy(Policy): - distro = "Linux" - vendor = "None" - PATH = "/bin:/sbin:/usr/bin:/usr/sbin" -+ init = None - - _preferred_hash_name = None - - def __init__(self, sysroot=None): - super(LinuxPolicy, self).__init__(sysroot=sysroot) -+ if self.init == 'systemd': -+ self.init_system = SystemdInit() -+ else: -+ self.init_system = InitSystem() - - def get_preferred_hash_name(self): - -diff --git a/sos/policies/redhat.py b/sos/policies/redhat.py -index 5bfbade28..b494de3c4 100644 ---- a/sos/policies/redhat.py -+++ b/sos/policies/redhat.py -@@ -45,6 +45,7 @@ class RedHatPolicy(LinuxPolicy): - _host_sysroot = '/' - default_scl_prefix = '/opt/rh' - name_pattern = 'friendly' -+ init = 'systemd' - - def __init__(self, sysroot=None): - super(RedHatPolicy, self).__init__(sysroot=sysroot) -From 7b9284e948f2e9076c92741ed5b95fec7934af8d Mon Sep 17 00:00:00 2001 -From: Jake Hunsaker <jhunsake@redhat.com> -Date: Fri, 15 Feb 2019 16:03:53 -0500 -Subject: [PATCH] [policy|plugin] Add 'is_running' check for services - -Adds a method to the InitSystem class used by policies and plugins to -check if a given service name is running. Plugins can make use of this -through the new self.service_is_running() method. - -For policies that use the base InitSystem class, this method will always -return True as the service_is_running() method is likely to be used when -determining if we should run commands or not, and we do not want to -incorrectly stop running those commands where they would collect -meaningful output today. - -The SystemD init system for policies properly checks to see if the given -service is active or not when reporting is the service is running. - -Resolves: #1567 - -Signed-off-by: Jake Hunsaker <jhunsake@redhat.com> -Signed-off-by: Bryn M. Reeves <bmr@redhat.com> ---- - sos/plugins/__init__.py | 6 +++++- - sos/policies/__init__.py | 22 ++++++++++++++++++---- - 2 files changed, 23 insertions(+), 5 deletions(-) - -diff --git a/sos/plugins/__init__.py b/sos/plugins/__init__.py -index 47b028a85..030e7a305 100644 ---- a/sos/plugins/__init__.py -+++ b/sos/plugins/__init__.py -@@ -215,9 +215,13 @@ def service_is_disabled(self, name): - '''Is the service $name disabled?''' - return self.policy.init_system.is_disabled(name) - -+ def service_is_running(self, name): -+ '''Is the service $name currently running?''' -+ return self.policy.init_system.is_running(name) -+ - def get_service_status(self, name): - '''Return the reported status for service $name''' -- return self.policy.init_system.get_service_status(name) -+ return self.policy.init_system.get_service_status(name)['status'] - - def do_cmd_private_sub(self, cmd): - '''Remove certificate and key output archived by sosreport. cmd -diff --git a/sos/policies/__init__.py b/sos/policies/__init__.py -index d6255d3ee..d0b180152 100644 ---- a/sos/policies/__init__.py -+++ b/sos/policies/__init__.py -@@ -86,6 +86,17 @@ def is_service(self, name): - """ - return name in self.services - -+ def is_running(self, name): -+ """Checks if the given service name is in a running state. -+ -+ This should be overridden by initsystems that subclass InitSystem -+ """ -+ # This is going to be primarily used in gating if service related -+ # commands are going to be run or not. 
Default to always returning -+ # True when an actual init system is not specified by policy so that -+ # we don't inadvertantly restrict sosreports on those systems -+ return True -+ - def load_all_services(self): - """This loads all services known to the init system into a dict. - The dict should be keyed by the service name, and contain a dict of the -@@ -96,10 +107,9 @@ def load_all_services(self): - def _query_service(self, name): - """Query an individual service""" - if self.query_cmd: -- res = sos_get_command_output("%s %s" % (self.query_cmd, name)) -- if res['status'] == 0: -- return res -- else: -+ try: -+ return sos_get_command_output("%s %s" % (self.query_cmd, name)) -+ except Exception: - return None - return None - -@@ -156,6 +166,10 @@ def load_all_services(self): - except IndexError: - pass - -+ def is_running(self, name): -+ svc = self.get_service_status(name) -+ return svc['status'] == 'active' -+ - - class PackageManager(object): - """Encapsulates a package manager. If you provide a query_command to the -From 3e736ad53d254aba8795b3d5d8ce0ec4f827ab1c Mon Sep 17 00:00:00 2001 -From: Jake Hunsaker <jhunsake@redhat.com> -Date: Fri, 8 Feb 2019 13:19:56 -0500 -Subject: [PATCH] [postgresql] Use postgres 10 scl if installed - -Updates the plugin to check if the specified SCL is running, as some -systems may have multiple SCL versions installed, but only one will be -running at a time. We now use the running version for a pgdump. - -This is primarily aimed at RHV environments as 4.3 and later use version -10. - -Signed-off-by: Jake Hunsaker <jhunsake@redhat.com> ---- - sos/plugins/postgresql.py | 17 ++++++++++++++--- - 1 file changed, 14 insertions(+), 3 deletions(-) - -diff --git a/sos/plugins/postgresql.py b/sos/plugins/postgresql.py -index aef431f8a..e641c3b44 100644 ---- a/sos/plugins/postgresql.py -+++ b/sos/plugins/postgresql.py -@@ -80,14 +80,25 @@ def setup(self): - - class RedHatPostgreSQL(PostgreSQL, SCLPlugin): - -- packages = ('postgresql', 'rh-postgresql95-postgresql-server', ) -+ packages = ( -+ 'postgresql', -+ 'rh-postgresql95-postgresql-server', -+ 'rh-postgresql10-postgresql-server' -+ ) - - def setup(self): - super(RedHatPostgreSQL, self).setup() - -- scl = "rh-postgresql95" - pghome = self.get_option("pghome") - -+ scl = None -+ for pkg in self.packages[1:]: -+ # The scl name, package name, and service name all differ slightly -+ # but is at least consistent in doing so across versions, so we -+ # need to do some mangling here -+ if self.service_is_running(pkg.replace('-server', '')): -+ scl = pkg.split('-postgresql-')[0] -+ - # Copy PostgreSQL log files. - for filename in find("*.log", pghome): - self.add_copy_spec(filename) -@@ -111,7 +122,7 @@ def setup(self): - ) - ) - -- if scl in self.scls_matched: -+ if scl and scl in self.scls_matched: - self.do_pg_dump(scl=scl, filename="pgdump-scl-%s.tar" % scl) - - -From 0ba743bbf9df335dd47ec45a450e63d72d7ce494 Mon Sep 17 00:00:00 2001 -From: Pavel Moravec <pmoravec@redhat.com> -Date: Wed, 5 Sep 2018 12:34:48 +0200 -Subject: [PATCH] [plugins] fix 6db459e for SCL services - -Calling _files_pkgs_or_cmds_present for SCLs lacks "services" -argument that was added in 6db459e commit. - -Also it is worth renaming the method to more generic -_check_plugin_triggers . 
- -Resolves: #1416 - -Signed-off-by: Pavel Moravec <pmoravec@redhat.com> ---- - sos/plugins/__init__.py | 18 ++++++++++-------- - 1 file changed, 10 insertions(+), 8 deletions(-) - -diff --git a/sos/plugins/__init__.py b/sos/plugins/__init__.py -index 97f3cc592..3abe29db6 100644 ---- a/sos/plugins/__init__.py -+++ b/sos/plugins/__init__.py -@@ -1033,16 +1033,18 @@ def check_enabled(self): - files = [f % {"scl_name": scl} for f in self.files] - packages = [p % {"scl_name": scl} for p in self.packages] - commands = [c % {"scl_name": scl} for c in self.commands] -- if self._files_pkgs_or_cmds_present(files, -- packages, -- commands): -+ services = [s % {"scl_name": scl} for s in self.services] -+ if self._check_plugin_triggers(files, -+ packages, -+ commands, -+ services): - type(self)._scls_matched.append(scl) - return len(type(self)._scls_matched) > 0 - -- return self._files_pkgs_or_cmds_present(self.files, -- self.packages, -- self.commands, -- self.services) -+ return self._check_plugin_triggers(self.files, -+ self.packages, -+ self.commands, -+ self.services) - - if isinstance(self, SCLPlugin): - # if files and packages weren't specified, we take all SCLs -@@ -1050,7 +1052,7 @@ def check_enabled(self): - - return True - -- def _files_pkgs_or_cmds_present(self, files, packages, commands, services): -+ def _check_plugin_triggers(self, files, packages, commands, services): - kernel_mods = self.policy.lsmod() - - def have_kmod(kmod): diff --git a/SPECS/sos.spec b/SPECS/sos.spec index 327e908..afd87c7 100644 --- a/SPECS/sos.spec +++ b/SPECS/sos.spec @@ -1,8 +1,8 @@ %{!?python_sitelib: %define python_sitelib %(%{__python} -c "from distutils.sysconfig import get_python_lib; print get_python_lib()")} Summary: A set of tools to gather troubleshooting information from a system Name: sos -Version: 3.6 -Release: 19%{?dist} +Version: 3.7 +Release: 5%{?dist} Group: Applications/System Source0: https://github.com/sosreport/sos/archive/%{version}.tar.gz License: GPLv2+ @@ -17,29 +17,17 @@ Requires: bzip2 Requires: xz Requires: python2-futures Obsoletes: sos-plugins-openstack +Conflicts: vdsm <= 4.30.17 Patch0: skip-generating-doc.patch -Patch1: sos-bz1474976-regexp-sub.patch -Patch2: sos-bz1594327-archive-encryption.patch -Patch3: sos-bz1597532-stat-isblk.patch -Patch4: sos-bz1596494-cds-on-rhui3.patch -Patch5: sos-bz1609135-ceph-dont-collect-tmp-mnt.patch -Patch6: sos-bz1608384-archive-name-sanitize.patch -Patch7: sos-bz1613806-rhosp-lsof-optional.patch -Patch8: sos-bz1600158-rhv-log-collector-analyzer.patch -Patch9: sos-bz1616030-etcd-kube-osp-3-10.patch -Patch10: sos-bz1624043-symlinks-not-copied.patch -Patch11: sos-bz1626159-atomic-attribute-error.patch -Patch12: sos-bz1623070-pipe-returncode.patch -Patch13: sos-bz1636093-openstack-relax-enabling-plugins.patch -Patch14: sos-bz1637632-kernel-dont-collect-tracing-instance.patch -Patch15: sos-bz1656732-ovirt_node-plugin.patch -Patch16: sos-bz1658570-docker-podman-containers.patch -Patch17: sos-bz1658571-postgresql-collect-full-dump.patch -Patch18: sos-bz1669045-rhcos-policy-and-plugins.patch -Patch19: sos-bz1679238-crio-plugin.patch -Patch20: sos-bz1690999-docker-skip-system-df.patch -Patch21: sos-bz1715470-rhv-postgres-from-scl.patch -Patch22: sos-3.6-centos-branding.patch +Patch1: sos-bz1656812-bump-release.patch +Patch2: sos-bz1639166-pcp-pmlogger-no-limit.patch +Patch3: sos-bz1697854-plugopts-default-datatypes.patch +Patch4: sos-bz1697813-plugin-vs-command-timeouts.patch +Patch5: sos-bz1311129-sos-conf-disabled-plugins-manpages.patch +Patch6: 
sos-bz1702802-openstack_instack-ansible-log.patch +Patch7: sos-bz1706060-vdsm-plugin.patch +Patch8: sos-bz1711305-katello-qpid-certificate.patch + %description Sos is a set of tools that gathers information about system @@ -58,20 +46,6 @@ support technicians and developers. %patch6 -p1 %patch7 -p1 %patch8 -p1 -%patch9 -p1 -%patch10 -p1 -%patch11 -p1 -%patch12 -p1 -%patch13 -p1 -%patch14 -p1 -%patch15 -p1 -%patch16 -p1 -%patch17 -p1 -%patch18 -p1 -%patch19 -p1 -%patch20 -p1 -%patch21 -p1 -%patch22 -p1 %build make @@ -95,44 +69,64 @@ rm -rf ${RPM_BUILD_ROOT} %config(noreplace) %{_sysconfdir}/sos.conf %changelog -* Mon Jul 29 2019 CentOS Sources <bugs@centos.org> - 3.6-19.el7.centos -- Roll in CentOS Branding - -* Thu May 30 2019 Pavel Moravec <pmoravec@redhat.com> = 3.6-19 -- [postgresql] Use postgres 10 scl if installed - Resolves: bz1715470 - -* Wed Mar 20 2019 Pavel Moravec <pmoravec@redhat.com> = 3.6-17 +* Tue Jun 25 2019 Pavel Moravec <pmoravec@redhat.com> = 3.7-5 +- Updates to vdsm plugin + Resolves: bz1706060 + +* Thu May 30 2019 Pavel Moravec <pmoravec@redhat.com> = 3.7-4 +- Add conflict to old vdsm package versions + Resolves: bz1706060 + +* Tue May 21 2019 Pavel Moravec <pmoravec@redhat.com> = 3.7-3 +- [katello] support both locations of qpid SSL certs + Resolves: bz1711305 +- [plugins] add vdsm plugin + Resolves: bz1706060 + +* Tue Apr 30 2019 Pavel Moravec <pmoravec@redhat.com> = 3.7-2 +- [sos] bump release + Resolves: bz1656812 +- [pcp] collect pmlogger without a sizelimit + Resolves: bz1639166 +- [maas,mysql,npm,pacemaker,postgresql] fix plugopts data types + Resolves: bz1697854 +- [foreman,satellite] increase plugin default timeouts + Resolves: bz1697813 +- [sosreport] update sos.conf manpages + Resolves: bz1311129 +- [openstack_instack] add ansible.log + Resolves: bz1702802 + +* Wed Mar 27 2019 Pavel Moravec <pmoravec@redhat.com> = 3.7-1 +- New upstream release sos-3.7 + +* Wed Mar 20 2019 Pavel Moravec <pmoravec@redhat.com> = 3.6-15 - [docker] do not collect 'system df' by default - Resolves: bz1690999 - -* Mon Feb 25 2019 Pavel Moravec <pmoravec@redhat.com> = 3.6-16 -- [crio] Add tagging classes - Resolves: bz1679238 + Resolves: bz1690273 -* Wed Feb 20 2019 Pavel Moravec <pmoravec@redhat.com> = 3.6-15 +* Tue Feb 19 2019 Pavel Moravec <pmoravec@redhat.com> = 3.6-14 - [crio] Add new plugin - Resolves: bz1679238 + Resolves: bz1666321 -* Thu Jan 24 2019 Pavel Moravec <pmoravec@redhat.com> = 3.6-14 -- [rhcos,rpmostree] Add RHCOS policy and 2 plugins - Resolves: bz1669045 +* Sun Jan 20 2019 Pavel Moravec <pmoravec@redhat.com> = 3.6-13 +- [policies] Add RHCOS policy and plugins + Resolves: bz1667217 -* Wed Dec 12 2018 Pavel Moravec <pmoravec@redhat.com> = 3.6-13 +* Wed Dec 12 2018 Pavel Moravec <pmoravec@redhat.com> = 3.6-12 - [ovirt_node] New plugin for oVirt Node - Resolves: bz1656732 + Resolves: bz1591433 - [podman] Add support for gathering information on podman - Resolves: bz1658570 + Resolves: bz1646698 - [postgresql] Do not limit dump size - Resolves: bz1658571 + Resolves: bz1656278 * Tue Oct 09 2018 Pavel Moravec <pmoravec@redhat.com> = 3.6-11 - [kernel] dont collect some tracing instance files - Resolves: bz1637632 + Resolves: bz1636333 * Thu Oct 04 2018 Pavel Moravec <pmoravec@redhat.com> = 3.6-10 - [openstack_*] relax enabling of OSP RedHat plugins - Resolves: bz1636093 + Resolves: bz1592909 * Fri Sep 14 2018 Pavel Moravec <pmoravec@redhat.com> = 3.6-9 - [archive] recursive symlink fix and simplify directory destination