diff --git a/.gitignore b/.gitignore index a6e0b84..ef784c7 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,2 @@ -SOURCES/sos-3.6.tar.gz +SOURCES/sos-3.7.tar.gz SOURCES/sos-audit-0.3.tgz diff --git a/.sos.metadata b/.sos.metadata index 5f9c46e..9abd7a0 100644 --- a/.sos.metadata +++ b/.sos.metadata @@ -1,2 +1,2 @@ -aa090f917b4f54421e2ad2294a60fc124ef66a85 SOURCES/sos-3.6.tar.gz +38aa7609e7545eddf709db60fa1523432b268e13 SOURCES/sos-3.7.tar.gz 9d478b9f0085da9178af103078bbf2fd77b0175a SOURCES/sos-audit-0.3.tgz diff --git a/README.debrand b/README.debrand deleted file mode 100644 index 01c46d2..0000000 --- a/README.debrand +++ /dev/null @@ -1,2 +0,0 @@ -Warning: This package was configured for automatic debranding, but the changes -failed to apply. diff --git a/SOURCES/sos-3.6-centos-branding.patch b/SOURCES/sos-3.6-centos-branding.patch deleted file mode 100644 index f88d36e..0000000 --- a/SOURCES/sos-3.6-centos-branding.patch +++ /dev/null @@ -1,105 +0,0 @@ -diff -uNrp sos-3.6.orig/sos/policies/redhat.py sos-3.6/sos/policies/redhat.py ---- sos-3.6.orig/sos/policies/redhat.py 2018-11-04 17:44:59.513116585 +0000 -+++ sos-3.6/sos/policies/redhat.py 2018-11-04 17:53:28.333731059 +0000 -@@ -32,9 +32,9 @@ OS_RELEASE = "/etc/os-release" - - - class RedHatPolicy(LinuxPolicy): -- distro = "Red Hat" -- vendor = "Red Hat" -- vendor_url = "http://www.redhat.com/" -+ distro = "CentOS" -+ vendor = "CentOS" -+ vendor_url = "http://www,centos.org/" - _redhat_release = '/etc/redhat-release' - _tmp_dir = "/var/tmp" - _rpmq_cmd = 'rpm -qa --queryformat "%{NAME}|%{VERSION}|%{RELEASE}\\n"' -@@ -92,9 +92,9 @@ class RedHatPolicy(LinuxPolicy): - - @classmethod - def check(cls): -- """This method checks to see if we are running on Red Hat. It must be -+ """This method checks to see if we are running on CentOS. It must be - overriden by concrete subclasses to return True when running on a -- Fedora, RHEL or other Red Hat distribution or False otherwise.""" -+ Fedora, RHEL or CentOS distribution or False otherwise.""" - return False - - def check_usrmove(self, pkgs): -@@ -185,7 +185,7 @@ class RedHatPolicy(LinuxPolicy): - return self.host_name() - - --# Container environment variables on Red Hat systems. -+# Container environment variables on CentOS systems. 
- ENV_CONTAINER = 'container' - ENV_HOST_SYSROOT = 'HOST' - -@@ -195,22 +195,22 @@ _opts_all_logs_verify = SoSOptions(all_l - _opts_all_logs_no_lsof = SoSOptions(all_logs=True, - plugopts=['process.lsof=off']) - --RHEL_RELEASE_STR = "Red Hat Enterprise Linux" -+RHEL_RELEASE_STR = "CentOS Linux" - - RHV = "rhv" --RHV_DESC = "Red Hat Virtualization" -+RHV_DESC = "CentOS Virtualization" - - RHEL = "rhel" - RHEL_DESC = RHEL_RELEASE_STR - - RHOSP = "rhosp" --RHOSP_DESC = "Red Hat OpenStack Platform" -+RHOSP_DESC = "RDO" - - RHOCP = "ocp" --RHOCP_DESC = "OpenShift Container Platform by Red Hat" -+RHOCP_DESC = "OpenShift" - - RH_SATELLITE = "satellite" --RH_SATELLITE_DESC = "Red Hat Satellite" -+RH_SATELLITE_DESC = "Satellite" - - NOTE_SIZE = "This preset may increase report size" - NOTE_TIME = "This preset may increase report run time" -@@ -230,9 +230,9 @@ rhel_presets = { - - - class RHELPolicy(RedHatPolicy): -- distro = RHEL_RELEASE_STR -- vendor = "Red Hat" -- vendor_url = "https://access.redhat.com/support/" -+ distro = "CentOS Linux" -+ vendor = "CentOS" -+ vendor_url = "https://wiki.centos.org/" - msg = _("""\ - This command will collect diagnostic and configuration \ - information from this %(distro)s system and installed \ -@@ -262,7 +262,7 @@ No changes will be made to system config - def check(cls): - """Test to see if the running host is a RHEL installation. - -- Checks for the presence of the "Red Hat Enterprise Linux" -+ Checks for the presence of the "CentOS Linux" - release string at the beginning of the NAME field in the - `/etc/os-release` file and returns ``True`` if it is - found, and ``False`` otherwise. -@@ -324,7 +324,7 @@ No changes will be made to system config - - ATOMIC = "atomic" - ATOMIC_RELEASE_STR = "Atomic" --ATOMIC_DESC = "Red Hat Enterprise Linux Atomic Host" -+ATOMIC_DESC = "CentOS Linux Atomic Host" - - atomic_presets = { - ATOMIC: PresetDefaults(name=ATOMIC, desc=ATOMIC_DESC, note=NOTE_TIME, -@@ -333,7 +333,7 @@ atomic_presets = { - - - class RedHatAtomicPolicy(RHELPolicy): -- distro = "Red Hat Atomic Host" -+ distro = "CentOS Atomic Host" - msg = _("""\ - This command will collect diagnostic and configuration \ - information from this %(distro)s system. diff --git a/SOURCES/sos-bz1599701-regexp-sub.patch b/SOURCES/sos-bz1599701-regexp-sub.patch deleted file mode 100644 index 7ffcd64..0000000 --- a/SOURCES/sos-bz1599701-regexp-sub.patch +++ /dev/null @@ -1,66 +0,0 @@ -From b96bdab03f06408e162b1733b20e8ba9fbf8e012 Mon Sep 17 00:00:00 2001 -From: "Bryn M. Reeves" -Date: Mon, 2 Jul 2018 12:01:04 +0100 -Subject: [PATCH] [archive] fix add_string()/do_*_sub() regression - -A change in the handling of add_string() operations in the archive -class causes the Plugin string substitution methods to fail (since -the archive was enforcing a check that the path did not already -exist - for substitutions this is always the case). - -Maintain the check for content that is being copied into the -archive anew, but make the add_string() method override this and -disable the existence checks. - -Signed-off-by: Bryn M. 
Reeves ---- - sos/archive.py | 14 ++++++++++---- - tests/archive_tests.py | 12 ++---------- - 2 files changed, 12 insertions(+), 14 deletions(-) - -diff --git a/sos/archive.py b/sos/archive.py -index d53baf41..e153c09a 100644 ---- a/sos/archive.py -+++ b/sos/archive.py -@@ -158,7 +158,7 @@ class FileCacheArchive(Archive): - name = name.lstrip(os.sep) - return (os.path.join(self._archive_root, name)) - -- def _check_path(self, src, path_type, dest=None): -+ def _check_path(self, src, path_type, dest=None, force=False): - """Check a new destination path in the archive. - - Since it is possible for multiple plugins to collect the same -@@ -185,6 +185,7 @@ class FileCacheArchive(Archive): - :param src: the source path to be copied to the archive - :param path_type: the type of object to be copied - :param dest: an optional destination path -+ :param force: force file creation even if the path exists - :returns: An absolute destination path if the path should be - copied now or `None` otherwise - """ -@@ -208,6 +209,9 @@ class FileCacheArchive(Archive): - stat.ISSOCK(mode) - ]) - -+ if force: -+ return dest -+ - # Check destination path presence and type - if os.path.exists(dest): - # Use lstat: we care about the current object, not the referent. -@@ -274,9 +278,11 @@ class FileCacheArchive(Archive): - with self._path_lock: - src = dest - -- dest = self._check_path(dest, P_FILE) -- if not dest: -- return -+ # add_string() is a special case: it must always take precedence -+ # over any exixting content in the archive, since it is used by -+ # the Plugin postprocessing hooks to perform regex substitution -+ # on file content. -+ dest = self._check_path(dest, P_FILE, force=True) - - f = codecs.open(dest, 'w', encoding='utf-8') - if isinstance(content, bytes): diff --git a/SOURCES/sos-bz1599739-cryptsetup-luksdump.patch b/SOURCES/sos-bz1599739-cryptsetup-luksdump.patch deleted file mode 100644 index 1bdeb3f..0000000 --- a/SOURCES/sos-bz1599739-cryptsetup-luksdump.patch +++ /dev/null @@ -1,55 +0,0 @@ -From 86e6843a61758fc17b13286c0c928efb97d15227 Mon Sep 17 00:00:00 2001 -From: Pavel Moravec -Date: Sat, 28 Jul 2018 09:45:49 +0200 -Subject: [PATCH] [block] collect luksDump for all encrypted devices - -Call "cryptsetup luksDump /dev/sd*" for all encrypted devices - -Resolves: #1390 - -Signed-off-by: Pavel Moravec ---- - sos/plugins/block.py | 22 ++++++++++++++++++++++ - 1 file changed, 22 insertions(+) - -diff --git a/sos/plugins/block.py b/sos/plugins/block.py -index 3a2d14d3..059686c5 100644 ---- a/sos/plugins/block.py -+++ b/sos/plugins/block.py -@@ -19,6 +19,22 @@ class Block(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin): - verify_packages = ('util-linux',) - files = ('/sys/block',) - -+ def get_luks_devices(self, lsblk_file): -+ out = [] -+ try: -+ lsblk_out = open(lsblk_file).read() -+ except IOError: -+ return out -+ for line in lsblk_out.splitlines(): -+ # find in output lines like -+ # |-sda2 crypto_LUKS -+ # and separate device name - it will be 1st string on the line -+ # after first '-' -+ if 'crypto_LUKS' in line: -+ dev = line.split()[0].split('-', 1)[1] -+ out.append(dev) -+ return out -+ - def setup(self): - self.add_cmd_output([ - "lsblk", -@@ -51,4 +67,10 @@ class Block(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin): - "fdisk -l %s" % disk_path - ]) - -+ lsblk_file = self.get_cmd_output_now("lsblk -f -a") -+ # for LUKS devices, collect cryptsetup luksDump -+ if lsblk_file: -+ for dev in self.get_luks_devices(lsblk_file): -+ self.add_cmd_output('cryptsetup luksDump /dev/%s' % 
dev) -+ - # vim: set et ts=4 sw=4 : --- -2.17.1 - diff --git a/SOURCES/sos-bz1607630-gssproxy-update-krb5.patch b/SOURCES/sos-bz1607630-gssproxy-update-krb5.patch deleted file mode 100644 index c19923c..0000000 --- a/SOURCES/sos-bz1607630-gssproxy-update-krb5.patch +++ /dev/null @@ -1,132 +0,0 @@ -From 0846ca08eb9e40125fe804d4886532980f9a0f6e Mon Sep 17 00:00:00 2001 -From: Robbie Harwood -Date: Mon, 23 Jul 2018 16:20:47 -0400 -Subject: [PATCH 1/2] [krb5] Add more files to krb5 plugin - -Add files for KDC configuration and logging, including kadmin files -and a klist of the K/M stash. - -Gather any additional configuration snippet files in -/etc/krb5.conf.d (which is configured by default on Fedora- and -RHEL-like systems, and hopefully on Debian systems in the future). -The sssd plugin already takes care of -/var/lib/sss/pubconf/krb5.include.d/, so don't include that. - -Resolves: #1385 - -Signed-off-by: Robbie Harwood -Signed-off-by: Bryn M. Reeves ---- - sos/plugins/krb5.py | 26 ++++++++++++++++++++++---- - 1 file changed, 22 insertions(+), 4 deletions(-) - -diff --git a/sos/plugins/krb5.py b/sos/plugins/krb5.py -index 3764f4ef..04d8018c 100644 ---- a/sos/plugins/krb5.py -+++ b/sos/plugins/krb5.py -@@ -1,4 +1,4 @@ --# Copyright (C) 2013 Red Hat, Inc., Bryn M. Reeves -+# Copyright (C) 2013,2018 Red Hat, Inc., Bryn M. Reeves - - # This file is part of the sos project: https://github.com/sosreport/sos - # -@@ -8,19 +8,37 @@ - # - # See the LICENSE file in the source distribution for further information. - --from sos.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin -+from sos.plugins import Plugin, RedHatPlugin - - --class Krb5(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin): -+class Krb5(Plugin): - """Kerberos authentication - """ - plugin_name = 'krb5' - profiles = ('identity', 'system') - packages = ('krb5-libs', 'krb5-user') - -+ # This is Debian's default, which is closest to upstream's -+ kdcdir = "/var/lib/krb5kdc" -+ - def setup(self): -- self.add_copy_spec("/etc/krb5.conf") -+ self.add_copy_spec([ -+ "/etc/krb5.conf", -+ "/etc/krb5.conf.d/*", -+ "%s/kadm5.acl" % self.kdcdir, -+ "%s/kdc.conf" % self.kdcdir, -+ "/var/log/krb5kdc.log", -+ "/var/log/kadmind.log" -+ ]) -+ self.add_cmd_output("klist -ket %s/.k5*" % self.kdcdir) - self.add_cmd_output("klist -ket /etc/krb5.keytab") - - -+class RedHatKrb5(Krb5, RedHatPlugin): -+ -+ def setup(self): -+ self.kdcdir = "/var/kerberos/krb5kdc" -+ super(RedHatKrb5, self).setup() -+ -+ - # vim: set et ts=4 sw=4 : --- -2.17.1 - - -From b30bf75847791d85d0e6e51a9b526b2bc93fc38e Mon Sep 17 00:00:00 2001 -From: Robbie Harwood -Date: Mon, 23 Jul 2018 16:53:02 -0400 -Subject: [PATCH 2/2] [plugins] Add plugin for gssproxy - -gssproxy stores its configuration in /etc/gssproxy. Also capture the -mech configuration so that we can tell if gssproxy is enabled and any -other GSS mechs in use. - -Resolves: #1386 - -Signed-off-by: Robbie Harwood -Signed-off-by: Bryn M. 
Reeves ---- - sos/plugins/gssproxy.py | 28 ++++++++++++++++++++++++++++ - 1 file changed, 28 insertions(+) - create mode 100644 sos/plugins/gssproxy.py - -diff --git a/sos/plugins/gssproxy.py b/sos/plugins/gssproxy.py -new file mode 100644 -index 00000000..7fdde14d ---- /dev/null -+++ b/sos/plugins/gssproxy.py -@@ -0,0 +1,28 @@ -+# Copyright (C) 2018 Red Hat, Inc., Robbie Harwood -+ -+# This file is part of the sos project: https://github.com/sosreport/sos -+# -+# This copyrighted material is made available to anyone wishing to use, -+# modify, copy, or redistribute it subject to the terms and conditions of -+# version 2 of the GNU General Public License. -+# -+# See the LICENSE file in the source distribution for further information. -+ -+from sos.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin -+ -+ -+class GSSProxy(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin): -+ """GSSAPI Proxy -+ """ -+ -+ plugin_name = "gssproxy" -+ profiles = ('services', 'security', 'identity') -+ packages = ('gssproxy',) -+ -+ def setup(self): -+ self.add_copy_spec([ -+ "/etc/gssproxy/*.conf", -+ "/etc/gss/mech.d/*" -+ ]) -+ -+# vim: set et ts=4 sw=4 : --- -2.17.1 - diff --git a/SOURCES/sos-bz1614952-archive-encryption.patch b/SOURCES/sos-bz1614952-archive-encryption.patch deleted file mode 100644 index 51c419f..0000000 --- a/SOURCES/sos-bz1614952-archive-encryption.patch +++ /dev/null @@ -1,262 +0,0 @@ -From 7b475f1da0f843b20437896737be04cc1c7bbc0a Mon Sep 17 00:00:00 2001 -From: Jake Hunsaker -Date: Fri, 25 May 2018 13:38:27 -0400 -Subject: [PATCH] [sosreport] Add mechanism to encrypt final archive - -Adds an option to encrypt the resulting archive that sos generates. -There are two methods for doing so: - - --encrypt-key Uses a key-pair for asymmetric encryption - --encrypt-pass Uses a password for symmetric encryption - -For key-pair encryption, the key-to-be-used must be imported into the -root user's keyring, as gpg does not allow for the use of keyfiles. - -If the encryption process fails, sos will not abort as the unencrypted -archive will have already been created. The assumption being that the -archive is still of use and/or the user has another means of encrypting -it. - -Resolves: #1320 - -Signed-off-by: Jake Hunsaker -Signed-off-by: Bryn M. Reeves ---- - man/en/sosreport.1 | 28 ++++++++++++++++++++++ - sos/__init__.py | 10 ++++---- - sos/archive.py | 63 ++++++++++++++++++++++++++++++++++++++++++++++---- - sos/sosreport.py | 20 ++++++++++++++-- - tests/archive_tests.py | 3 ++- - 5 files changed, 113 insertions(+), 11 deletions(-) - -diff --git a/man/en/sosreport.1 b/man/en/sosreport.1 -index b0adcd8bb..b6051edc1 100644 ---- a/man/en/sosreport.1 -+++ b/man/en/sosreport.1 -@@ -22,6 +22,8 @@ sosreport \- Collect and package diagnostic and support data - [--log-size]\fR - [--all-logs]\fR - [-z|--compression-type method]\fR -+ [--encrypt-key KEY]\fR -+ [--encrypt-pass PASS]\fR - [--experimental]\fR - [-h|--help]\fR - -@@ -120,6 +122,32 @@ increase the size of reports. - .B \-z, \--compression-type METHOD - Override the default compression type specified by the active policy. - .TP -+.B \--encrypt-key KEY -+Encrypts the resulting archive that sosreport produces using GPG. KEY must be -+an existing key in the user's keyring as GPG does not allow for keyfiles. -+KEY can be any value accepted by gpg's 'recipient' option. -+ -+Note that the user running sosreport must match the user owning the keyring -+from which keys will be obtained. 
In particular this means that if sudo is -+used to run sosreport, the keyring must also be set up using sudo -+(or direct shell access to the account). -+ -+Users should be aware that encrypting the final archive will result in sos -+using double the amount of temporary disk space - the encrypted archive must be -+written as a separate, rather than replacement, file within the temp directory -+that sos writes the archive to. However, since the encrypted archive will be -+the same size as the original archive, there is no additional space consumption -+once the temporary directory is removed at the end of execution. -+ -+This means that only the encrypted archive is present on disk after sos -+finishes running. -+ -+If encryption fails for any reason, the original unencrypted archive is -+preserved instead. -+.TP -+.B \--encrypt-pass PASS -+The same as \--encrypt-key, but use the provided PASS for symmetric encryption -+rather than key-pair encryption. - .TP - .B \--batch - Generate archive without prompting for interactive input. -diff --git a/sos/__init__.py b/sos/__init__.py -index ef4524c60..cd9779bdc 100644 ---- a/sos/__init__.py -+++ b/sos/__init__.py -@@ -45,10 +45,10 @@ def _default(msg): - _arg_names = [ - 'add_preset', 'alloptions', 'all_logs', 'batch', 'build', 'case_id', - 'chroot', 'compression_type', 'config_file', 'desc', 'debug', 'del_preset', -- 'enableplugins', 'experimental', 'label', 'list_plugins', 'list_presets', -- 'list_profiles', 'log_size', 'noplugins', 'noreport', 'note', -- 'onlyplugins', 'plugopts', 'preset', 'profiles', 'quiet', 'sysroot', -- 'threads', 'tmp_dir', 'verbosity', 'verify' -+ 'enableplugins', 'encrypt_key', 'encrypt_pass', 'experimental', 'label', -+ 'list_plugins', 'list_presets', 'list_profiles', 'log_size', 'noplugins', -+ 'noreport', 'note', 'onlyplugins', 'plugopts', 'preset', 'profiles', -+ 'quiet', 'sysroot', 'threads', 'tmp_dir', 'verbosity', 'verify' - ] - - #: Arguments with non-zero default values -@@ -84,6 +84,8 @@ class SoSOptions(object): - del_preset = "" - desc = "" - enableplugins = [] -+ encrypt_key = None -+ encrypt_pass = None - experimental = False - label = "" - list_plugins = False -diff --git a/sos/archive.py b/sos/archive.py -index e153c09ad..263e3dd3f 100644 ---- a/sos/archive.py -+++ b/sos/archive.py -@@ -142,11 +142,12 @@ class FileCacheArchive(Archive): - _archive_root = "" - _archive_name = "" - -- def __init__(self, name, tmpdir, policy, threads): -+ def __init__(self, name, tmpdir, policy, threads, enc_opts): - self._name = name - self._tmp_dir = tmpdir - self._policy = policy - self._threads = threads -+ self.enc_opts = enc_opts - self._archive_root = os.path.join(tmpdir, name) - with self._path_lock: - os.makedirs(self._archive_root, 0o700) -@@ -384,12 +385,65 @@ def finalize(self, method): - os.stat(self._archive_name).st_size)) - self.method = method - try: -- return self._compress() -+ res = self._compress() - except Exception as e: - exp_msg = "An error occurred compressing the archive: " - self.log_error("%s %s" % (exp_msg, e)) - return self.name() - -+ if self.enc_opts['encrypt']: -+ try: -+ return self._encrypt(res) -+ except Exception as e: -+ exp_msg = "An error occurred encrypting the archive:" -+ self.log_error("%s %s" % (exp_msg, e)) -+ return res -+ else: -+ return res -+ -+ def _encrypt(self, archive): -+ """Encrypts the compressed archive using GPG. -+ -+ If encryption fails for any reason, it should be logged by sos but not -+ cause execution to stop. 
The assumption is that the unencrypted archive -+ would still be of use to the user, and/or that the end user has another -+ means of securing the archive. -+ -+ Returns the name of the encrypted archive, or raises an exception to -+ signal that encryption failed and the unencrypted archive name should -+ be used. -+ """ -+ arc_name = archive.replace("sosreport-", "secured-sosreport-") -+ arc_name += ".gpg" -+ enc_cmd = "gpg --batch -o %s " % arc_name -+ env = None -+ if self.enc_opts["key"]: -+ # need to assume a trusted key here to be able to encrypt the -+ # archive non-interactively -+ enc_cmd += "--trust-model always -e -r %s " % self.enc_opts["key"] -+ enc_cmd += archive -+ if self.enc_opts["password"]: -+ # prevent change of gpg options using a long password, but also -+ # prevent the addition of quote characters to the passphrase -+ passwd = "%s" % self.enc_opts["password"].replace('\'"', '') -+ env = {"sos_gpg": passwd} -+ enc_cmd += "-c --passphrase-fd 0 " -+ enc_cmd = "/bin/bash -c \"echo $sos_gpg | %s\"" % enc_cmd -+ enc_cmd += archive -+ r = sos_get_command_output(enc_cmd, timeout=0, env=env) -+ if r["status"] == 0: -+ return arc_name -+ elif r["status"] == 2: -+ if self.enc_opts["key"]: -+ msg = "Specified key not in keyring" -+ else: -+ msg = "Could not read passphrase" -+ else: -+ # TODO: report the actual error from gpg. Currently, we cannot as -+ # sos_get_command_output() does not capture stderr -+ msg = "gpg exited with code %s" % r["status"] -+ raise Exception(msg) -+ - - # Compatibility version of the tarfile.TarFile class. This exists to allow - # compatibility with PY2 runtimes that lack the 'filter' parameter to the -@@ -468,8 +522,9 @@ class TarFileArchive(FileCacheArchive): - method = None - _with_selinux_context = False - -- def __init__(self, name, tmpdir, policy, threads): -- super(TarFileArchive, self).__init__(name, tmpdir, policy, threads) -+ def __init__(self, name, tmpdir, policy, threads, enc_opts): -+ super(TarFileArchive, self).__init__(name, tmpdir, policy, threads, -+ enc_opts) - self._suffix = "tar" - self._archive_name = os.path.join(tmpdir, self.name()) - -diff --git a/sos/sosreport.py b/sos/sosreport.py -index 60802617c..00c3e8110 100644 ---- a/sos/sosreport.py -+++ b/sos/sosreport.py -@@ -316,6 +316,13 @@ def _parse_args(args): - preset_grp.add_argument("--del-preset", type=str, action="store", - help="Delete the named command line preset") - -+ encrypt_grp = parser.add_mutually_exclusive_group() -+ encrypt_grp.add_argument("--encrypt-key", -+ help="Encrypt the final archive using a GPG " -+ "key-pair") -+ encrypt_grp.add_argument("--encrypt-pass", -+ help="Encrypt the final archive using a password") -+ - return parser.parse_args(args) - - -@@ -431,16 +438,25 @@ def get_temp_file(self): - return self.tempfile_util.new() - - def _set_archive(self): -+ enc_opts = { -+ 'encrypt': True if (self.opts.encrypt_pass or -+ self.opts.encrypt_key) else False, -+ 'key': self.opts.encrypt_key, -+ 'password': self.opts.encrypt_pass -+ } -+ - archive_name = os.path.join(self.tmpdir, - self.policy.get_archive_name()) - if self.opts.compression_type == 'auto': - auto_archive = self.policy.get_preferred_archive() - self.archive = auto_archive(archive_name, self.tmpdir, -- self.policy, self.opts.threads) -+ self.policy, self.opts.threads, -+ enc_opts) - - else: - self.archive = TarFileArchive(archive_name, self.tmpdir, -- self.policy, self.opts.threads) -+ self.policy, self.opts.threads, -+ enc_opts) - - self.archive.set_debug(True if self.opts.debug else False) 
- -diff --git a/tests/archive_tests.py b/tests/archive_tests.py -index b4dd8d0ff..e5b329b5f 100644 ---- a/tests/archive_tests.py -+++ b/tests/archive_tests.py -@@ -19,7 +19,8 @@ class TarFileArchiveTest(unittest.TestCase): - - def setUp(self): - self.tmpdir = tempfile.mkdtemp() -- self.tf = TarFileArchive('test', self.tmpdir, Policy(), 1) -+ enc = {'encrypt': False} -+ self.tf = TarFileArchive('test', self.tmpdir, Policy(), 1, enc) - - def tearDown(self): - shutil.rmtree(self.tmpdir) diff --git a/SOURCES/sos-bz1614953-stat-isblk.patch b/SOURCES/sos-bz1614953-stat-isblk.patch deleted file mode 100644 index 6200ffd..0000000 --- a/SOURCES/sos-bz1614953-stat-isblk.patch +++ /dev/null @@ -1,36 +0,0 @@ -From 4127d02f00561b458398ce2b5ced7ae853b23227 Mon Sep 17 00:00:00 2001 -From: Bryan Quigley -Date: Mon, 2 Jul 2018 16:48:21 -0400 -Subject: [PATCH] [archive] fix stat typo - -They're just missing the S_ in front of them so if that code gets -reached it fails. - -Fixes: #1373 -Resolves: #1374 - -Signed-off-by: Bryan Quigley -Signed-off-by: Bryn M. Reeves ---- - sos/archive.py | 8 ++++---- - 1 file changed, 4 insertions(+), 4 deletions(-) - -diff --git a/sos/archive.py b/sos/archive.py -index 263e3dd3f..fdf6f9a80 100644 ---- a/sos/archive.py -+++ b/sos/archive.py -@@ -204,10 +204,10 @@ def _check_path(self, src, path_type, dest=None, force=False): - - def is_special(mode): - return any([ -- stat.ISBLK(mode), -- stat.ISCHR(mode), -- stat.ISFIFO(mode), -- stat.ISSOCK(mode) -+ stat.S_ISBLK(mode), -+ stat.S_ISCHR(mode), -+ stat.S_ISFIFO(mode), -+ stat.S_ISSOCK(mode) - ]) - - if force: diff --git a/SOURCES/sos-bz1614954-cds-on-rhui3.patch b/SOURCES/sos-bz1614954-cds-on-rhui3.patch deleted file mode 100644 index 5c55040..0000000 --- a/SOURCES/sos-bz1614954-cds-on-rhui3.patch +++ /dev/null @@ -1,33 +0,0 @@ -From 62f4affbc9fb6da06dd5707e9aa659d206352e87 Mon Sep 17 00:00:00 2001 -From: Pavel Moravec -Date: Tue, 3 Jul 2018 13:02:09 +0200 -Subject: [PATCH] [rhui] Fix detection of CDS for RHUI3 - -Detection of CDS node on RHUI 3 cant rely on deprecated pulp-cds package -but rather on rhui-mirrorlist one. - -Resolves: #1375 - -Signed-off-by: Pavel Moravec -Signed-off-by: Bryn M. 
Reeves ---- - sos/plugins/rhui.py | 6 +++++- - 1 file changed, 5 insertions(+), 1 deletion(-) - -diff --git a/sos/plugins/rhui.py b/sos/plugins/rhui.py -index 2b1e2baa7..459a89831 100644 ---- a/sos/plugins/rhui.py -+++ b/sos/plugins/rhui.py -@@ -22,7 +22,11 @@ class Rhui(Plugin, RedHatPlugin): - files = [rhui_debug_path] - - def setup(self): -- if self.is_installed("pulp-cds"): -+ cds_installed = [ -+ self.is_installed("pulp-cds"), -+ self.is_installed("rhui-mirrorlist") -+ ] -+ if any(cds_installed): - cds = "--cds" - else: - cds = "" diff --git a/SOURCES/sos-bz1614955-ceph-dont-collect-tmp-mnt.patch b/SOURCES/sos-bz1614955-ceph-dont-collect-tmp-mnt.patch deleted file mode 100644 index 400c654..0000000 --- a/SOURCES/sos-bz1614955-ceph-dont-collect-tmp-mnt.patch +++ /dev/null @@ -1,44 +0,0 @@ -From dfed1abf3cac691cfc669bbf4e07e58e2e637776 Mon Sep 17 00:00:00 2001 -From: Pavel Moravec -Date: Fri, 27 Jul 2018 08:27:45 +0200 -Subject: [PATCH] [apparmor,ceph] fix typo in add_forbidden_path - -commit 29a40b7 removed leading '/' from two forbidden paths - -Resolves: #1388 - -Signed-off-by: Pavel Moravec ---- - sos/plugins/apparmor.py | 2 +- - sos/plugins/ceph.py | 2 +- - 2 files changed, 2 insertions(+), 2 deletions(-) - -diff --git a/sos/plugins/apparmor.py b/sos/plugins/apparmor.py -index c4c64baf..e239c0b5 100644 ---- a/sos/plugins/apparmor.py -+++ b/sos/plugins/apparmor.py -@@ -26,7 +26,7 @@ class Apparmor(Plugin, UbuntuPlugin): - self.add_forbidden_path([ - "/etc/apparmor.d/cache", - "/etc/apparmor.d/libvirt/libvirt*", -- "etc/apparmor.d/abstractions" -+ "/etc/apparmor.d/abstractions" - ]) - - self.add_cmd_output([ -diff --git a/sos/plugins/ceph.py b/sos/plugins/ceph.py -index 10e48b62..ed6816b2 100644 ---- a/sos/plugins/ceph.py -+++ b/sos/plugins/ceph.py -@@ -77,7 +77,7 @@ class Ceph(Plugin, RedHatPlugin, UbuntuPlugin): - "/var/lib/ceph/mon/*", - # Excludes temporary ceph-osd mount location like - # /var/lib/ceph/tmp/mnt.XXXX from sos collection. -- "var/lib/ceph/tmp/*mnt*", -+ "/var/lib/ceph/tmp/*mnt*", - "/etc/ceph/*bindpass*" - ]) - --- -2.17.1 - diff --git a/SOURCES/sos-bz1614956-archive-name-sanitize.patch b/SOURCES/sos-bz1614956-archive-name-sanitize.patch deleted file mode 100644 index 4c48384..0000000 --- a/SOURCES/sos-bz1614956-archive-name-sanitize.patch +++ /dev/null @@ -1,52 +0,0 @@ -From bc650cd161548159e551ddc201596bf19b1865d0 Mon Sep 17 00:00:00 2001 -From: Pavel Moravec -Date: Fri, 27 Jul 2018 08:56:37 +0200 -Subject: [PATCH] [policies] sanitize report label - -similarly like we sanitize case id, we should sanitize report label -to e.g. exclude spaces from final tarball name. - -Resolves: #1389 - -Signed-off-by: Pavel Moravec ---- - sos/policies/__init__.py | 9 +++------ - 1 file changed, 3 insertions(+), 6 deletions(-) - -diff --git a/sos/policies/__init__.py b/sos/policies/__init__.py -index 7b301dec..65d8aac6 100644 ---- a/sos/policies/__init__.py -+++ b/sos/policies/__init__.py -@@ -408,7 +408,7 @@ No changes will be made to system configuration. - date=date, - rand=rand - ) -- return time.strftime(nstr) -+ return self.sanitize_filename(time.strftime(nstr)) - - # for some specific binaries like "xz", we need to determine package - # providing it; that is policy specific. 
By default return the binary -@@ -726,8 +726,8 @@ class LinuxPolicy(Policy): - """Returns the name usd in the pre_work step""" - return self.host_name() - -- def sanitize_case_id(self, case_id): -- return re.sub(r"[^-a-z,A-Z.0-9]", "", case_id) -+ def sanitize_filename(self, name): -+ return re.sub(r"[^-a-z,A-Z.0-9]", "", name) - - def lsmod(self): - """Return a list of kernel module names as strings. -@@ -755,9 +755,6 @@ class LinuxPolicy(Policy): - if cmdline_opts.case_id: - self.case_id = cmdline_opts.case_id - -- if self.case_id: -- self.case_id = self.sanitize_case_id(self.case_id) -- - return - - --- -2.17.1 - diff --git a/SOURCES/sos-bz1614957-rhosp-lsof-optional.patch b/SOURCES/sos-bz1614957-rhosp-lsof-optional.patch deleted file mode 100644 index 9a555bb..0000000 --- a/SOURCES/sos-bz1614957-rhosp-lsof-optional.patch +++ /dev/null @@ -1,113 +0,0 @@ -From a55680e6c8ac87fdf4ee3100717001c1f6f6a08b Mon Sep 17 00:00:00 2001 -From: Pavel Moravec -Date: Thu, 9 Aug 2018 08:59:53 +0200 -Subject: [PATCH 1/3] [process] make lsof execution optional - -Make calling of lsof command optional (but enabled by default). - -Also remove "collect lsof-threads when --all-logs" as all-logs -has nothing in common. - -Resolves: #1394 - -Signed-off-by: Pavel Moravec ---- - sos/plugins/process.py | 6 ++++-- - 1 file changed, 4 insertions(+), 2 deletions(-) - -diff --git a/sos/plugins/process.py b/sos/plugins/process.py -index 755eec8d..d1c455a5 100644 ---- a/sos/plugins/process.py -+++ b/sos/plugins/process.py -@@ -17,6 +17,7 @@ class Process(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin): - profiles = ('system',) - - option_list = [ -+ ("lsof", "gathers information on all open files", "slow", True), - ("lsof-threads", "gathers threads' open file info if supported", - "slow", False) - ] -@@ -35,9 +36,10 @@ class Process(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin): - - self.add_cmd_output("ps auxwww", root_symlink="ps") - self.add_cmd_output("pstree", root_symlink="pstree") -- self.add_cmd_output("lsof -b +M -n -l -c ''", root_symlink="lsof") -+ if self.get_option("lsof"): -+ self.add_cmd_output("lsof -b +M -n -l -c ''", root_symlink="lsof") - -- if self.get_option("lsof-threads") or self.get_option("all_logs"): -+ if self.get_option("lsof-threads"): - self.add_cmd_output("lsof -b +M -n -l") - - self.add_cmd_output([ --- -2.17.1 - -From 48a1a00685c680ba9fbd5c9b10377e8d0551a926 Mon Sep 17 00:00:00 2001 -From: Pavel Moravec -Date: Thu, 9 Aug 2018 18:11:38 +0200 -Subject: [PATCH 2/3] [policies] RHOSP preset with -k process.lsof=off - -Make lsof calls on OSP systems disabled by default. 
- -Relevant to: #1395 - -Signed-off-by: Pavel Moravec ---- - sos/policies/redhat.py | 4 +++- - 1 file changed, 3 insertions(+), 1 deletion(-) - -diff --git a/sos/policies/redhat.py b/sos/policies/redhat.py -index cfbf7808..ee687d46 100644 ---- a/sos/policies/redhat.py -+++ b/sos/policies/redhat.py -@@ -192,6 +192,8 @@ ENV_HOST_SYSROOT = 'HOST' - _opts_verify = SoSOptions(verify=True) - _opts_all_logs = SoSOptions(all_logs=True) - _opts_all_logs_verify = SoSOptions(all_logs=True, verify=True) -+_opts_all_logs_no_lsof = SoSOptions(all_logs=True, -+ plugopts=['process.lsof=off']) - - RHEL_RELEASE_STR = "Red Hat Enterprise Linux" - -@@ -219,7 +221,7 @@ rhel_presets = { - opts=_opts_verify), - RHEL: PresetDefaults(name=RHEL, desc=RHEL_DESC), - RHOSP: PresetDefaults(name=RHOSP, desc=RHOSP_DESC, note=NOTE_SIZE, -- opts=_opts_all_logs), -+ opts=_opts_all_logs_no_lsof), - RHOCP: PresetDefaults(name=RHOCP, desc=RHOCP_DESC, note=NOTE_SIZE_TIME, - opts=_opts_all_logs_verify), - RH_SATELLITE: PresetDefaults(name=RH_SATELLITE, desc=RH_SATELLITE_DESC, --- -2.17.1 - -From 84c30742254a536f70bb4217756416bcf0e8a51b Mon Sep 17 00:00:00 2001 -From: Pavel Moravec -Date: Thu, 9 Aug 2018 18:14:56 +0200 -Subject: [PATCH 3/3] [policies] enable RHOSP preset by presence of - rhosp-release package - -Resolves: #1395 - -Signed-off-by: Pavel Moravec ---- - sos/policies/redhat.py | 2 ++ - 1 file changed, 2 insertions(+) - -diff --git a/sos/policies/redhat.py b/sos/policies/redhat.py -index ee687d46..5bfbade2 100644 ---- a/sos/policies/redhat.py -+++ b/sos/policies/redhat.py -@@ -315,6 +315,8 @@ No changes will be made to system configuration. - # Package based checks - if self.pkg_by_name("satellite-common") is not None: - return self.find_preset(RH_SATELLITE) -+ if self.pkg_by_name("rhosp-release") is not None: -+ return self.find_preset(RHOSP) - - # Vanilla RHEL is default - return self.find_preset(RHEL) --- -2.17.1 - diff --git a/SOURCES/sos-bz1619234-proc-sys-selinux-relabelto.patch b/SOURCES/sos-bz1619234-proc-sys-selinux-relabelto.patch deleted file mode 100644 index f38e44b..0000000 --- a/SOURCES/sos-bz1619234-proc-sys-selinux-relabelto.patch +++ /dev/null @@ -1,46 +0,0 @@ -From d5b1d349b868e66a4001c23dae7afa05daaca907 Mon Sep 17 00:00:00 2001 -From: Pavel Moravec -Date: Wed, 22 Aug 2018 10:35:58 +0200 -Subject: [PATCH] [archive] Dont copystat /sys and /proc paths - -Stop copying extended attributes of files under /sys and /proc -that can raise SELinux denials on that attempt. 
- -Resolves: #1399 - -Signed-off-by: Pavel Moravec ---- - sos/archive.py | 13 +++++++------ - 1 file changed, 7 insertions(+), 6 deletions(-) - -diff --git a/sos/archive.py b/sos/archive.py -index fdf6f9a8..5d99170f 100644 ---- a/sos/archive.py -+++ b/sos/archive.py -@@ -251,16 +251,17 @@ class FileCacheArchive(Archive): - pass - else: - self.log_info("caught '%s' copying '%s'" % (e, src)) -- try: -- shutil.copystat(src, dest) -- except OSError: -- # SELinux xattrs in /proc and /sys throw this -- pass -+ # copy file attributes, skip SELinux xattrs for /sys and /proc - try: - stat = os.stat(src) -+ if src.startswith("/sys/") or src.startswith("/proc/"): -+ shutil.copymode(src, dest) -+ os.utime(dest, ns=(stat.st_atime_ns, stat.st_mtime_ns)) -+ else: -+ shutil.copystat(src, dest) - os.chown(dest, stat.st_uid, stat.st_gid) - except Exception as e: -- self.log_debug("caught '%s' setting ownership of '%s'" -+ self.log_debug("caught '%s' setting attributes of '%s'" - % (e, dest)) - file_name = "'%s'" % src - else: --- -2.17.1 - diff --git a/SOURCES/sos-bz1620048-etcd-kube-osp-3-10.patch b/SOURCES/sos-bz1620048-etcd-kube-osp-3-10.patch deleted file mode 100644 index b08251c..0000000 --- a/SOURCES/sos-bz1620048-etcd-kube-osp-3-10.patch +++ /dev/null @@ -1,325 +0,0 @@ -From 6372a7f7f09511d864aa6bd894109d937f4fda65 Mon Sep 17 00:00:00 2001 -From: Jake Hunsaker -Date: Thu, 12 Jul 2018 12:36:25 -0400 -Subject: [PATCH 1/3] [kubernetes|etcd] Support OpenShift 3.10 deployments - -The 3.10 version of OCP changes the deployment configurations for etcd -and kubernetes components, and additionally changes the way the etcdctl -command is called when running in a static pod. Update these plugins to -support this new deployment style. - -Signed-off-by: Jake Hunsaker ---- - sos/plugins/etcd.py | 11 ++- - sos/plugins/kubernetes.py | 148 +++++++++++++++++++------------------- - 2 files changed, 83 insertions(+), 76 deletions(-) - -diff --git a/sos/plugins/etcd.py b/sos/plugins/etcd.py -index c343f750..c8ee3849 100644 ---- a/sos/plugins/etcd.py -+++ b/sos/plugins/etcd.py -@@ -10,6 +10,7 @@ - # See the LICENSE file in the source distribution for further information. 
- - from sos.plugins import Plugin, RedHatPlugin -+from os import path - - - class etcd(Plugin, RedHatPlugin): -@@ -19,10 +20,14 @@ class etcd(Plugin, RedHatPlugin): - plugin_name = 'etcd' - packages = ('etcd',) - profiles = ('container', 'system', 'services', 'cluster') -- -- cmd = 'etcdctl' -+ files = ('/etc/origin/node/pods/etcd.yaml',) - - def setup(self): -+ if path.exists('/etc/origin/node/pods/etcd.yaml'): -+ etcd_cmd = 'master-exec etcd etcd etcdctl' -+ else: -+ etcd_cmd = 'etcdctl' -+ - etcd_url = self.get_etcd_url() - - self.add_forbidden_path('/etc/etcd/ca') -@@ -35,7 +40,7 @@ class etcd(Plugin, RedHatPlugin): - 'ls --recursive', - ] - -- self.add_cmd_output(['%s %s' % (self.cmd, sub) for sub in subcmds]) -+ self.add_cmd_output(['%s %s' % (etcd_cmd, sub) for sub in subcmds]) - - urls = [ - '/v2/stats/leader', -diff --git a/sos/plugins/kubernetes.py b/sos/plugins/kubernetes.py -index e75c7a37..21cb51df 100644 ---- a/sos/plugins/kubernetes.py -+++ b/sos/plugins/kubernetes.py -@@ -18,11 +18,16 @@ class kubernetes(Plugin, RedHatPlugin): - """Kubernetes plugin - """ - -- # Red Hat Atomic Platform and OpenShift Enterprise use the -- # atomic-openshift-master package to provide kubernetes -+ # OpenShift Container Platform uses the atomic-openshift-master package -+ # to provide kubernetes - packages = ('kubernetes', 'kubernetes-master', 'atomic-openshift-master') - profiles = ('container',) -- files = ("/etc/origin/master/master-config.yaml",) -+ # use files only for masters, rely on package list for nodes -+ files = ( -+ "/var/run/kubernetes/apiserver.key", -+ "/etc/origin/master/", -+ "/etc/origin/node/pods/master-config.yaml" -+ ) - - option_list = [ - ("all", "also collect all namespaces output separately", -@@ -33,12 +38,7 @@ class kubernetes(Plugin, RedHatPlugin): - ] - - def check_is_master(self): -- if any([ -- path.exists("/var/run/kubernetes/apiserver.key"), -- path.exists("/etc/origin/master/master-config.yaml") -- ]): -- return True -- return False -+ return any([path.exists(f) for f in self.files]) - - def setup(self): - self.add_copy_spec("/etc/kubernetes") -@@ -56,74 +56,76 @@ class kubernetes(Plugin, RedHatPlugin): - self.add_journal(units=svc) - - # We can only grab kubectl output from the master -- if self.check_is_master(): -- kube_cmd = "kubectl " -- if path.exists('/etc/origin/master/admin.kubeconfig'): -- kube_cmd += "--config=/etc/origin/master/admin.kubeconfig" -- -- kube_get_cmd = "get -o json " -- for subcmd in ['version', 'config view']: -- self.add_cmd_output('%s %s' % (kube_cmd, subcmd)) -- -- # get all namespaces in use -- kn = self.get_command_output('%s get namespaces' % kube_cmd) -- knsps = [n.split()[0] for n in kn['output'].splitlines()[1:] if n] -- -- resources = [ -- 'limitrange', -- 'pods', -- 'pvc', -- 'rc', -- 'resourcequota', -- 'services' -- ] -- -- # nodes and pvs are not namespaced, must pull separately. 
-- # Also collect master metrics -- self.add_cmd_output([ -- "{} get -o json nodes".format(kube_cmd), -- "{} get -o json pv".format(kube_cmd), -- "{} get --raw /metrics".format(kube_cmd) -- ]) -- -- for n in knsps: -- knsp = '--namespace=%s' % n -- if self.get_option('all'): -- k_cmd = '%s %s %s' % (kube_cmd, kube_get_cmd, knsp) -- -- self.add_cmd_output('%s events' % k_cmd) -+ if not self.check_is_master(): -+ return -+ -+ kube_cmd = "kubectl " -+ if path.exists('/etc/origin/master/admin.kubeconfig'): -+ kube_cmd += "--config=/etc/origin/master/admin.kubeconfig" -+ -+ kube_get_cmd = "get -o json " -+ for subcmd in ['version', 'config view']: -+ self.add_cmd_output('%s %s' % (kube_cmd, subcmd)) -+ -+ # get all namespaces in use -+ kn = self.get_command_output('%s get namespaces' % kube_cmd) -+ knsps = [n.split()[0] for n in kn['output'].splitlines()[1:] if n] -+ -+ resources = [ -+ 'limitrange', -+ 'pods', -+ 'pvc', -+ 'rc', -+ 'resourcequota', -+ 'services' -+ ] -+ -+ # nodes and pvs are not namespaced, must pull separately. -+ # Also collect master metrics -+ self.add_cmd_output([ -+ "{} get -o json nodes".format(kube_cmd), -+ "{} get -o json pv".format(kube_cmd), -+ "{} get --raw /metrics".format(kube_cmd) -+ ]) -+ -+ for n in knsps: -+ knsp = '--namespace=%s' % n -+ if self.get_option('all'): -+ k_cmd = '%s %s %s' % (kube_cmd, kube_get_cmd, knsp) -+ -+ self.add_cmd_output('%s events' % k_cmd) - -- for res in resources: -- self.add_cmd_output('%s %s' % (k_cmd, res)) -- -- if self.get_option('describe'): -- # need to drop json formatting for this -- k_cmd = '%s get %s' % (kube_cmd, knsp) -- for res in resources: -- r = self.get_command_output( -- '%s %s' % (k_cmd, res)) -- if r['status'] == 0: -- k_list = [k.split()[0] for k in -- r['output'].splitlines()[1:]] -- for k in k_list: -- k_cmd = '%s %s' % (kube_cmd, knsp) -- self.add_cmd_output( -- '%s describe %s %s' % (k_cmd, res, k)) -- -- if self.get_option('podlogs'): -- k_cmd = '%s %s' % (kube_cmd, knsp) -- r = self.get_command_output('%s get pods' % k_cmd) -- if r['status'] == 0: -- pods = [p.split()[0] for p in -- r['output'].splitlines()[1:]] -- for pod in pods: -- self.add_cmd_output('%s logs %s' % (k_cmd, pod)) -- -- if not self.get_option('all'): -- k_cmd = '%s get --all-namespaces=true' % kube_cmd - for res in resources: - self.add_cmd_output('%s %s' % (k_cmd, res)) - -+ if self.get_option('describe'): -+ # need to drop json formatting for this -+ k_cmd = '%s get %s' % (kube_cmd, knsp) -+ for res in resources: -+ r = self.get_command_output( -+ '%s %s' % (k_cmd, res)) -+ if r['status'] == 0: -+ k_list = [k.split()[0] for k in -+ r['output'].splitlines()[1:]] -+ for k in k_list: -+ k_cmd = '%s %s' % (kube_cmd, knsp) -+ self.add_cmd_output( -+ '%s describe %s %s' % (k_cmd, res, k)) -+ -+ if self.get_option('podlogs'): -+ k_cmd = '%s %s' % (kube_cmd, knsp) -+ r = self.get_command_output('%s get pods' % k_cmd) -+ if r['status'] == 0: -+ pods = [p.split()[0] for p in -+ r['output'].splitlines()[1:]] -+ for pod in pods: -+ self.add_cmd_output('%s logs %s' % (k_cmd, pod)) -+ -+ if not self.get_option('all'): -+ k_cmd = '%s get --all-namespaces=true' % kube_cmd -+ for res in resources: -+ self.add_cmd_output('%s %s' % (k_cmd, res)) -+ - def postproc(self): - # First, clear sensitive data from the json output collected. 
- # This will mask values when the "name" looks susceptible of --- -2.17.1 - - -From 63ad6c251ab88ab2f0e07ae9e3f1b2771d5e90ca Mon Sep 17 00:00:00 2001 -From: Jake Hunsaker -Date: Thu, 12 Jul 2018 13:07:34 -0400 -Subject: [PATCH 2/3] [kubernetes] Correct config option syntax - -Versions of kubernetes after 1.5 use --kubeconfig instead of --config to -specify a configuration file to use for kubectl commands. Update the -kubernetes plugin to use the proper syntax. - -Signed-off-by: Jake Hunsaker ---- - sos/plugins/kubernetes.py | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/sos/plugins/kubernetes.py b/sos/plugins/kubernetes.py -index 21cb51df..c14e078e 100644 ---- a/sos/plugins/kubernetes.py -+++ b/sos/plugins/kubernetes.py -@@ -61,7 +61,7 @@ class kubernetes(Plugin, RedHatPlugin): - - kube_cmd = "kubectl " - if path.exists('/etc/origin/master/admin.kubeconfig'): -- kube_cmd += "--config=/etc/origin/master/admin.kubeconfig" -+ kube_cmd += "--kubeconfig=/etc/origin/master/admin.kubeconfig" - - kube_get_cmd = "get -o json " - for subcmd in ['version', 'config view']: --- -2.17.1 - - -From 46fffd469f4f3d07337dc335cfc24341e836f23b Mon Sep 17 00:00:00 2001 -From: Jake Hunsaker -Date: Thu, 12 Jul 2018 13:11:44 -0400 -Subject: [PATCH 3/3] [origin] Collect statistics information - -Adds collection of 'oc adm top' output for images and imagestreams. - -Resolves: #1165 -Closes: #1383 - -Signed-off-by: Jake Hunsaker ---- - sos/plugins/origin.py | 26 ++++++++++++++++++++------ - 1 file changed, 20 insertions(+), 6 deletions(-) - -diff --git a/sos/plugins/origin.py b/sos/plugins/origin.py -index 02bc047a..0e384117 100644 ---- a/sos/plugins/origin.py -+++ b/sos/plugins/origin.py -@@ -124,14 +124,28 @@ class OpenShiftOrigin(Plugin): - # - # Note: Information about nodes, events, pods, and services - # is already collected by the Kubernetes plugin -+ -+ subcmds = [ -+ "describe projects", -+ "adm top images", -+ "adm top imagestreams" -+ ] -+ - self.add_cmd_output([ -- "%s describe projects" % oc_cmd_admin, -- "%s get -o json hostsubnet" % oc_cmd_admin, -- "%s get -o json clusternetwork" % oc_cmd_admin, -- "%s get -o json netnamespaces" % oc_cmd_admin, -- # Registry and router configs are typically here -- "%s get -o json dc -n default" % oc_cmd_admin, -+ '%s %s' % (oc_cmd_admin, subcmd) for subcmd in subcmds - ]) -+ -+ jcmds = [ -+ "hostsubnet", -+ "clusternetwork", -+ "netnamespaces", -+ "dc -n default" -+ ] -+ -+ self.add_cmd_output([ -+ '%s get -o json %s' % (oc_cmd_admin, jcmd) for jcmd in jcmds -+ ]) -+ - if self.get_option('diag'): - diag_cmd = "%s adm diagnostics -l 0" % oc_cmd_admin - if self.get_option('diag-prevent'): --- -2.17.1 - diff --git a/SOURCES/sos-bz1620049-rhv-log-collector-analyzer.patch b/SOURCES/sos-bz1620049-rhv-log-collector-analyzer.patch deleted file mode 100644 index 6930786..0000000 --- a/SOURCES/sos-bz1620049-rhv-log-collector-analyzer.patch +++ /dev/null @@ -1,66 +0,0 @@ -From d297b2116fd864c65dba76b343f5101466c0eeb7 Mon Sep 17 00:00:00 2001 -From: Douglas Schilling Landgraf -Date: Tue, 10 Jul 2018 12:03:41 -0400 -Subject: [PATCH] [rhv-log-collector-analyzer] Add new plugin for RHV - -This commit adds the plugin rhv-log-collector-analyzer, it will -collect: - -- Output of rhv-log-collector-analyer --json -- Generated HTML file from --live - -Signed-off-by: Douglas Schilling Landgraf ---- - sos/plugins/rhv_analyzer.py | 40 +++++++++++++++++++++++++++++++++++++ - 1 file changed, 40 insertions(+) - create mode 100644 sos/plugins/rhv_analyzer.py - -diff 
--git a/sos/plugins/rhv_analyzer.py b/sos/plugins/rhv_analyzer.py -new file mode 100644 -index 00000000..7c233a0b ---- /dev/null -+++ b/sos/plugins/rhv_analyzer.py -@@ -0,0 +1,40 @@ -+# Copyright (C) 2018 Red Hat, Inc. -+# -+# This file is part of the sos project: https://github.com/sosreport/sos -+# -+# This copyrighted material is made available to anyone wishing to use, -+# modify, copy, or redistribute it subject to the terms and conditions of -+# version 2 of the GNU General Public License. -+# -+# See the LICENSE file in the source distribution for further information. -+ -+from sos.plugins import Plugin, RedHatPlugin -+ -+ -+class RhvLogCollectorAnalyzer(Plugin, RedHatPlugin): -+ """RHV Log Collector Analyzer""" -+ -+ packages = ('rhv-log-collector-analyzer',) -+ -+ plugin_name = 'RhvLogCollectorAnalyzer' -+ profiles = ('virt',) -+ -+ def setup(self): -+ tool_name = 'rhv-log-collector-analyzer' -+ report = "{dircmd}/analyzer-report.html".format( -+ dircmd=self.get_cmd_output_path() -+ ) -+ -+ self.add_cmd_output( -+ "{tool_name}" -+ " --live" -+ " --html={report}".format( -+ report=report, tool_name=tool_name) -+ ) -+ -+ self.add_cmd_output( -+ "{tool_name}" -+ " --json".format(tool_name=tool_name) -+ ) -+ -+# vim: expandtab tabstop=4 shiftwidth=4 --- -2.17.1 - diff --git a/SOURCES/sos-bz1627543-symlinks-not-copied.patch b/SOURCES/sos-bz1627543-symlinks-not-copied.patch deleted file mode 100644 index 8246ec8..0000000 --- a/SOURCES/sos-bz1627543-symlinks-not-copied.patch +++ /dev/null @@ -1,948 +0,0 @@ -From 2e07f7c4778145d4366476ecc4383d491458b541 Mon Sep 17 00:00:00 2001 -From: "Bryn M. Reeves" -Date: Fri, 31 Aug 2018 12:50:24 +0100 -Subject: [PATCH 1/4] [sosreport] properly raise exceptions when --debug is - given - -OSError and IOError exceptions were not raised to the terminal -when --debug is in effect since they were silently caught in the -generic exception handler. - -Signed-off-by: Bryn M. Reeves ---- - sos/sosreport.py | 5 ++++- - 1 file changed, 4 insertions(+), 1 deletion(-) - -diff --git a/sos/sosreport.py b/sos/sosreport.py -index 00c3e811..80633966 100644 ---- a/sos/sosreport.py -+++ b/sos/sosreport.py -@@ -995,7 +995,8 @@ class SoSReport(object): - print(" %s while setting up archive" % e.strerror) - print("") - else: -- raise e -+ print("Error setting up archive: %s" % e) -+ raise - except Exception as e: - self.ui_log.error("") - self.ui_log.error(" Unexpected exception setting up archive:") -@@ -1467,6 +1468,8 @@ class SoSReport(object): - return self.final_work() - - except (OSError): -+ if self.opts.debug: -+ raise - self._cleanup() - except (KeyboardInterrupt): - self.ui_log.error("\nExiting on user cancel") --- -2.17.1 - - -From c496d2bec8cae175faf986567e73d16d401d8564 Mon Sep 17 00:00:00 2001 -From: "Bryn M. Reeves" -Date: Fri, 31 Aug 2018 12:52:38 +0100 -Subject: [PATCH 2/4] [archive] simplify FileCacheArchive.makedirs() - -Simplify the makedirs() method of FileCacheArchive and have it -bypass _check_path() and directly call os.makedirs(): a subsequent -patch will restrict the use of the method to setting up the sos_* -directories in the archive root. - -File, directory and other object type add_* methods will use a -new method that correctly handles symbolic links in intermediate -path components. - -Signed-off-by: Bryn M. 
Reeves ---- - sos/archive.py | 8 ++++---- - 1 file changed, 4 insertions(+), 4 deletions(-) - -diff --git a/sos/archive.py b/sos/archive.py -index 5d99170f..ffa54036 100644 ---- a/sos/archive.py -+++ b/sos/archive.py -@@ -361,11 +361,11 @@ class FileCacheArchive(Archive): - return self._archive_root - - def makedirs(self, path, mode=0o700): -- dest = self._check_path(path, P_DIR) -- if not dest: -- return -+ """Create path, including leading components. - -- self._makedirs(self.dest_path(path)) -+ Used by sos.sosreport to set up sos_* directories. -+ """ -+ os.makedirs(os.path.join(self._archive_root, path), mode=mode) - self.log_debug("created directory at '%s' in FileCacheArchive '%s'" - % (path, self._archive_root)) - --- -2.17.1 - - -From ca422720b74181b2433473428e29e90af59b3cf8 Mon Sep 17 00:00:00 2001 -From: "Bryn M. Reeves" -Date: Fri, 31 Aug 2018 12:55:51 +0100 -Subject: [PATCH 3/4] [archive] normalise dest_dir in - FileCacheArchive._check_path() - -Always set a valid dest_dir in _check_path() and do not assume -that it can be obtained by splitting the path: in the case of -a directory it is the unmodified 'dest' value. - -Signed-off-by: Bryn M. Reeves ---- - sos/archive.py | 5 ++++- - 1 file changed, 4 insertions(+), 1 deletion(-) - -diff --git a/sos/archive.py b/sos/archive.py -index ffa54036..903cc672 100644 ---- a/sos/archive.py -+++ b/sos/archive.py -@@ -191,7 +191,10 @@ class FileCacheArchive(Archive): - copied now or `None` otherwise - """ - dest = dest or self.dest_path(src) -- dest_dir = os.path.split(dest)[0] -+ if path_type == P_DIR: -+ dest_dir = dest -+ else: -+ dest_dir = os.path.split(dest)[0] - if not dest_dir: - return dest - --- -2.17.1 - - -From 75d759066e8ee0a469abc37f48f7bfcdfe8182b5 Mon Sep 17 00:00:00 2001 -From: "Bryn M. Reeves" -Date: Fri, 31 Aug 2018 12:58:01 +0100 -Subject: [PATCH 4/4] [archive] replace FileCacheArchive._makedirs() - -The Python os.makedirs() implementation is inadequate for sos's -needs: it will create leading directories given an intended path -destination, but it is not able to reflect cases where some of -the intermediate paths are actually symbolic links. - -Replace the use of os.makedirs() with a method that walks over -the path, and either creates directories, or symbolic links (and -their directory target) to better correspond with the content of -the host file system. - -This fixes a situation where two plugins can race in the archive, -leading to an exception in the plugin that runs last: - - - /foo/bar exists and is a link to /foo/bar.qux - - One plugin attempts to collect /foo/bar - - Another plugin attempts to collect a link /foo/qux -> /foo/bar/baz - -If the 2nd plugin happens to run first it will create the path -"/foo/bar" as a _directory_ (via _makedirs()). Since the archive -now checks for matching object types when a path collision occurs, -the first plugin will arrive at add_dir(), note that "/foo/bar" is -present and is not a symbolic link, and will raise an exception. - -Correct this by ensuring that whichever plugin executes first, the -correct link/directory path structure will be set up. - -Signed-off-by: Bryn M. 
Reeves ---- - sos/archive.py | 72 ++++++++++++++++++++++++++++++++++++++++++++------ - 1 file changed, 64 insertions(+), 8 deletions(-) - -diff --git a/sos/archive.py b/sos/archive.py -index 903cc672..11afa7aa 100644 ---- a/sos/archive.py -+++ b/sos/archive.py -@@ -159,6 +159,67 @@ class FileCacheArchive(Archive): - name = name.lstrip(os.sep) - return (os.path.join(self._archive_root, name)) - -+ def _make_leading_paths(self, src, mode=0o700): -+ """Create leading path components -+ -+ The standard python `os.makedirs` is insufficient for our -+ needs: it will only create directories, and ignores the fact -+ that some path components may be symbolic links. -+ """ -+ self.log_debug("Making leading paths for %s" % src) -+ root = self._archive_root -+ -+ def in_archive(path): -+ """Test whether path ``path`` is inside the archive. -+ """ -+ return path.startswith(os.path.join(root, "")) -+ -+ if not src.startswith("/"): -+ # Sos archive path (sos_commands, sos_logs etc.) -+ src_dir = src -+ else: -+ # Host file path -+ src_dir = src if os.path.isdir(src) else os.path.split(src)[0] -+ -+ # Build a list of path components in root-to-leaf order. -+ path = src_dir -+ path_comps = [] -+ while path != '/' and path != '': -+ head, tail = os.path.split(path) -+ path_comps.append(tail) -+ path = head -+ path_comps.reverse() -+ -+ abs_path = root -+ rel_path = "" -+ -+ # Check and create components as needed -+ for comp in path_comps: -+ abs_path = os.path.join(abs_path, comp) -+ -+ if not in_archive(abs_path): -+ continue -+ -+ rel_path = os.path.join(rel_path, comp) -+ src_path = os.path.join("/", rel_path) -+ -+ if not os.path.exists(abs_path): -+ self.log_debug("Making path %s" % abs_path) -+ if os.path.islink(src_path) and os.path.isdir(src_path): -+ target = os.readlink(src_path) -+ abs_target = os.path.join(root, target) -+ -+ # Recursively create leading components of target -+ self._make_leading_paths(abs_target, mode=mode) -+ -+ self.log_debug("Making symlink '%s' -> '%s'" % -+ (abs_path, target)) -+ target = os.path.relpath(target) -+ os.symlink(target, abs_path) -+ else: -+ self.log_debug("Making directory %s" % abs_path) -+ os.mkdir(abs_path, mode) -+ - def _check_path(self, src, path_type, dest=None, force=False): - """Check a new destination path in the archive. - -@@ -203,7 +264,8 @@ class FileCacheArchive(Archive): - raise ValueError("path '%s' exists and is not a directory" % - dest_dir) - elif not os.path.exists(dest_dir): -- self._makedirs(dest_dir) -+ src_dir = src if path_type == P_DIR else os.path.split(src)[0] -+ self._make_leading_paths(src_dir) - - def is_special(mode): - return any([ -@@ -326,10 +388,7 @@ class FileCacheArchive(Archive): - - def add_dir(self, path): - with self._path_lock: -- dest = self._check_path(path, P_DIR) -- if not dest: -- return -- self.makedirs(path) -+ self._check_path(path, P_DIR) - - def add_node(self, path, mode, device): - dest = self._check_path(path, P_NODE) -@@ -347,9 +406,6 @@ class FileCacheArchive(Archive): - raise e - shutil.copystat(path, dest) - -- def _makedirs(self, path, mode=0o700): -- os.makedirs(path, mode) -- - def name_max(self): - if 'PC_NAME_MAX' in os.pathconf_names: - pc_name_max = os.pathconf_names['PC_NAME_MAX'] --- -2.17.1 - -From 5d6228b85e174dee8abcc4c206a1e9034242c6c6 Mon Sep 17 00:00:00 2001 -From: "Bryn M. 
Reeves" -Date: Fri, 7 Sep 2018 12:06:34 -0400 -Subject: [PATCH 1/6] [sosreport] ensure ThreadPool exceptions are raised - -The ThreadPoolExecutor does not raise exceptions to the parent -thread immediately: it stores them in-line in the pool's results -list, and raises them to the caller on acccess to that slot in -the results iterator. - -Make sure that these exceptions are handled by iterating over all -results and asserting that they are non-None (in practice, this -code is never executed since the resulting raise will trap to an -exception handler, but it is less confusing than a bare 'pass'). - -Signed-off-by: Bryn M. Reeves ---- - sos/sosreport.py | 8 ++++++-- - 1 file changed, 6 insertions(+), 2 deletions(-) - -diff --git a/sos/sosreport.py b/sos/sosreport.py -index 80633966..44be75a1 100644 ---- a/sos/sosreport.py -+++ b/sos/sosreport.py -@@ -1065,9 +1065,13 @@ class SoSReport(object): - try: - self.plugpool = ThreadPoolExecutor(self.opts.threads) - # Pass the plugpool its own private copy of self.pluglist -- self.plugpool.map(self._collect_plugin, list(self.pluglist), -- chunksize=1) -+ results = self.plugpool.map(self._collect_plugin, -+ list(self.pluglist), chunksize=1) - self.plugpool.shutdown(wait=True) -+ for res in results: -+ if not res: -+ self.soslog.debug("Unexpected plugin task result: %s" % -+ res) - self.ui_log.info("") - except KeyboardInterrupt: - # We may not be at a newline when the user issues Ctrl-C --- -2.17.1 - - -From 9aaba972bf6a42c33ea9bca80f07bfb880ba45a1 Mon Sep 17 00:00:00 2001 -From: "Bryn M. Reeves" -Date: Fri, 7 Sep 2018 12:15:10 -0400 -Subject: [PATCH 2/6] [sosreport] trap directly to PDB in handle_exception() - -Now that plugins are run in a threadpool, it is not possible to -defer the call to pdb.post_mortem() to the top-level exception -handler in the main thread: this is due to the fact that in a pool, -exceptions are caught and saved to be re-raised to thread calling -the pool when results are returned. When the saved exception is -raised to the top-level handler the execution context it relates -to is gone: the backtrace and stack frame have been torn down and -only very limited information is available from the exception -frame. - -Instead, catch these exceptions _inside_ the thread pool context, -and directly trap to the Python debugger. This allows plugin code -to be debugged interactively with the full backtrace and with all -access to local variables and the execution stack. In addition, -this means that after the debugger has handled the exception it is -possible to return to the run and continue until report completion. - -One side effect of this change is that the *-plugin-errors.txt -file containng the backtrace is now written into the archive -whether or not --debug is given. - -Signed-off-by: Bryn M. Reeves ---- - sos/sosreport.py | 9 ++++++++- - 1 file changed, 8 insertions(+), 1 deletion(-) - -diff --git a/sos/sosreport.py b/sos/sosreport.py -index 44be75a1..77ae7161 100644 ---- a/sos/sosreport.py -+++ b/sos/sosreport.py -@@ -30,6 +30,7 @@ from shutil import rmtree - import tempfile - import hashlib - from concurrent.futures import ThreadPoolExecutor, TimeoutError -+import pdb - - from sos import _sos as _ - from sos import __version__ -@@ -504,7 +505,13 @@ class SoSReport(object): - - def handle_exception(self, plugname=None, func=None): - if self.raise_plugins or self.exit_process: -- raise -+ # retrieve exception info for the current thread and stack. 
-+ (etype, val, tb) = sys.exc_info() -+ # we are NOT in interactive mode, print the exception... -+ traceback.print_exception(etype, val, tb, file=sys.stdout) -+ print_() -+ # ...then start the debugger in post-mortem mode. -+ pdb.post_mortem(tb) - if plugname and func: - self._log_plugin_exception(plugname, func) - --- -2.17.1 - - -From 0ea62d1ea57f41c1b75ccb83e69fdda386a7d280 Mon Sep 17 00:00:00 2001 -From: "Bryn M. Reeves" -Date: Fri, 7 Sep 2018 13:00:52 -0400 -Subject: [PATCH 3/6] [Plugin] fix exception raise in Plugin._copy_dir() - -Use a naked 'raise' statement rather than raising the already caught -exception in _copy_dir(), so that the original stack and backtrace -are avaialable. - -Signed-off-by: Bryn M. Reeves ---- - sos/plugins/__init__.py | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/sos/plugins/__init__.py b/sos/plugins/__init__.py -index 252de4d0..ac2c0bc8 100644 ---- a/sos/plugins/__init__.py -+++ b/sos/plugins/__init__.py -@@ -401,7 +401,7 @@ class Plugin(object): - msg = "Too many levels of symbolic links copying" - self._log_error("_copy_dir: %s '%s'" % (msg, srcpath)) - return -- raise e -+ raise - - def _get_dest_for_srcpath(self, srcpath): - if self.use_sysroot(): --- -2.17.1 - - -From d84c1cd6dedf51a8ed7b1a511585c0ac2db0f083 Mon Sep 17 00:00:00 2001 -From: "Bryn M. Reeves" -Date: Wed, 5 Sep 2018 12:46:16 +0100 -Subject: [PATCH 4/6] [archive] fix leading path creation - -Fix the creation of leading path components for both paths that -contain intermediate components that are symbolic links (with both -absolute and relative targets), and those that contain only -directory components. - -Since symlinks may link to other files, and other symlinks, it is -necessary to handle these paths recursively and to include any -intermediate symlinked directories, or symlink targets in the set -of paths added to the archive. - -Related: #1404 - -Signed-off-by: Bryn M. Reeves ---- - sos/archive.py | 41 ++++++++++++++++++++++++++++++++++------- - 1 file changed, 34 insertions(+), 7 deletions(-) - -diff --git a/sos/archive.py b/sos/archive.py -index 11afa7aa..c256a01f 100644 ---- a/sos/archive.py -+++ b/sos/archive.py -@@ -165,9 +165,24 @@ class FileCacheArchive(Archive): - The standard python `os.makedirs` is insufficient for our - needs: it will only create directories, and ignores the fact - that some path components may be symbolic links. -+ -+ :param src: The source path in the host file system for which -+ leading components should be created, or the path -+ to an sos_* virtual directory inside the archive. -+ -+ Host paths must be absolute (initial '/'), and -+ sos_* directory paths must be a path relative to -+ the root of the archive. -+ -+ :param mode: An optional mode to be used when creating path -+ components. -+ :returns: A rewritten destination path in the case that one -+ or more symbolic links in intermediate components -+ of the path have altered the path destination. - """ - self.log_debug("Making leading paths for %s" % src) - root = self._archive_root -+ dest = src - - def in_archive(path): - """Test whether path ``path`` is inside the archive. -@@ -191,34 +206,42 @@ class FileCacheArchive(Archive): - path_comps.reverse() - - abs_path = root -- rel_path = "" -+ src_path = "/" - - # Check and create components as needed - for comp in path_comps: - abs_path = os.path.join(abs_path, comp) - -+ # Do not create components that are above the archive root. 
- if not in_archive(abs_path): - continue - -- rel_path = os.path.join(rel_path, comp) -- src_path = os.path.join("/", rel_path) -+ src_path = os.path.join(src_path, comp) - - if not os.path.exists(abs_path): - self.log_debug("Making path %s" % abs_path) - if os.path.islink(src_path) and os.path.isdir(src_path): - target = os.readlink(src_path) -- abs_target = os.path.join(root, target) -+ -+ # The directory containing the source in the host fs, -+ # adjusted for the current level of path creation. -+ target_dir = os.path.split(src_path)[0] -+ -+ # The source path of the target in the host fs to be -+ # recursively copied. -+ target_src = os.path.join(target_dir, target) - - # Recursively create leading components of target -- self._make_leading_paths(abs_target, mode=mode) -+ dest = self._make_leading_paths(target_src, mode=mode) -+ dest = os.path.normpath(dest) - - self.log_debug("Making symlink '%s' -> '%s'" % - (abs_path, target)) -- target = os.path.relpath(target) - os.symlink(target, abs_path) - else: - self.log_debug("Making directory %s" % abs_path) - os.mkdir(abs_path, mode) -+ return dest - - def _check_path(self, src, path_type, dest=None, force=False): - """Check a new destination path in the archive. -@@ -259,13 +282,17 @@ class FileCacheArchive(Archive): - if not dest_dir: - return dest - -+ # Preserve destination basename for rewritten dest_dir -+ dest_name = os.path.split(src)[1] -+ - # Check containing directory presence and path type - if os.path.exists(dest_dir) and not os.path.isdir(dest_dir): - raise ValueError("path '%s' exists and is not a directory" % - dest_dir) - elif not os.path.exists(dest_dir): - src_dir = src if path_type == P_DIR else os.path.split(src)[0] -- self._make_leading_paths(src_dir) -+ src_dir = self._make_leading_paths(src_dir) -+ dest = self.dest_path(os.path.join(src_dir, dest_name)) - - def is_special(mode): - return any([ --- -2.17.1 - - -From 322f4a517ae336cc1443f9a399a0d15d45ec48b9 Mon Sep 17 00:00:00 2001 -From: "Bryn M. Reeves" -Date: Fri, 7 Sep 2018 13:11:03 -0400 -Subject: [PATCH 5/6] [archive] add link follow-up to - FileCacheArchive.add_link() - -Creating a link may trigger further actions in the archive: if the -link target is a regular file, we must copy that file into the -archive, and if the target is a symbolic link, then we must create -that link, and copy in the link target. - -Handle this by calling add_file() or (recursively) add_link() in -order to create the missing pieces of the symlink chain. - -These operations must take place outside of the path lock since -they do not modify the archive namespace and will call methods of -the Archive object that will attempt to re-acquire this lock. - -Resolves: #1404 - -Signed-off-by: Bryn M. 
Reeves ---- - sos/archive.py | 38 +++++++++++++++++++++++++++++++++++--- - 1 file changed, 35 insertions(+), 3 deletions(-) - -diff --git a/sos/archive.py b/sos/archive.py -index c256a01f..6db398fc 100644 ---- a/sos/archive.py -+++ b/sos/archive.py -@@ -403,6 +403,7 @@ class FileCacheArchive(Archive): - % (dest, self._archive_root)) - - def add_link(self, source, link_name): -+ self.log_debug("adding symlink at '%s' -> '%s'" % (link_name, source)) - with self._path_lock: - dest = self._check_path(link_name, P_LINK) - if not dest: -@@ -410,10 +411,41 @@ class FileCacheArchive(Archive): - - if not os.path.lexists(dest): - os.symlink(source, dest) -- self.log_debug("added symlink at '%s' to '%s' in archive '%s'" -- % (dest, source, self._archive_root)) -+ self.log_debug("added symlink at '%s' to '%s' in archive '%s'" -+ % (dest, source, self._archive_root)) -+ -+ # Follow-up must be outside the path lock: we recurse into -+ # other monitor methods that will attempt to reacquire it. -+ -+ source_dir = os.path.dirname(link_name) -+ host_source = os.path.join(source_dir, source) -+ if not os.path.exists(self.dest_path(host_source)): -+ if os.path.islink(host_source): -+ link_dir = os.path.dirname(link_name) -+ link_name = os.path.normpath(os.path.join(link_dir, source)) -+ dest_dir = os.path.dirname(link_name) -+ source = os.path.join(dest_dir, os.readlink(link_name)) -+ source = os.path.relpath(source) -+ self.log_debug("Adding link %s -> %s for link follow up" % -+ (link_name, source)) -+ self.add_link(source, link_name) -+ elif os.path.isdir(host_source): -+ self.log_debug("Adding dir %s for link follow up" % source) -+ self.add_dir(host_source) -+ elif os.path.isfile(host_source): -+ self.log_debug("Adding file %s for link follow up" % source) -+ self.add_file(host_source) -+ else: -+ self.log_debug("No link follow up: source=%s link_name=%s" % -+ (source, link_name)) - -- def add_dir(self, path): -+ -+ def add_dir(self, path, copy=False): -+ """Create a directory in the archive. -+ -+ :param path: the path in the host file system to add -+ """ -+ # Establish path structure - with self._path_lock: - self._check_path(path, P_DIR) - --- -2.17.1 - - -From 6e79c4b4a4f32fa549708dbb8c8b9af73ab8ff61 Mon Sep 17 00:00:00 2001 -From: "Bryn M. Reeves" -Date: Mon, 10 Sep 2018 16:33:33 +0100 -Subject: [PATCH 6/6] [archive] remove unused 'copy' arg from - FileCacheArchive.add_dir() - -Signed-off-by: Bryn M. Reeves ---- - sos/archive.py | 3 +-- - 1 file changed, 1 insertion(+), 2 deletions(-) - -diff --git a/sos/archive.py b/sos/archive.py -index 6db398fc..4b30630b 100644 ---- a/sos/archive.py -+++ b/sos/archive.py -@@ -439,8 +439,7 @@ class FileCacheArchive(Archive): - self.log_debug("No link follow up: source=%s link_name=%s" % - (source, link_name)) - -- -- def add_dir(self, path, copy=False): -+ def add_dir(self, path): - """Create a directory in the archive. - - :param path: the path in the host file system to add --- -2.17.1 - -From 919e8671a6ab9684d59525eb7f3607b3aab08ee1 Mon Sep 17 00:00:00 2001 -From: "Bryn M. Reeves" -Date: Tue, 11 Sep 2018 12:16:57 -0400 -Subject: [PATCH] [archive] fix link rewriting logic in - FileCacheArchive.add_link() - -When processing link follow up for an original symbolic link, the -add_link() logic incorrectly used the _original_ host link name, -rather than the to-be-created name when calculating relative path -structures. 
If the prior link is at a greater or lesser level of -directory nesting this will lead to broken relative links in the -archive (one level too high or too low). - -In some cases (systemd) this behaviour was masked due to the fact -that identically named links exist at multiple levels of the path -hierarchy. - -Signed-off-by: Bryn M. Reeves ---- - sos/archive.py | 30 +++++++++++++++++++----------- - 1 file changed, 19 insertions(+), 11 deletions(-) - -diff --git a/sos/archive.py b/sos/archive.py -index 528cfa576..7a7717de7 100644 ---- a/sos/archive.py -+++ b/sos/archive.py -@@ -417,27 +417,35 @@ def add_link(self, source, link_name): - # Follow-up must be outside the path lock: we recurse into - # other monitor methods that will attempt to reacquire it. - -+ self.log_debug("Link follow up: source=%s link_name=%s dest=%s" % -+ (source, link_name, dest)) -+ - source_dir = os.path.dirname(link_name) -- host_source = os.path.join(source_dir, source) -- if not os.path.exists(self.dest_path(host_source)): -- if os.path.islink(host_source): -- link_dir = os.path.dirname(link_name) -- link_name = os.path.normpath(os.path.join(link_dir, source)) -+ host_path_name = os.path.normpath(os.path.join(source_dir, source)) -+ dest_path_name = self.dest_path(host_path_name) -+ -+ if not os.path.exists(dest_path_name): -+ if os.path.islink(host_path_name): -+ # Normalised path for the new link_name -+ link_name = host_path_name -+ # Containing directory for the new link - dest_dir = os.path.dirname(link_name) -- source = os.path.join(dest_dir, os.readlink(link_name)) -- source = os.path.relpath(source) -+ # Relative source path of the new link -+ source = os.path.join(dest_dir, os.readlink(host_path_name)) -+ source = os.path.relpath(source, dest_dir) - self.log_debug("Adding link %s -> %s for link follow up" % - (link_name, source)) - self.add_link(source, link_name) -- elif os.path.isdir(host_source): -+ elif os.path.isdir(host_path_name): - self.log_debug("Adding dir %s for link follow up" % source) -- self.add_dir(host_source) -- elif os.path.isfile(host_source): -+ self.add_dir(host_path_name) -+ elif os.path.isfile(host_path_name): - self.log_debug("Adding file %s for link follow up" % source) -- self.add_file(host_source) -+ self.add_file(host_path_name) - else: - self.log_debug("No link follow up: source=%s link_name=%s" % - (source, link_name)) -+ self.log_debug("leaving add_link()") - - def add_dir(self, path): - """Create a directory in the archive. -From c065be9715dc845b6411a9a0b2d6171bbeb1c390 Mon Sep 17 00:00:00 2001 -From: "Bryn M. Reeves" -Date: Wed, 12 Sep 2018 12:02:33 +0100 -Subject: [PATCH] [plugin] canonicalize link target path in - Plugin._copy_symlink() - -Since we may be dealing with paths that contain intermediate -symlinked directories, it is necessary to canonicalize the path -for the link target in order to eliminate additional levels of -symbolic links, and to calculate the correct relative path to -use within the archive. - -Related: #1404 - -Signed-off-by: Bryn M. 
Reeves ---- - sos/plugins/__init__.py | 5 ++++- - 1 file changed, 4 insertions(+), 1 deletion(-) - -diff --git a/sos/plugins/__init__.py b/sos/plugins/__init__.py -index ac2c0bc8c..7d011a02c 100644 ---- a/sos/plugins/__init__.py -+++ b/sos/plugins/__init__.py -@@ -353,7 +353,10 @@ def _copy_symlink(self, srcpath): - absdest = os.path.normpath(dest) - # adjust the target used inside the report to always be relative - if os.path.isabs(linkdest): -- reldest = os.path.relpath(linkdest, os.path.dirname(srcpath)) -+ # Canonicalize the link target path to avoid additional levels -+ # of symbolic links (that would affect the path nesting level). -+ realdir = os.path.realpath(os.path.dirname(srcpath)) -+ reldest = os.path.relpath(linkdest, start=realdir) - # trim leading /sysroot - if self.use_sysroot(): - reldest = reldest[len(os.sep + os.pardir):] -From 868966cd9dbb96ce3635d884e67e738b18658140 Mon Sep 17 00:00:00 2001 -From: "Bryn M. Reeves" -Date: Wed, 12 Sep 2018 16:11:07 +0100 -Subject: [PATCH] [archive] canonicalise paths for link follow up - -Ensure that the canonical path is used when processing link follow -up actions: the actual link path may contain one or more levels of -symbolic links, leading to broken links if the link target path is -assumed to be relative to the containing directory. - -Signed-off-by: Bryn M. Reeves ---- - sos/archive.py | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/sos/archive.py b/sos/archive.py -index 7a7717de7..483d66f4f 100644 ---- a/sos/archive.py -+++ b/sos/archive.py -@@ -421,7 +421,7 @@ def add_link(self, source, link_name): - (source, link_name, dest)) - - source_dir = os.path.dirname(link_name) -- host_path_name = os.path.normpath(os.path.join(source_dir, source)) -+ host_path_name = os.path.realpath(os.path.join(source_dir, source)) - dest_path_name = self.dest_path(host_path_name) - - if not os.path.exists(dest_path_name): -From 8e60e299cdfb0027d6b6ea845234ef54ae785186 Mon Sep 17 00:00:00 2001 -From: "Bryn M. Reeves" -Date: Thu, 13 Sep 2018 16:14:12 +0100 -Subject: [PATCH 1/2] [archive, plugin] avoid recursing on symbolic link loops - -It's possible that symlink loops exist in the host file system, -either 'simple' ('a'->'a'), or indirect ('a'->'b'->'a'). We need -to avoid recursing on these loops, to avoid exceeding the maximum -link or recursion depths, but we should still represent these -inodes as accurately as possible in the resulting archive. - -Detect loops in both the Plugin link handling code and in the new -Archive link follow-up code by creating the first requested level -of loop, and then skipping the recursive follow-up. This means -that the looping links are still created in the archive so long -as they are referenced in a copy spec but that we do not attempt -to indefinitely recurse while collecting them. - -Resolves: #1430 - -Signed-off-by: Bryn M. Reeves ---- - sos/archive.py | 27 +++++++++++++++++++++++++++ - sos/plugins/__init__.py | 20 +++++++++++++++----- - 2 files changed, 42 insertions(+), 5 deletions(-) - -diff --git a/sos/archive.py b/sos/archive.py -index 483d66f4..e5819432 100644 ---- a/sos/archive.py -+++ b/sos/archive.py -@@ -424,6 +424,29 @@ class FileCacheArchive(Archive): - host_path_name = os.path.realpath(os.path.join(source_dir, source)) - dest_path_name = self.dest_path(host_path_name) - -+ def is_loop(link_name, source): -+ """Return ``True`` if the symbolic link ``link_name`` is part -+ of a file system loop, or ``False`` otherwise. 
-+ """ -+ link_dir = os.path.dirname(link_name) -+ if not os.path.isabs(source): -+ source = os.path.realpath(os.path.join(link_dir, source)) -+ link_name = os.path.realpath(link_name) -+ -+ # Simple a -> a loop -+ if link_name == source: -+ return True -+ -+ # Find indirect loops (a->b-a) by stat()ing the first step -+ # in the symlink chain -+ try: -+ os.stat(link_name) -+ except OSError as e: -+ if e.errno == 40: -+ return True -+ raise -+ return False -+ - if not os.path.exists(dest_path_name): - if os.path.islink(host_path_name): - # Normalised path for the new link_name -@@ -433,6 +456,10 @@ class FileCacheArchive(Archive): - # Relative source path of the new link - source = os.path.join(dest_dir, os.readlink(host_path_name)) - source = os.path.relpath(source, dest_dir) -+ if is_loop(link_name, source): -+ self.log_debug("Link '%s' - '%s' loops: skipping..." % -+ (link_name, source)) -+ return - self.log_debug("Adding link %s -> %s for link follow up" % - (link_name, source)) - self.add_link(source, link_name) -diff --git a/sos/plugins/__init__.py b/sos/plugins/__init__.py -index 7d011a02..7d2a8b2d 100644 ---- a/sos/plugins/__init__.py -+++ b/sos/plugins/__init__.py -@@ -376,6 +376,21 @@ class Plugin(object): - self._log_debug("link '%s' is a directory, skipping..." % linkdest) - return - -+ self.copied_files.append({'srcpath': srcpath, -+ 'dstpath': dstpath, -+ 'symlink': "yes", -+ 'pointsto': linkdest}) -+ -+ # Check for indirect symlink loops by stat()ing the next step -+ # in the link chain. -+ try: -+ os.stat(absdest) -+ except OSError as e: -+ if e.errno == 40: -+ self._log_debug("link '%s' is part of a file system " -+ "loop, skipping target..." % dstpath) -+ return -+ - # copy the symlink target translating relative targets - # to absolute paths to pass to _do_copy_path. - self._log_debug("normalized link target '%s' as '%s'" -@@ -388,11 +403,6 @@ class Plugin(object): - self._log_debug("link '%s' points to itself, skipping target..." - % linkdest) - -- self.copied_files.append({'srcpath': srcpath, -- 'dstpath': dstpath, -- 'symlink': "yes", -- 'pointsto': linkdest}) -- - def _copy_dir(self, srcpath): - try: - for afile in os.listdir(srcpath): --- -2.17.1 - - -From e108d7c03834446f8dac66ad69f5eade4f2c5fce Mon Sep 17 00:00:00 2001 -From: Pavel Moravec -Date: Fri, 14 Sep 2018 10:42:07 +0200 -Subject: [PATCH 2/2] [archive] fix and simplify directory destination - rewriting - -Rewriting of the destination path by _make_leading_paths() only -applies when creating intermediate path components that are a -symbolic link. The final level of path creation must always be -a directory, and the destination is always the absolute path to -that directory. - -Always return the directory path when creating a new directory, -and do not attempt to rewrite the destination at the top level -in FileCacheArchive._check_path() since all intermediate links -have already been handled inside _make_leading_paths() (i.e. -the returned/rewritten destination is always equal to the path -that was passed into the function). - -Resolves: #1432 - -Signed-off-by: Pavel Moravec -Signed-off-by: Bryn M. 
Reeves ---- - sos/archive.py | 8 +++----- - 1 file changed, 3 insertions(+), 5 deletions(-) - -diff --git a/sos/archive.py b/sos/archive.py -index e5819432..b02b75f7 100644 ---- a/sos/archive.py -+++ b/sos/archive.py -@@ -241,6 +241,8 @@ class FileCacheArchive(Archive): - else: - self.log_debug("Making directory %s" % abs_path) - os.mkdir(abs_path, mode) -+ dest = src_path -+ - return dest - - def _check_path(self, src, path_type, dest=None, force=False): -@@ -282,17 +284,13 @@ class FileCacheArchive(Archive): - if not dest_dir: - return dest - -- # Preserve destination basename for rewritten dest_dir -- dest_name = os.path.split(src)[1] -- - # Check containing directory presence and path type - if os.path.exists(dest_dir) and not os.path.isdir(dest_dir): - raise ValueError("path '%s' exists and is not a directory" % - dest_dir) - elif not os.path.exists(dest_dir): - src_dir = src if path_type == P_DIR else os.path.split(src)[0] -- src_dir = self._make_leading_paths(src_dir) -- dest = self.dest_path(os.path.join(src_dir, dest_name)) -+ self._make_leading_paths(src_dir) - - def is_special(mode): - return any([ --- -2.17.1 - diff --git a/SOURCES/sos-bz1627544-pipe-returncode.patch b/SOURCES/sos-bz1627544-pipe-returncode.patch deleted file mode 100644 index 562a162..0000000 --- a/SOURCES/sos-bz1627544-pipe-returncode.patch +++ /dev/null @@ -1,37 +0,0 @@ -From 17bcd2bcdb8de4818b361582ac4d833ff324f4ff Mon Sep 17 00:00:00 2001 -From: "Bryn M. Reeves" -Date: Mon, 10 Sep 2018 18:06:00 +0100 -Subject: [PATCH] [utilities] wait until AsyncReader p.poll() returns None - -On some systems the pipe used by the AsyncReader() class and the -sos_get_command_output() function may still be open at the time -the p.poll() call returns. At this time the command exit status -is undefined, leading to errors and collection failures for code -that tests the command's exit code. - -Wait explicitly until poll() returns None to avoid this. - -Resolves: #1417 - -Signed-off-by: Bryn M. Reeves ---- - sos/utilities.py | 3 ++- - 1 file changed, 2 insertions(+), 1 deletion(-) - -diff --git a/sos/utilities.py b/sos/utilities.py -index d112e15a..25e10429 100644 ---- a/sos/utilities.py -+++ b/sos/utilities.py -@@ -155,7 +155,8 @@ def sos_get_command_output(command, timeout=300, stderr=False, - - reader = AsyncReader(p.stdout, sizelimit, binary) - stdout = reader.get_contents() -- p.poll() -+ while p.poll() == None: -+ pass - - except OSError as e: - if e.errno == errno.ENOENT: --- -2.17.1 - diff --git a/SOURCES/sos-bz1627546-atomic-attribute-error.patch b/SOURCES/sos-bz1627546-atomic-attribute-error.patch deleted file mode 100644 index 035c892..0000000 --- a/SOURCES/sos-bz1627546-atomic-attribute-error.patch +++ /dev/null @@ -1,60 +0,0 @@ -From 4440c9094d853a452cbff6f9801fc7d47352e9b4 Mon Sep 17 00:00:00 2001 -From: Jake Hunsaker -Date: Thu, 6 Sep 2018 13:56:20 -0400 -Subject: [PATCH] [atomic] Define valid preset for RHEL Atomic - -Defines an 'atomic' preset for use with the RedHatAtomic policy for RHEL -Atomic Host. Fixes sos being unable to run due to the preset probe -returning a string rather than a preset. - -Resolves: #1418 - -Signed-off-by: Jake Hunsaker -Signed-off-by: Bryn M. Reeves ---- - sos/policies/redhat.py | 15 ++++++++++++++- - 1 file changed, 14 insertions(+), 1 deletion(-) - -diff --git a/sos/policies/redhat.py b/sos/policies/redhat.py -index b494de3c..e1e417f3 100644 ---- a/sos/policies/redhat.py -+++ b/sos/policies/redhat.py -@@ -325,6 +325,12 @@ No changes will be made to system configuration. 
- - ATOMIC = "atomic" - ATOMIC_RELEASE_STR = "Atomic" -+ATOMIC_DESC = "Red Hat Enterprise Linux Atomic Host" -+ -+atomic_presets = { -+ ATOMIC: PresetDefaults(name=ATOMIC, desc=ATOMIC_DESC, note=NOTE_TIME, -+ opts=_opts_verify) -+} - - - class RedHatAtomicPolicy(RHELPolicy): -@@ -347,6 +353,10 @@ organization before being passed to any third party. - %(vendor_text)s - """) - -+ def __init__(self, sysroot=None): -+ super(RedHatAtomicPolicy, self).__init__(sysroot=sysroot) -+ self.register_presets(atomic_presets) -+ - @classmethod - def check(cls): - atomic = False -@@ -363,7 +373,10 @@ organization before being passed to any third party. - return atomic - - def probe_preset(self): -- return ATOMIC -+ if self.pkg_by_name('atomic-openshift'): -+ return self.find_preset(RHOCP) -+ -+ return self.find_preset(ATOMIC) - - - class FedoraPolicy(RedHatPolicy): --- -2.17.1 - diff --git a/SOURCES/sos-bz1632607-unpackaged-traceback.patch b/SOURCES/sos-bz1632607-unpackaged-traceback.patch deleted file mode 100644 index 049f00b..0000000 --- a/SOURCES/sos-bz1632607-unpackaged-traceback.patch +++ /dev/null @@ -1,135 +0,0 @@ -From 18ae45219c9aa5ed2340a21e9f0aacad62d69242 Mon Sep 17 00:00:00 2001 -From: Pavel Moravec -Date: Sat, 8 Sep 2018 17:39:32 +0200 -Subject: [PATCH 1/3] [plugins] fix exception when collecting empty strings - -get first line of string to log only for nonempty content - -Relevant to: #1422 - -Signed-off-by: Pavel Moravec ---- - sos/plugins/__init__.py | 15 +++++++++------ - 1 file changed, 9 insertions(+), 6 deletions(-) - -diff --git a/sos/plugins/__init__.py b/sos/plugins/__init__.py -index c87ae19b..6c3b153e 100644 ---- a/sos/plugins/__init__.py -+++ b/sos/plugins/__init__.py -@@ -746,9 +746,10 @@ class Plugin(object): - def add_string_as_file(self, content, filename): - """Add a string to the archive as a file named `filename`""" - self.copy_strings.append((content, filename)) -- content = content.splitlines()[0] -- if not isinstance(content, six.string_types): -- content = content.decode('utf8', 'ignore') -+ if content: -+ content = content.splitlines()[0] -+ if not isinstance(content, six.string_types): -+ content = content.decode('utf8', 'ignore') - self._log_debug("added string ...'%s' as '%s'" % (content, filename)) - - def get_cmd_output_now(self, exe, suggest_filename=None, -@@ -948,9 +949,11 @@ class Plugin(object): - - def _collect_strings(self): - for string, file_name in self.copy_strings: -- content = string.splitlines()[0] -- if not isinstance(content, six.string_types): -- content = content.decode('utf8', 'ignore') -+ content = '' -+ if string: -+ content = string.splitlines()[0] -+ if not isinstance(content, six.string_types): -+ content = content.decode('utf8', 'ignore') - self._log_info("collecting string ...'%s' as '%s'" - % (content, file_name)) - try: --- -2.17.2 - - -From 036bbd0fa4c85f97da536717673ca0b668dd5276 Mon Sep 17 00:00:00 2001 -From: Pavel Moravec -Date: Sat, 8 Sep 2018 17:45:09 +0200 -Subject: [PATCH 2/3] [juju] catch exceptions when "juju status" command fails - -Catch exceptions when "juju status" command: -- does not exist (and generates empty output), or -- does not generate valid/expected JSON output - -Resolves: #1422 - -Signed-off-by: Pavel Moravec ---- - sos/plugins/juju.py | 7 ++++++- - 1 file changed, 6 insertions(+), 1 deletion(-) - -diff --git a/sos/plugins/juju.py b/sos/plugins/juju.py -index cbd4a17b..8d996041 100644 ---- a/sos/plugins/juju.py -+++ b/sos/plugins/juju.py -@@ -51,7 +51,12 @@ class Juju(Plugin, UbuntuPlugin): - cmd = "juju 
status --format json" - status_json = self.call_ext_prog(cmd)['output'] - self.add_string_as_file(status_json, "juju_status_json") -- return json_loads(status_json)['services'].keys() -+ # if status_json isn't in JSON format (i.e. 'juju' command not found), -+ # or if it does not contain 'services' key, return empty list -+ try: -+ return json_loads(status_json)['services'].keys() -+ except ValueError: -+ return [] - - @ensure_service_is_running("juju-db") - def export_mongodb(self): --- -2.17.2 - - -From 549591879a01edcb856f7f353af9d6324d469c39 Mon Sep 17 00:00:00 2001 -From: Pavel Moravec -Date: Mon, 24 Sep 2018 20:09:47 +0200 -Subject: [PATCH 3/3] [unpackaged] compare realpaths of files - -To compare files in $PATH with files installed from a package, we must -expand all symlinks to their realpaths. Otherwise we get false positives -like /bin/systemctl (as /bin -> /usr/bin). - -Resolves: #1437 - -Signed-off-by: Pavel Moravec ---- - sos/plugins/unpackaged.py | 8 +++++--- - 1 file changed, 5 insertions(+), 3 deletions(-) - -diff --git a/sos/plugins/unpackaged.py b/sos/plugins/unpackaged.py -index 4c065e11..91de9de2 100644 ---- a/sos/plugins/unpackaged.py -+++ b/sos/plugins/unpackaged.py -@@ -45,9 +45,10 @@ class Unpackaged(Plugin, RedHatPlugin): - path = os.path.abspath(os.readlink(path)) - except Exception: - continue -- file_list.append(path) -+ file_list.append(os.path.realpath(path)) - for name in dirs: -- file_list.append(os.path.join(root, name)) -+ file_list.append(os.path.realpath( -+ os.path.join(root, name))) - - return file_list - -@@ -63,7 +64,8 @@ class Unpackaged(Plugin, RedHatPlugin): - return expanded - - all_fsystem = [] -- all_frpm = set(self.policy.mangle_package_path( -+ all_frpm = set(os.path.realpath(x) -+ for x in self.policy.mangle_package_path( - self.policy.package_manager.files)) - - for d in get_env_path_list(): --- -2.17.2 - diff --git a/SOURCES/sos-bz1637127-powerpc-dlpar-lpm-logs.patch b/SOURCES/sos-bz1637127-powerpc-dlpar-lpm-logs.patch deleted file mode 100644 index ab4b65d..0000000 --- a/SOURCES/sos-bz1637127-powerpc-dlpar-lpm-logs.patch +++ /dev/null @@ -1,49 +0,0 @@ -From 848b110f83697814c72ac93b36e786ff9dafc0fc Mon Sep 17 00:00:00 2001 -From: Sourabh Jain -Date: Thu, 4 Oct 2018 11:14:09 +0530 -Subject: [PATCH] [powerpc] Add support to collect DLPAR and LPM related logs - -This patch updates powerpc plugin to collect -Dynamic Resource Manager (drmgr) log files -i.e. /var/log/drmgr and /var/log/drmgr.0. -In addition, it also adds ctsanp command to collect the information -about the Reliable Scalable Cluster Technology (RSCT) components. - -Resolves: #1443 - -Signed-off-by: Sourabh Jain -Signed-off-by: Bryn M. 
Reeves ---- - sos/plugins/powerpc.py | 8 ++++++-- - 1 file changed, 6 insertions(+), 2 deletions(-) - -diff --git a/sos/plugins/powerpc.py b/sos/plugins/powerpc.py -index 94137568..8a859990 100644 ---- a/sos/plugins/powerpc.py -+++ b/sos/plugins/powerpc.py -@@ -59,8 +59,11 @@ class PowerPC(Plugin, RedHatPlugin, UbuntuPlugin, DebianPlugin): - "/proc/ppc64/lparcfg", - "/proc/ppc64/eeh", - "/proc/ppc64/systemcfg", -- "/var/log/platform" -+ "/var/log/platform", -+ "/var/log/drmgr", -+ "/var/log/drmgr.0" - ]) -+ ctsnap_path = self.get_cmd_output_path(name="ctsnap", make=True) - self.add_cmd_output([ - "servicelog --dump", - "servicelog_notify --list", -@@ -68,7 +71,8 @@ class PowerPC(Plugin, RedHatPlugin, UbuntuPlugin, DebianPlugin): - "usysident", - "serv_config -l", - "bootlist -m both -r", -- "lparstat -i" -+ "lparstat -i", -+ "ctsnap -xrunrpttr -d %s" % (ctsnap_path) - ]) - - if isPowerNV: --- -2.17.1 - diff --git a/SOURCES/sos-bz1638492-system-wide-crypto-policies.patch b/SOURCES/sos-bz1638492-system-wide-crypto-policies.patch deleted file mode 100644 index b5aa388..0000000 --- a/SOURCES/sos-bz1638492-system-wide-crypto-policies.patch +++ /dev/null @@ -1,45 +0,0 @@ -From b44d8d0f2de8fb35e98d5d171afd00945fc5036c Mon Sep 17 00:00:00 2001 -From: Pavel Moravec -Date: Sun, 14 Oct 2018 14:20:04 +0200 -Subject: [PATCH] [crypto] collect more configs and commands - -Commands: -fips-mode-setup --check -update-crypto-policies --show -update-crypto-policies --is-applied - -Files: -/etc/system-fips -/etc/crypto-policies/* - -Resolves: #1448 - -Signed-off-by: Pavel Moravec ---- - sos/plugins/crypto.py | 10 +++++++++- - 1 file changed, 9 insertions(+), 1 deletion(-) - -diff --git a/sos/plugins/crypto.py b/sos/plugins/crypto.py -index df21bb3e..a9c51bb1 100644 ---- a/sos/plugins/crypto.py -+++ b/sos/plugins/crypto.py -@@ -21,7 +21,15 @@ class Crypto(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin): - def setup(self): - self.add_copy_spec([ - "/proc/crypto", -- "/proc/sys/crypto/fips_enabled" -+ "/proc/sys/crypto/fips_enabled", -+ "/etc/system-fips", -+ "/etc/crypto-policies/*" -+ ]) -+ -+ self.add_cmd_output([ -+ "fips-mode-setup --check", -+ "update-crypto-policies --show", -+ "update-crypto-policies --is-applied" - ]) - - # vim: et ts=4 sw=4 --- -2.17.2 - diff --git a/SOURCES/sos-bz1638637-kernel-dont-collect-tracing-instance.patch b/SOURCES/sos-bz1638637-kernel-dont-collect-tracing-instance.patch deleted file mode 100644 index 058732d..0000000 --- a/SOURCES/sos-bz1638637-kernel-dont-collect-tracing-instance.patch +++ /dev/null @@ -1,33 +0,0 @@ -From d6379b5ba0f381ea8ec2403b9985100a946a5866 Mon Sep 17 00:00:00 2001 -From: Pavel Moravec -Date: Mon, 8 Oct 2018 10:45:04 +0200 -Subject: [PATCH] [kernel] dont collect some tracing instance files - -As copying of them hangs. 
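For context on why copying these files hangs: the ftrace trace_pipe and snapshot_raw files are live consumer interfaces, so reads on them block while no new trace data is available and never return EOF, which is why a plain file copy of them never finishes. Below is a minimal standalone sketch of the deny-list idea, assuming the hypothetical names DENY_PATTERNS and safe_copy_spec; it is not the sos Plugin API, and the glob patterns simply mirror the ones the hunk below adds.

import fnmatch
import glob
import shutil

# Pipe-like tracing files: reading them blocks until new events arrive,
# so copying them would hang the collector (patterns mirror the hunk below).
DENY_PATTERNS = [
    "/sys/kernel/debug/tracing/instances/*/per_cpu/*/snapshot_raw",
    "/sys/kernel/debug/tracing/instances/*/per_cpu/*/trace_pipe*",
    "/sys/kernel/debug/tracing/instances/*/trace_pipe",
]

def safe_copy_spec(pattern, dest_dir):
    """Expand a copy pattern but skip anything matching a deny pattern."""
    for path in glob.glob(pattern):
        if any(fnmatch.fnmatch(path, deny) for deny in DENY_PATTERNS):
            continue  # skip blocking, pipe-like files
        shutil.copy(path, dest_dir)  # regular files copy normally

In sos itself the same effect is achieved by extending the kernel plugin's existing exclusion list with these glob patterns, as the hunk below does.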
- -Resolves: #1445 - -Signed-off-by: Pavel Moravec ---- - sos/plugins/kernel.py | 5 ++++- - 1 file changed, 4 insertions(+), 1 deletion(-) - -diff --git a/sos/plugins/kernel.py b/sos/plugins/kernel.py -index 73109326..558e7143 100644 ---- a/sos/plugins/kernel.py -+++ b/sos/plugins/kernel.py -@@ -93,7 +93,10 @@ class Kernel(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin): - '/sys/kernel/debug/tracing/events/*', - '/sys/kernel/debug/tracing/free_buffer', - '/sys/kernel/debug/tracing/trace_marker', -- '/sys/kernel/debug/tracing/trace_marker_raw' -+ '/sys/kernel/debug/tracing/trace_marker_raw', -+ '/sys/kernel/debug/tracing/instances/*/per_cpu/*/snapshot_raw', -+ '/sys/kernel/debug/tracing/instances/*/per_cpu/*/trace_pipe*', -+ '/sys/kernel/debug/tracing/instances/*/trace_pipe' - ]) - - self.add_copy_spec([ --- -2.17.1 - diff --git a/SOURCES/sos-bz1638638-openstack-relax-enabling-plugins.patch b/SOURCES/sos-bz1638638-openstack-relax-enabling-plugins.patch deleted file mode 100644 index 283f844..0000000 --- a/SOURCES/sos-bz1638638-openstack-relax-enabling-plugins.patch +++ /dev/null @@ -1,424 +0,0 @@ -From 9b3d0b7d8732f53dbbd5e02182a9b0a0e1d6d249 Mon Sep 17 00:00:00 2001 -From: Pavel Moravec -Date: Fri, 31 Aug 2018 17:19:32 +0200 -Subject: [PATCH 1/2] [openstack_nova] remove too restrictive check_enabled - -Enable the plugin just based on package presence. - -Resolves: #1411 - -Signed-off-by: Pavel Moravec ---- - sos/plugins/openstack_nova.py | 8 -------- - 1 file changed, 8 deletions(-) - -diff --git a/sos/plugins/openstack_nova.py b/sos/plugins/openstack_nova.py -index b041a59a..77c3b49a 100644 ---- a/sos/plugins/openstack_nova.py -+++ b/sos/plugins/openstack_nova.py -@@ -200,10 +200,6 @@ class DebianNova(OpenStackNova, DebianPlugin, UbuntuPlugin): - 'python-novnc' - ) - -- def check_enabled(self): -- self.nova = self.is_installed("nova-common") -- return self.nova -- - def setup(self): - super(DebianNova, self).setup() - self.add_copy_spec([ -@@ -233,10 +229,6 @@ class RedHatNova(OpenStackNova, RedHatPlugin): - 'novnc' - ) - -- def check_enabled(self): -- self.nova = self.is_installed("openstack-nova-common") -- return self.nova -- - def setup(self): - super(RedHatNova, self).setup() - self.add_copy_spec([ --- -2.17.1 - - -From f8ee9c4b87c6c3b8aa2bda3425f0e53499515363 Mon Sep 17 00:00:00 2001 -From: Pavel Moravec -Date: Fri, 31 Aug 2018 20:04:47 +0200 -Subject: [PATCH 2/2] [openstack_*] relax enabling of OSP RedHat plugins - -Allow automatic enabling of OSP packages also on containerized -environment. - -Relevant to: #1411 - -Signed-off-by: Pavel Moravec -Signed-off-by: Bryn M. 
Reeves ---- - sos/plugins/openstack_aodh.py | 8 +------- - sos/plugins/openstack_ceilometer.py | 10 +--------- - sos/plugins/openstack_cinder.py | 12 +----------- - sos/plugins/openstack_glance.py | 5 +---- - sos/plugins/openstack_heat.py | 10 +--------- - sos/plugins/openstack_horizon.py | 5 +---- - sos/plugins/openstack_instack.py | 14 +------------- - sos/plugins/openstack_ironic.py | 6 +----- - sos/plugins/openstack_keystone.py | 7 +------ - sos/plugins/openstack_manila.py | 9 +-------- - sos/plugins/openstack_neutron.py | 21 +-------------------- - sos/plugins/openstack_nova.py | 18 +----------------- - sos/plugins/openstack_octavia.py | 13 +++++++++++-- - sos/plugins/openstack_sahara.py | 7 +------ - sos/plugins/openstack_swift.py | 10 +--------- - sos/plugins/openstack_trove.py | 2 +- - 16 files changed, 26 insertions(+), 131 deletions(-) - -diff --git a/sos/plugins/openstack_aodh.py b/sos/plugins/openstack_aodh.py -index 9fcdf932..2c9057a6 100644 ---- a/sos/plugins/openstack_aodh.py -+++ b/sos/plugins/openstack_aodh.py -@@ -18,13 +18,7 @@ class OpenStackAodh(Plugin, RedHatPlugin): - plugin_name = "openstack_aodh" - profiles = ('openstack', 'openstack_controller') - -- packages = ( -- 'openstack-aodh-api', -- 'openstack-aodh-listener', -- 'openstack-aodh-notifier', -- 'openstack-aodh-evaluator,' -- 'openstack-aodh-common' -- ) -+ packages = ('openstack-selinux',) - - requires_root = False - -diff --git a/sos/plugins/openstack_ceilometer.py b/sos/plugins/openstack_ceilometer.py -index 3bdd74c8..bb89fa68 100644 ---- a/sos/plugins/openstack_ceilometer.py -+++ b/sos/plugins/openstack_ceilometer.py -@@ -86,15 +86,7 @@ class DebianCeilometer(OpenStackCeilometer, DebianPlugin, - - class RedHatCeilometer(OpenStackCeilometer, RedHatPlugin): - -- packages = ( -- 'openstack-ceilometer', -- 'openstack-ceilometer-api', -- 'openstack-ceilometer-central', -- 'openstack-ceilometer-collector', -- 'openstack-ceilometer-common', -- 'openstack-ceilometer-compute', -- 'python-ceilometerclient' -- ) -+ packages = ('openstack-selinux',) - - def setup(self): - super(RedHatCeilometer, self).setup() -diff --git a/sos/plugins/openstack_cinder.py b/sos/plugins/openstack_cinder.py -index f097fd5b..4fa753c4 100644 ---- a/sos/plugins/openstack_cinder.py -+++ b/sos/plugins/openstack_cinder.py -@@ -130,10 +130,6 @@ class DebianCinder(OpenStackCinder, DebianPlugin, UbuntuPlugin): - 'python-cinderclient' - ) - -- def check_enabled(self): -- self.cinder = self.is_installed("cinder-common") -- return self.cinder -- - def setup(self): - super(DebianCinder, self).setup() - -@@ -141,13 +137,7 @@ class DebianCinder(OpenStackCinder, DebianPlugin, UbuntuPlugin): - class RedHatCinder(OpenStackCinder, RedHatPlugin): - - cinder = False -- packages = ('openstack-cinder', -- 'python-cinder', -- 'python-cinderclient') -- -- def check_enabled(self): -- self.cinder = self.is_installed("openstack-cinder") -- return self.cinder -+ packages = ('openstack-selinux',) - - def setup(self): - super(RedHatCinder, self).setup() -diff --git a/sos/plugins/openstack_glance.py b/sos/plugins/openstack_glance.py -index fa68dd8e..bfb5f9fe 100644 ---- a/sos/plugins/openstack_glance.py -+++ b/sos/plugins/openstack_glance.py -@@ -130,9 +130,6 @@ class DebianGlance(OpenStackGlance, DebianPlugin, UbuntuPlugin): - - class RedHatGlance(OpenStackGlance, RedHatPlugin): - -- packages = ( -- 'openstack-glance', -- 'python-glanceclient' -- ) -+ packages = ('openstack-selinux',) - - # vim: set et ts=4 sw=4 : -diff --git a/sos/plugins/openstack_heat.py 
b/sos/plugins/openstack_heat.py -index 26f3f511..1dab72d0 100644 ---- a/sos/plugins/openstack_heat.py -+++ b/sos/plugins/openstack_heat.py -@@ -152,14 +152,6 @@ class DebianHeat(OpenStackHeat, DebianPlugin, UbuntuPlugin): - - class RedHatHeat(OpenStackHeat, RedHatPlugin): - -- packages = ( -- 'openstack-heat-api', -- 'openstack-heat-api-cfn', -- 'openstack-heat-api-cloudwatch', -- 'openstack-heat-cli', -- 'openstack-heat-common', -- 'openstack-heat-engine', -- 'python-heatclient' -- ) -+ packages = ('openstack-selinux',) - - # vim: set et ts=4 sw=4 : -diff --git a/sos/plugins/openstack_horizon.py b/sos/plugins/openstack_horizon.py -index 677a7c28..4299d8db 100644 ---- a/sos/plugins/openstack_horizon.py -+++ b/sos/plugins/openstack_horizon.py -@@ -103,10 +103,7 @@ class UbuntuHorizon(OpenStackHorizon, UbuntuPlugin): - - class RedHatHorizon(OpenStackHorizon, RedHatPlugin): - -- packages = ( -- 'python-django-horizon', -- 'openstack-dashboard' -- ) -+ packages = ('openstack-selinux',) - - def setup(self): - super(RedHatHorizon, self).setup() -diff --git a/sos/plugins/openstack_instack.py b/sos/plugins/openstack_instack.py -index cf90003e..37a75e02 100644 ---- a/sos/plugins/openstack_instack.py -+++ b/sos/plugins/openstack_instack.py -@@ -125,19 +125,7 @@ class OpenStackInstack(Plugin): - - class RedHatRDOManager(OpenStackInstack, RedHatPlugin): - -- packages = [ -- 'instack', -- 'instack-undercloud', -- 'openstack-tripleo', -- 'openstack-tripleo-common', -- 'openstack-tripleo-heat-templates', -- 'openstack-tripleo-image-elements', -- 'openstack-tripleo-puppet-elements', -- 'openstack-tripleo-ui', -- 'openstack-tripleo-validations', -- 'puppet-tripleo', -- 'python-tripleoclient' -- ] -+ packages = ('openstack-selinux',) - - def setup(self): - super(RedHatRDOManager, self).setup() -diff --git a/sos/plugins/openstack_ironic.py b/sos/plugins/openstack_ironic.py -index b4cdee6d..84055b67 100644 ---- a/sos/plugins/openstack_ironic.py -+++ b/sos/plugins/openstack_ironic.py -@@ -118,11 +118,7 @@ class DebianIronic(OpenStackIronic, DebianPlugin, UbuntuPlugin): - - class RedHatIronic(OpenStackIronic, RedHatPlugin): - -- packages = [ -- 'openstack-ironic-api', -- 'openstack-ironic-common', -- 'openstack-ironic-conductor', -- ] -+ packages = ('openstack-selinux',) - - discoverd_packages = [ - 'openstack-ironic-discoverd', -diff --git a/sos/plugins/openstack_keystone.py b/sos/plugins/openstack_keystone.py -index a6b1360f..76e4b380 100644 ---- a/sos/plugins/openstack_keystone.py -+++ b/sos/plugins/openstack_keystone.py -@@ -118,12 +118,7 @@ class DebianKeystone(OpenStackKeystone, DebianPlugin, UbuntuPlugin): - - class RedHatKeystone(OpenStackKeystone, RedHatPlugin): - -- packages = ( -- 'openstack-keystone', -- 'python-keystone', -- 'python-django-openstack-auth', -- 'python-keystoneclient' -- ) -+ packages = ('openstack-selinux',) - - def setup(self): - super(RedHatKeystone, self).setup() -diff --git a/sos/plugins/openstack_manila.py b/sos/plugins/openstack_manila.py -index ef926cda..e6409d00 100644 ---- a/sos/plugins/openstack_manila.py -+++ b/sos/plugins/openstack_manila.py -@@ -85,14 +85,7 @@ class DebianManila(OpenStackManila, DebianPlugin, UbuntuPlugin): - class RedHatManila(OpenStackManila, RedHatPlugin): - """OpenStackManila related information for Red Hat distributions.""" - -- packages = ( -- 'puppet-manila', -- 'openstack-manila', -- 'openstack-manila-share', -- 'python-manila', -- 'python-manilaclient', -- 'python-manila-tests' -- ) -+ packages = ('openstack-selinux',) - - def setup(self): - 
super(RedHatManila, self).setup() -diff --git a/sos/plugins/openstack_neutron.py b/sos/plugins/openstack_neutron.py -index a5134c9f..9ae741f3 100644 ---- a/sos/plugins/openstack_neutron.py -+++ b/sos/plugins/openstack_neutron.py -@@ -120,26 +120,7 @@ class DebianNeutron(OpenStackNeutron, DebianPlugin, UbuntuPlugin): - - class RedHatNeutron(OpenStackNeutron, RedHatPlugin): - -- packages = [ -- 'openstack-neutron', -- 'openstack-neutron-linuxbridge' -- 'openstack-neutron-metaplugin', -- 'openstack-neutron-openvswitch', -- 'openstack-neutron-bigswitch', -- 'openstack-neutron-brocade', -- 'openstack-neutron-cisco', -- 'openstack-neutron-hyperv', -- 'openstack-neutron-midonet', -- 'openstack-neutron-nec' -- 'openstack-neutron-nicira', -- 'openstack-neutron-plumgrid', -- 'openstack-neutron-ryu', -- 'python-neutron', -- 'python-neutronclient' -- ] -- -- def check_enabled(self): -- return self.is_installed("openstack-neutron") -+ packages = ('openstack-selinux',) - - def setup(self): - super(RedHatNeutron, self).setup() -diff --git a/sos/plugins/openstack_nova.py b/sos/plugins/openstack_nova.py -index 77c3b49a..4fde7565 100644 ---- a/sos/plugins/openstack_nova.py -+++ b/sos/plugins/openstack_nova.py -@@ -211,23 +211,7 @@ class DebianNova(OpenStackNova, DebianPlugin, UbuntuPlugin): - class RedHatNova(OpenStackNova, RedHatPlugin): - - nova = False -- packages = ( -- 'openstack-nova-common', -- 'openstack-nova-network', -- 'openstack-nova-conductor', -- 'openstack-nova-conductor', -- 'openstack-nova-scheduler', -- 'openstack-nova-console', -- 'openstack-nova-novncproxy', -- 'openstack-nova-compute', -- 'openstack-nova-api', -- 'openstack-nova-cert', -- 'openstack-nova-cells', -- 'openstack-nova-objectstore', -- 'python-nova', -- 'python-novaclient', -- 'novnc' -- ) -+ packages = ('openstack-selinux',) - - def setup(self): - super(RedHatNova, self).setup() -diff --git a/sos/plugins/openstack_octavia.py b/sos/plugins/openstack_octavia.py -index 46a943a5..86a91dc1 100644 ---- a/sos/plugins/openstack_octavia.py -+++ b/sos/plugins/openstack_octavia.py -@@ -9,12 +9,11 @@ - from sos.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin - - --class OpenStackOctavia(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin): -+class OpenStackOctavia(Plugin): - """Openstack Octavia""" - - plugin_name = "openstack_octavia" - profiles = ('openstack', 'openstack_controller') -- packages = ('openstack-octavia-common',) - - var_puppet_gen = "/var/lib/config-data/puppet-generated/octavia" - -@@ -101,4 +100,14 @@ class OpenStackOctavia(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin): - regexp, r"\1*********" - ) - -+ -+class DebianOctavia(OpenStackOctavia, DebianPlugin, UbuntuPlugin): -+ -+ packages = ('openstack-octavia-common',) -+ -+ -+class RedHatOctavia(OpenStackOctavia, RedHatPlugin): -+ -+ packages = ('openstack-selinux',) -+ - # vim: set et ts=4 sw=4 : -diff --git a/sos/plugins/openstack_sahara.py b/sos/plugins/openstack_sahara.py -index cdb4b02d..83661b0f 100644 ---- a/sos/plugins/openstack_sahara.py -+++ b/sos/plugins/openstack_sahara.py -@@ -86,12 +86,7 @@ class DebianSahara(OpenStackSahara, DebianPlugin, UbuntuPlugin): - class RedHatSahara(OpenStackSahara, RedHatPlugin): - """OpenStack sahara related information for Red Hat distributions.""" - -- packages = ( -- 'openstack-sahara', -- 'openstack-sahara-api', -- 'openstack-sahara-engine', -- 'python-saharaclient' -- ) -+ packages = ('openstack-selinux',) - - def setup(self): - super(RedHatSahara, self).setup() -diff --git 
a/sos/plugins/openstack_swift.py b/sos/plugins/openstack_swift.py -index fdf101a9..6637bfa5 100644 ---- a/sos/plugins/openstack_swift.py -+++ b/sos/plugins/openstack_swift.py -@@ -91,14 +91,6 @@ class DebianSwift(OpenStackSwift, DebianPlugin, UbuntuPlugin): - - class RedHatSwift(OpenStackSwift, RedHatPlugin): - -- packages = ( -- 'openstack-swift', -- 'openstack-swift-account', -- 'openstack-swift-container', -- 'openstack-swift-object', -- 'openstack-swift-proxy', -- 'swift', -- 'python-swiftclient' -- ) -+ packages = ('openstack-selinux',) - - # vim: set et ts=4 sw=4 : -diff --git a/sos/plugins/openstack_trove.py b/sos/plugins/openstack_trove.py -index 6ec8aff8..699ae43d 100644 ---- a/sos/plugins/openstack_trove.py -+++ b/sos/plugins/openstack_trove.py -@@ -83,7 +83,7 @@ class DebianTrove(OpenStackTrove, DebianPlugin, UbuntuPlugin): - - class RedHatTrove(OpenStackTrove, RedHatPlugin): - -- packages = ['openstack-trove'] -+ packages = ('openstack-selinux',) - - def setup(self): - super(RedHatTrove, self).setup() --- -2.17.1 - diff --git a/SOURCES/sos-bz1638855-block-luks.patch b/SOURCES/sos-bz1638855-block-luks.patch deleted file mode 100644 index d300131..0000000 --- a/SOURCES/sos-bz1638855-block-luks.patch +++ /dev/null @@ -1,49 +0,0 @@ -From 5f40365c453128f2ee7f0a22f11eb3434fedd64a Mon Sep 17 00:00:00 2001 -From: Pavel Moravec -Date: Sun, 14 Oct 2018 14:41:34 +0200 -Subject: [PATCH] [block] proper parsing of luks partition on self device - -Simplify identification of LUKS partitions by collecting lsblk with -option -l, such that the device name is the very first string every -time. That is required for LUKS partition located on the device itself -where standard lsblk output does not contain '|-' before the device name. - -Resolves: #1449 - -Signed-off-by: Pavel Moravec ---- - sos/plugins/block.py | 10 +++++----- - 1 file changed, 5 insertions(+), 5 deletions(-) - -diff --git a/sos/plugins/block.py b/sos/plugins/block.py -index 059686c5..e7e25bba 100644 ---- a/sos/plugins/block.py -+++ b/sos/plugins/block.py -@@ -27,11 +27,11 @@ class Block(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin): - return out - for line in lsblk_out.splitlines(): - # find in output lines like -- # |-sda2 crypto_LUKS -- # and separate device name - it will be 1st string on the line -- # after first '-' -+ # sda2 crypto_LUKS -+ # loop0 crypto_LUKS -+ # and separate device name - it will be the 1st string on the line - if 'crypto_LUKS' in line: -- dev = line.split()[0].split('-', 1)[1] -+ dev = line.split()[0] - out.append(dev) - return out - -@@ -67,7 +67,7 @@ class Block(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin): - "fdisk -l %s" % disk_path - ]) - -- lsblk_file = self.get_cmd_output_now("lsblk -f -a") -+ lsblk_file = self.get_cmd_output_now("lsblk -f -a -l") - # for LUKS devices, collect cryptsetup luksDump - if lsblk_file: - for dev in self.get_luks_devices(lsblk_file): --- -2.17.2 - diff --git a/SOURCES/sos-bz1642377-opendaylight-karaf-logs.patch b/SOURCES/sos-bz1642377-opendaylight-karaf-logs.patch deleted file mode 100644 index f694329..0000000 --- a/SOURCES/sos-bz1642377-opendaylight-karaf-logs.patch +++ /dev/null @@ -1,53 +0,0 @@ -From 27c61e198f47af7ba5899ff1f50ab3d0e089d5eb Mon Sep 17 00:00:00 2001 -From: Victor Pickard -Date: Tue, 25 Sep 2018 11:04:07 -0400 -Subject: [PATCH] [opendaylight] Update directory for openDaylight logs - -OpenDaylight karaf logs are now located in: -/var/log/containers/opendaylight/karaf/logs, so -deprecate the old paths, and update the plugin -to get the karaf.log 
files from new location. - -Resolves: #1438 - -Signed-off-by: Victor Pickard -Signed-off-by: Bryn M. Reeves ---- - sos/plugins/opendaylight.py | 13 +++++++++++++ - 1 file changed, 13 insertions(+) - -diff --git a/sos/plugins/opendaylight.py b/sos/plugins/opendaylight.py -index 07c3d11e..2dcb1b88 100644 ---- a/sos/plugins/opendaylight.py -+++ b/sos/plugins/opendaylight.py -@@ -27,14 +27,27 @@ class OpenDaylight(Plugin, RedHatPlugin): - ]) - - if self.get_option("all_logs"): -+ -+ # /var/log/containers/opendaylight/ path is specific to ODL -+ # Oxygen-SR3 and earlier versions, and may be removed in a future -+ # update. -+ - self.add_copy_spec([ - "/opt/opendaylight/data/log/", - "/var/log/containers/opendaylight/", -+ "/var/log/containers/opendaylight/karaf/logs/", - ]) -+ - else: -+ -+ # /var/log/containers/opendaylight/ path is specific to ODL -+ # Oxygen-SR3 and earlier versions, and may be removed in a future -+ # update. -+ - self.add_copy_spec([ - "/opt/opendaylight/data/log/*.log*", - "/var/log/containers/opendaylight/*.log*", -+ "/var/log/containers/opendaylight/karaf/logs/*.log*", - ]) - - self.add_cmd_output("docker logs opendaylight_api") --- -2.17.2 - diff --git a/SOURCES/sos-bz1644021-brctl-deprecated.patch b/SOURCES/sos-bz1644021-brctl-deprecated.patch deleted file mode 100644 index da4cb5c..0000000 --- a/SOURCES/sos-bz1644021-brctl-deprecated.patch +++ /dev/null @@ -1,91 +0,0 @@ -From 4c377f04f571c2d265a564bb27961bac5fd4a854 Mon Sep 17 00:00:00 2001 -From: Pavel Moravec -Date: Mon, 5 Nov 2018 14:33:50 +0100 -Subject: [PATCH] [networking] Replace "brctl: by "bridge" commands - -As bridge-utils containing brctl command are being deprecated, -sosreport should call bridge command instead. - -Although the mapping of the commands is not 1:1, the data collected -(together with few "ip .." commands) will remain the same. - -Resolves: #1472 - -Signed-off-by: Pavel Moravec ---- - sos/plugins/networking.py | 38 ++++++++------------------------------ - sos/plugins/xen.py | 2 +- - 2 files changed, 9 insertions(+), 31 deletions(-) - -diff --git a/sos/plugins/networking.py b/sos/plugins/networking.py -index fa3d0cda..5f532707 100644 ---- a/sos/plugins/networking.py -+++ b/sos/plugins/networking.py -@@ -25,24 +25,6 @@ class Networking(Plugin): - # switch to enable netstat "wide" (non-truncated) output mode - ns_wide = "-W" - -- def get_bridge_name(self, brctl_file): -- """Return a list for which items are bridge name according to the -- output of brctl show stored in brctl_file. -- """ -- out = [] -- try: -- brctl_out = open(brctl_file).read() -- except IOError: -- return out -- for line in brctl_out.splitlines(): -- if line.startswith("bridge name") \ -- or line.isspace() \ -- or line[:1].isspace(): -- continue -- br_name, br_rest = line.split(None, 1) -- out.append(br_name) -- return out -- - def get_eth_interfaces(self, ip_link_out): - """Return a dictionary for which keys are ethernet interface - names taken from the output of "ip -o link". -@@ -215,18 +197,14 @@ class Networking(Plugin): - "ethtool -g "+eth - ]) - -- # brctl command will load bridge and related kernel modules -- # if those modules are not loaded at the time of brctl command running -- # This behaviour causes an unexpected configuration change for system. -- # sosreport should aovid such situation. 
-- if self.is_module_loaded("bridge"): -- brctl_file = self.get_cmd_output_now("brctl show") -- if brctl_file: -- for br_name in self.get_bridge_name(brctl_file): -- self.add_cmd_output([ -- "brctl showstp "+br_name, -- "brctl showmacs "+br_name -- ]) -+ # Collect information about bridges (some data already collected via -+ # "ip .." commands) -+ self.add_cmd_output([ -+ "bridge -s -s -d link show", -+ "bridge -s -s -d -t fdb show", -+ "bridge -s -s -d -t mdb show", -+ "bridge -d vlan show" -+ ]) - - if self.get_option("traceroute"): - self.add_cmd_output("/bin/traceroute -n %s" % self.trace_host) -diff --git a/sos/plugins/xen.py b/sos/plugins/xen.py -index 2ad8dae9..ace6c362 100644 ---- a/sos/plugins/xen.py -+++ b/sos/plugins/xen.py -@@ -78,7 +78,7 @@ class Xen(Plugin, RedHatPlugin): - "xm info", - "xm list", - "xm list --long", -- "brctl show" -+ "bridge link show" - ]) - self.dom_collect_proc() - if self.is_running_xenstored(): --- -2.17.2 - diff --git a/SOURCES/sos-bz1644022-nftables-ruleset.patch b/SOURCES/sos-bz1644022-nftables-ruleset.patch deleted file mode 100644 index dd398d9..0000000 --- a/SOURCES/sos-bz1644022-nftables-ruleset.patch +++ /dev/null @@ -1,31 +0,0 @@ -From af0c5a1160198fde8a79b956a4c665dc574cc466 Mon Sep 17 00:00:00 2001 -From: Pavel Moravec -Date: Thu, 1 Nov 2018 22:35:26 +0100 -Subject: [PATCH] [firewalld] collect nftables ruleset - -Collect "nft list ruleset". - -Resolves: #1470 - -Signed-off-by: Pavel Moravec ---- - sos/plugins/firewalld.py | 3 +++ - 1 file changed, 3 insertions(+) - -diff --git a/sos/plugins/firewalld.py b/sos/plugins/firewalld.py -index 51dd475c..7185b5d4 100644 ---- a/sos/plugins/firewalld.py -+++ b/sos/plugins/firewalld.py -@@ -32,6 +32,9 @@ class FirewallD(Plugin, RedHatPlugin): - "/var/log/firewalld", - ]) - -+ # collect nftables ruleset -+ self.add_cmd_output("nft list ruleset") -+ - # use a 10s timeout to workaround dbus problems in - # docker containers. - self.add_cmd_output([ --- -2.17.2 - diff --git a/SOURCES/sos-bz1644062-lorax-composer.patch b/SOURCES/sos-bz1644062-lorax-composer.patch deleted file mode 100644 index 59104ee..0000000 --- a/SOURCES/sos-bz1644062-lorax-composer.patch +++ /dev/null @@ -1,96 +0,0 @@ -From 55a21b9ef43d596a797325379b8acd3100850b50 Mon Sep 17 00:00:00 2001 -From: Pavel Moravec -Date: Sat, 10 Nov 2018 16:44:17 +0100 -Subject: [PATCH] [composer] New plugin for lorax-composer - -lorax-composer is an API server for building disk images using -Blueprints. The plugin collects composer config and logs and few -composer-cli command outputs. 
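The collection pattern in this new plugin is: run "composer-cli blueprints list", take each printed name, and run "composer-cli blueprints show <name>" for it. A minimal standalone sketch of that pattern follows, assuming only that composer-cli is on PATH; list_blueprints and dump_blueprints are hypothetical helpers, not the sos Plugin API used in the patch itself.

import subprocess

def list_blueprints():
    """Return blueprint names, one per line of 'composer-cli blueprints list'."""
    result = subprocess.run(["composer-cli", "blueprints", "list"],
                            capture_output=True, text=True)
    if result.returncode != 0:
        return []  # composer-cli missing or the API server is not running
    return [line for line in result.stdout.splitlines() if line]

def dump_blueprints():
    """Show every blueprint, mirroring the per-blueprint command in the plugin."""
    for name in list_blueprints():
        subprocess.run(["composer-cli", "blueprints", "show", name])

The plugin does the same thing through self.get_command_output() and self.add_cmd_output(), as the new composer.py in the patch below shows.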
- -Resolves: #1477 - -Signed-off-by: Pavel Moravec ---- - sos/plugins/composer.py | 39 +++++++++++++++++++++++++++++++++++++++ - 1 file changed, 39 insertions(+) - create mode 100644 sos/plugins/composer.py - -diff --git a/sos/plugins/composer.py b/sos/plugins/composer.py -new file mode 100644 -index 000000000..34901bcee ---- /dev/null -+++ b/sos/plugins/composer.py -@@ -0,0 +1,39 @@ -+from sos.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin -+ -+ -+class Composer(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin): -+ """Lorax Composer -+ """ -+ -+ plugin_name = 'composer' -+ profiles = ('sysmgmt', 'virt', ) -+ -+ packages = ('composer-cli',) -+ -+ def _get_blueprints(self): -+ blueprints = [] -+ bp_result = self.get_command_output("composer-cli blueprints list") -+ if bp_result['status'] != 0: -+ return blueprints -+ for line in bp_result['output'].splitlines(): -+ blueprints.append(line) -+ return blueprints -+ -+ def setup(self): -+ self.add_copy_spec([ -+ "/etc/lorax/composer.conf", -+ "/var/log/lorax-composer/composer.log" -+ "/var/log/lorax-composer/dnf.log" -+ "/var/log/lorax-composer/program.log" -+ "/var/log/lorax-composer/server.log" -+ ]) -+ blueprints = self._get_blueprints() -+ for blueprint in blueprints: -+ self.add_cmd_output("composer-cli blueprints show %s" % blueprint) -+ -+ self.add_cmd_output([ -+ "composer-cli blueprints list", -+ "composer-cli sources list" -+ ]) -+ -+# vim: set et ts=4 sw=4 : -From 7907bb4fbb3279d61d30d46372bc729557a5049a Mon Sep 17 00:00:00 2001 -From: Pavel Moravec -Date: Thu, 10 Jan 2019 15:18:04 +0100 -Subject: [PATCH] [composer] add missing commas in list in add_copy_spec - -Resolves: #1535 - -Signed-off-by: Pavel Moravec ---- - sos/plugins/composer.py | 8 ++++---- - 1 file changed, 4 insertions(+), 4 deletions(-) - -diff --git a/sos/plugins/composer.py b/sos/plugins/composer.py -index ff3aa49b..0f926398 100644 ---- a/sos/plugins/composer.py -+++ b/sos/plugins/composer.py -@@ -22,10 +22,10 @@ class Composer(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin): - def setup(self): - self.add_copy_spec([ - "/etc/lorax/composer.conf", -- "/var/log/lorax-composer/composer.log" -- "/var/log/lorax-composer/dnf.log" -- "/var/log/lorax-composer/program.log" -- "/var/log/lorax-composer/server.log" -+ "/var/log/lorax-composer/composer.log", -+ "/var/log/lorax-composer/dnf.log", -+ "/var/log/lorax-composer/program.log", -+ "/var/log/lorax-composer/server.log", - ]) - blueprints = self._get_blueprints() - for blueprint in blueprints: --- -2.17.2 - diff --git a/SOURCES/sos-bz1645085-networking-collect-all-numa.patch b/SOURCES/sos-bz1645085-networking-collect-all-numa.patch deleted file mode 100644 index e119fa0..0000000 --- a/SOURCES/sos-bz1645085-networking-collect-all-numa.patch +++ /dev/null @@ -1,31 +0,0 @@ -From 516d97bbfcd58d665dffff0e02a15b15249dd530 Mon Sep 17 00:00:00 2001 -From: Jamie Bainbridge -Date: Mon, 15 Oct 2018 15:51:39 +1000 -Subject: [PATCH] [networking] Collect NUMA Node of each NIC - -It is often useful to know the NUMA locality of each network device. -Collect /sys/class/net/*/device/numa_node to add this information. - -Resolves: #1451 - -Signed-off-by: Jamie Bainbridge -Signed-off-by: Bryn M. 
Reeves ---- - sos/plugins/networking.py | 1 + - 1 file changed, 1 insertion(+) - -diff --git a/sos/plugins/networking.py b/sos/plugins/networking.py -index 5f532707..f3e78935 100644 ---- a/sos/plugins/networking.py -+++ b/sos/plugins/networking.py -@@ -111,6 +111,7 @@ class Networking(Plugin): - "/etc/sysconfig/nftables.conf", - "/etc/nftables.conf", - "/etc/dnsmasq*", -+ "/sys/class/net/*/device/numa_node", - "/sys/class/net/*/flags", - "/sys/class/net/*/statistics/", - "/etc/iproute2" --- -2.17.2 - diff --git a/SOURCES/sos-bz1655984-lvmdump-am-ignored.patch b/SOURCES/sos-bz1655984-lvmdump-am-ignored.patch deleted file mode 100644 index b22ac6a..0000000 --- a/SOURCES/sos-bz1655984-lvmdump-am-ignored.patch +++ /dev/null @@ -1,119 +0,0 @@ -From 9db825247452d54152f1c866b6b90f897be32f15 Mon Sep 17 00:00:00 2001 -From: "Bryn M. Reeves" -Date: Wed, 12 Dec 2018 15:41:42 +0000 -Subject: [PATCH] [Plugin] clean up Plugin.get_option() - -There's a lot of ancient junk in this method (and associated code -strewn around sos.sosreport and tests). Remove the ability to pass -a list of options to the method since nothing uses this, and also -delete the incomplete implementation of global plugin options via -the commons dictionary (this work was already completed some time -ago by mapping these options directly to the command line args). - -Resolves: #1498 - -Signed-off-by: Bryn M. Reeves ---- - sos/plugins/__init__.py | 13 +------------ - sos/sosreport.py | 5 ----- - tests/option_tests.py | 16 ++++------------ - 3 files changed, 5 insertions(+), 29 deletions(-) - -diff --git a/sos/plugins/__init__.py b/sos/plugins/__init__.py -index 3abe29db..c87ae19b 100644 ---- a/sos/plugins/__init__.py -+++ b/sos/plugins/__init__.py -@@ -531,23 +531,12 @@ class Plugin(object): - if optionname in global_options: - return getattr(self.commons['cmdlineopts'], optionname) - -- def _check(key): -- if hasattr(optionname, "__iter__"): -- return key in optionname -- else: -- return key == optionname -- - for name, parms in zip(self.opt_names, self.opt_parms): -- if _check(name): -+ if name == optionname: - val = parms['enabled'] - if val is not None: - return val - -- items = six.iteritems(self.commons.get('global_plugin_options', {})) -- for key, value in items: -- if _check(key): -- return value -- - return default - - def get_option_as_list(self, optionname, delimiter=",", default=None): -diff --git a/sos/sosreport.py b/sos/sosreport.py -index 77ae7161..97bee10c 100644 ---- a/sos/sosreport.py -+++ b/sos/sosreport.py -@@ -336,7 +336,6 @@ class SoSReport(object): - self.skipped_plugins = [] - self.all_options = [] - self.xml_report = XmlReport() -- self.global_plugin_options = {} - self.archive = None - self.tempfile_util = None - self._args = args -@@ -432,7 +431,6 @@ class SoSReport(object): - 'xmlreport': self.xml_report, - 'cmdlineopts': self.opts, - 'config': self.config, -- 'global_plugin_options': self.global_plugin_options, - } - - def get_temp_file(self): -@@ -1426,9 +1424,6 @@ class SoSReport(object): - return False - return True - -- def set_global_plugin_option(self, key, value): -- self.global_plugin_options[key] = value -- - def _cleanup(self): - # archive and tempfile cleanup may fail due to a fatal - # OSError exception (ENOSPC, EROFS etc.). 
-diff --git a/tests/option_tests.py b/tests/option_tests.py -index a4267e2e..a99be4b0 100644 ---- a/tests/option_tests.py -+++ b/tests/option_tests.py -@@ -12,27 +12,19 @@ class GlobalOptionTest(unittest.TestCase): - self.commons = { - 'sysroot': '/', - 'policy': LinuxPolicy(), -- 'global_plugin_options': { -- 'test_option': 'foobar', -- 'baz': None, -- 'empty_global': True -- }, - } - self.plugin = Plugin(self.commons) -- self.plugin.opt_names = ['baz', 'empty'] -- self.plugin.opt_parms = [{'enabled': False}, {'enabled': None}] -+ self.plugin.opt_names = ['baz', 'empty', 'test_option'] -+ self.plugin.opt_parms = [ -+ {'enabled': False}, {'enabled': None}, {'enabled': 'foobar'} -+ ] - - def test_simple_lookup(self): - self.assertEquals(self.plugin.get_option('test_option'), 'foobar') - -- def test_multi_lookup(self): -- self.assertEquals(self.plugin.get_option(('not_there', 'test_option')), 'foobar') -- - def test_cascade(self): - self.assertEquals(self.plugin.get_option(('baz')), False) - -- def test_none_should_cascade(self): -- self.assertEquals(self.plugin.get_option(('empty', 'empty_global')), True) - - if __name__ == "__main__": - unittest.main() --- -2.17.2 - diff --git a/SOURCES/sos-bz1658937-ovirt_node-plugin.patch b/SOURCES/sos-bz1658937-ovirt_node-plugin.patch deleted file mode 100644 index 8b2e3d8..0000000 --- a/SOURCES/sos-bz1658937-ovirt_node-plugin.patch +++ /dev/null @@ -1,68 +0,0 @@ -From 0cddb4c820d39cae8bf6681c644fa353b0c20800 Mon Sep 17 00:00:00 2001 -From: Nijin Ashok -Date: Mon, 16 Jul 2018 14:42:43 +0530 -Subject: [PATCH] [ovirt_node] New plugin for oVirt Node - -oVirt Node is a small scaled down version used for hosting virtual -machines. The plugin collects node specific information like -upgrade log, the layer structure etc. - -Resolves: #1381 - -Signed-off-by: Nijin Ashok nashok@redhat.com -Signed-off-by: Bryn M. Reeves bmr@redhat.com ---- - sos/plugins/ovirt_node.py | 41 +++++++++++++++++++++++++++++++++++++++ - 1 file changed, 41 insertions(+) - create mode 100644 sos/plugins/ovirt_node.py - -diff --git a/sos/plugins/ovirt_node.py b/sos/plugins/ovirt_node.py -new file mode 100644 -index 00000000..ccb5d3c6 ---- /dev/null -+++ b/sos/plugins/ovirt_node.py -@@ -0,0 +1,41 @@ -+# Copyright (C) 2018 Red Hat, Inc., -+# This file is part of the sos project: https://github.com/sosreport/sos -+# -+# This copyrighted material is made available to anyone wishing to use, -+# modify, copy, or redistribute it subject to the terms and conditions of -+# version 2 of the GNU General Public License. -+# -+# See the LICENSE file in the source distribution for further information. 
-+ -+from sos.plugins import Plugin, RedHatPlugin -+ -+ -+class OvirtNode(Plugin, RedHatPlugin): -+ """oVirt Node specific information""" -+ -+ packages = ( -+ 'imgbased', -+ 'ovirt-node-ng-nodectl', -+ ) -+ -+ plugin_name = 'ovirt_node' -+ profiles = ('virt',) -+ -+ def setup(self): -+ -+ # Add log files -+ self.add_copy_spec([ -+ '/var/log/imgbased.log', -+ # Required for node versions < 4.2 -+ '/tmp/imgbased.log', -+ ]) -+ -+ # Collect runtime info -+ self.add_cmd_output([ -+ 'imgbase layout', -+ 'nodectl --machine-readable check', -+ 'nodectl info', -+ ]) -+ -+ -+# vim: expandtab tabstop=4 shiftwidth=4 --- -2.17.2 - diff --git a/SOURCES/sos-bz1658938-docker-podman-containers.patch b/SOURCES/sos-bz1658938-docker-podman-containers.patch deleted file mode 100644 index dd29950..0000000 --- a/SOURCES/sos-bz1658938-docker-podman-containers.patch +++ /dev/null @@ -1,186 +0,0 @@ -From 77c72b415feccd828fd7bc13caebf9841afc40c2 Mon Sep 17 00:00:00 2001 -From: "Bryn M. Reeves" -Date: Mon, 3 Sep 2018 17:11:06 +0100 -Subject: [PATCH] [docker] combine docker 'inspect' and 'logs' loops - -We're iterating over all the containers: might as well only do it -one time. - -Related: #1406, #1407 - -Signed-off-by: Bryn M. Reeves ---- - sos/plugins/docker.py | 4 +--- - 1 file changed, 1 insertion(+), 3 deletions(-) - -diff --git a/sos/plugins/docker.py b/sos/plugins/docker.py -index a44264a4..5b2acff5 100644 ---- a/sos/plugins/docker.py -+++ b/sos/plugins/docker.py -@@ -80,9 +80,7 @@ class Docker(Plugin): - if insp: - for container in insp: - self.add_cmd_output("docker inspect %s" % container) -- -- if self.get_option('logs'): -- for container in insp: -+ if self.get_option('logs'): - self.add_cmd_output("docker logs -t %s" % container) - - --- -2.17.2 - -From e3cfb1428592390166237e715471bb62d9bd9db6 Mon Sep 17 00:00:00 2001 -From: Daniel J Walsh -Date: Wed, 29 Aug 2018 06:50:10 -0400 -Subject: [PATCH] [podman] Add support for gathering information on podman - containers - -Resolves: #1407. - -Signed-off-by: Daniel J Walsh -Signed-off-by: Bryn M. Reeves ---- - sos/plugins/podman.py | 79 +++++++++++++++++++++++++++++++++++++++++++ - 1 file changed, 79 insertions(+) - create mode 100644 sos/plugins/podman.py - -diff --git a/sos/plugins/podman.py b/sos/plugins/podman.py -new file mode 100644 -index 00000000..c43246fc ---- /dev/null -+++ b/sos/plugins/podman.py -@@ -0,0 +1,79 @@ -+# Copyright (C) 2018 Red Hat, Inc. Daniel Walsh -+ -+# This file is part of the sos project: https://github.com/sosreport/sos -+# -+# This copyrighted material is made available to anyone wishing to use, -+# modify, copy, or redistribute it subject to the terms and conditions of -+# version 2 of the GNU General Public License. -+# -+# See the LICENSE file in the source distribution for further information. 
-+ -+from sos.plugins import Plugin, RedHatPlugin, UbuntuPlugin -+ -+ -+class Podman(Plugin): -+ -+ """Podman containers -+ """ -+ -+ plugin_name = 'podman' -+ profiles = ('container',) -+ packages = ('podman') -+ -+ option_list = [ -+ ("all", "enable capture for all containers, even containers " -+ "that have terminated", 'fast', False), -+ ("logs", "capture logs for running containers", -+ 'fast', False), -+ ("size", "capture image sizes for podman ps", 'slow', False) -+ ] -+ -+ def setup(self): -+ self.add_copy_spec([ -+ "/etc/containers/registries.conf", -+ "/etc/containers/storage.conf", -+ "/etc/containers/mounts.conf", -+ "/etc/containers/policy.json", -+ ]) -+ -+ subcmds = [ -+ 'info', -+ 'images', -+ 'pod ps', -+ 'pod ps -a', -+ 'ps', -+ 'ps -a', -+ 'stats --no-stream', -+ 'version', -+ ] -+ -+ self.add_cmd_output(["podman %s" % s for s in subcmds]) -+ -+ # separately grab ps -s as this can take a *very* long time -+ if self.get_option('size'): -+ self.add_cmd_output('podman ps -as') -+ -+ self.add_journal(units="podman") -+ self.add_cmd_output("ls -alhR /etc/cni") -+ -+ ps_cmd = 'podman ps -q' -+ if self.get_option('all'): -+ ps_cmd = "%s -a" % ps_cmd -+ -+ img_cmd = 'podman images -q' -+ insp = set() -+ -+ for icmd in [ps_cmd, img_cmd]: -+ result = self.get_command_output(icmd) -+ if result['status'] == 0: -+ for con in result['output'].splitlines(): -+ insp.add(con) -+ -+ if insp: -+ for container in insp: -+ self.add_cmd_output("podman inspect %s" % container) -+ if self.get_option('logs'): -+ self.add_cmd_output("podman logs -t %s" % container) -+ -+ -+# vim: set et ts=4 sw=4 : --- -2.17.2 - -From 1401c7153dda9bd0558035ba0692cf05a93ca419 Mon Sep 17 00:00:00 2001 -From: Pavel Moravec -Date: Tue, 6 Nov 2018 08:13:52 +0100 -Subject: [PATCH] [podman] allow the plugin for RedHatPlugin and UbuntuPlugin - -Until Podman inherits RedHatPlugin and/or UbuntuPlugin, the plugin -can not be executed on underlying distros. - -Further, remove one redundant test as "for container in insp" will -work properly also for empty "insp". - -Resolves: #1473 - -Signed-off-by: Pavel Moravec ---- - sos/plugins/podman.py | 11 +++++------ - 1 file changed, 5 insertions(+), 6 deletions(-) - -diff --git a/sos/plugins/podman.py b/sos/plugins/podman.py -index c43246fc..72e22558 100644 ---- a/sos/plugins/podman.py -+++ b/sos/plugins/podman.py -@@ -11,7 +11,7 @@ - from sos.plugins import Plugin, RedHatPlugin, UbuntuPlugin - - --class Podman(Plugin): -+class Podman(Plugin, RedHatPlugin, UbuntuPlugin): - - """Podman containers - """ -@@ -69,11 +69,10 @@ class Podman(Plugin): - for con in result['output'].splitlines(): - insp.add(con) - -- if insp: -- for container in insp: -- self.add_cmd_output("podman inspect %s" % container) -- if self.get_option('logs'): -- self.add_cmd_output("podman logs -t %s" % container) -+ for container in insp: -+ self.add_cmd_output("podman inspect %s" % container) -+ if self.get_option('logs'): -+ self.add_cmd_output("podman logs -t %s" % container) - - - # vim: set et ts=4 sw=4 : --- -2.17.2 - diff --git a/SOURCES/sos-bz1658939-postgresql-collect-full-dump.patch b/SOURCES/sos-bz1658939-postgresql-collect-full-dump.patch deleted file mode 100644 index f2a06ab..0000000 --- a/SOURCES/sos-bz1658939-postgresql-collect-full-dump.patch +++ /dev/null @@ -1,76 +0,0 @@ -From 47e6b3d92c8a13560b248e6f0e2ffb334b547d07 Mon Sep 17 00:00:00 2001 -From: Yedidyah Bar David -Date: Tue, 4 Dec 2018 13:08:44 +0200 -Subject: [PATCH] [Plugin] Obey sizelimit=0 - -If sizelimit is 0, do not limit. 
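(The behaviour described here comes down to a plain Python pitfall: "value or default" treats an explicit 0 the same as None, which is what the hunks that follow correct with an explicit None check. A minimal standalone sketch; the default of 25 is a placeholder, not a value taken from sos:)

# Standalone sketch (plain Python): distinguishing an explicit 0 from None.
def limit_with_or(sizelimit, log_size=25):       # 25 is a placeholder default
    return sizelimit or log_size                 # 0 silently becomes 25

def limit_with_is_none(sizelimit, log_size=25):
    if sizelimit is None:
        sizelimit = log_size
    return sizelimit                             # 0 now means "no limit"

print(limit_with_or(0))          # 25 -- the explicit 0 is lost
print(limit_with_is_none(0))     # 0  -- unlimited, as requested
print(limit_with_is_none(None))  # 25 -- the default still applies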
Only use the default if it's None. - -Bug-Url: https://bugzilla.redhat.com/1654068 -Signed-off-by: Yedidyah Bar David ---- - sos/plugins/__init__.py | 6 ++++-- - 1 file changed, 4 insertions(+), 2 deletions(-) - -diff --git a/sos/plugins/__init__.py b/sos/plugins/__init__.py -index 7d2a8b2d..97f3cc59 100644 ---- a/sos/plugins/__init__.py -+++ b/sos/plugins/__init__.py -@@ -569,7 +569,8 @@ class Plugin(object): - a single file the file will be tailed to meet sizelimit. If the first - file in a glob is too large it will be tailed to meet the sizelimit. - """ -- sizelimit = sizelimit or self.get_option("log_size") -+ if sizelimit is None: -+ sizelimit = self.get_option("log_size") - - if self.get_option('all_logs'): - sizelimit = None -@@ -703,7 +704,8 @@ class Plugin(object): - cmds = [cmds] - if len(cmds) > 1 and (suggest_filename or root_symlink): - self._log_warn("ambiguous filename or symlink for command list") -- sizelimit = sizelimit or self.get_option("log_size") -+ if sizelimit is None: -+ sizelimit = self.get_option("log_size") - for cmd in cmds: - self._add_cmd_output(cmd, suggest_filename=suggest_filename, - root_symlink=root_symlink, timeout=timeout, --- -2.17.2 - -From 254d93499d64acaff5103e15c25649d418004737 Mon Sep 17 00:00:00 2001 -From: Yedidyah Bar David -Date: Tue, 4 Dec 2018 13:10:32 +0200 -Subject: [PATCH] [postgresql] Do not limit dump size - -In principle, this might be risky - if a database is huge, we might not -want to collect all of it. But there is no sense in collecting only its -tail. If this turns out problematic, a future patch might check db size -and do not collect it at all if it's too large. - -Bug-Url: https://bugzilla.redhat.com/1654068 - -Resolves: #1497 - -Signed-off-by: Yedidyah Bar David -Signed-off-by: Bryn M. Reeves ---- - sos/plugins/postgresql.py | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/sos/plugins/postgresql.py b/sos/plugins/postgresql.py -index d47f7e8b..aef431f8 100644 ---- a/sos/plugins/postgresql.py -+++ b/sos/plugins/postgresql.py -@@ -64,7 +64,7 @@ class PostgreSQL(Plugin): - if scl is not None: - cmd = self.convert_cmd_scl(scl, cmd) - self.add_cmd_output(cmd, suggest_filename=filename, -- binary=True) -+ binary=True, sizelimit=0) - - else: # no password in env or options - self.soslog.warning( --- -2.17.2 - diff --git a/SOURCES/sos-bz1665929-nvme-config.patch b/SOURCES/sos-bz1665929-nvme-config.patch new file mode 100644 index 0000000..a370d24 --- /dev/null +++ b/SOURCES/sos-bz1665929-nvme-config.patch @@ -0,0 +1,36 @@ +From 8eff8f82604cb83a4c43f4a3ba0308940863c1b5 Mon Sep 17 00:00:00 2001 +From: Pavel Moravec +Date: Mon, 29 Jul 2019 11:34:06 +0200 +Subject: [PATCH] [nvme] collect config file everytime + +Collect /etc/nvme/discovery.conf every time, outside any for-loop block. 
+ +Resolves: #1740 + +Signed-off-by: Pavel Moravec +--- + sos/plugins/nvme.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/sos/plugins/nvme.py b/sos/plugins/nvme.py +index 6e2cda73..bd9d2df0 100644 +--- a/sos/plugins/nvme.py ++++ b/sos/plugins/nvme.py +@@ -21,6 +21,7 @@ class Nvme(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin): + return [dev for dev in sys_block if dev.startswith('nvme')] + + def setup(self): ++ self.add_copy_spec("/etc/nvme/discovery.conf") + self.add_cmd_output([ + "nvme list", + "nvme list-subsys", +@@ -39,6 +40,5 @@ class Nvme(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin): + "nvme show-regs /dev/%s" % dev, + "nvme get-ns-id /dev/%s" % dev + ]) +- self.add_copy_spec("/etc/nvme/discovery.conf") + + # vim: set et ts=4 sw=4 : +-- +2.21.0 + diff --git a/SOURCES/sos-bz1665981-peripety-config.patch b/SOURCES/sos-bz1665981-peripety-config.patch new file mode 100644 index 0000000..a06f27f --- /dev/null +++ b/SOURCES/sos-bz1665981-peripety-config.patch @@ -0,0 +1,30 @@ +From 34219ce0e15de5423baaaa1054a98cf1b58f0eae Mon Sep 17 00:00:00 2001 +From: Pavel Moravec +Date: Thu, 25 Jul 2019 15:03:50 +0200 +Subject: [PATCH] [peripety] collect proper config file + +The config file is /etc/peripetyd.conf . + +Resolves: #1737 + +Signed-off-by: Pavel Moravec +--- + sos/plugins/peripety.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/sos/plugins/peripety.py b/sos/plugins/peripety.py +index 7567731d..42877c66 100644 +--- a/sos/plugins/peripety.py ++++ b/sos/plugins/peripety.py +@@ -20,7 +20,7 @@ class Peripety(Plugin, RedHatPlugin): + services = ('peripetyd',) + + def setup(self): +- self.add_copy_spec('/etc/peripety.conf') ++ self.add_copy_spec('/etc/peripetyd.conf') + + forbid_reg = [ + 'vd.*', +-- +2.21.0 + diff --git a/SOURCES/sos-bz1666214-grub2-boot-data.patch b/SOURCES/sos-bz1666214-grub2-boot-data.patch deleted file mode 100644 index f9e46b3..0000000 --- a/SOURCES/sos-bz1666214-grub2-boot-data.patch +++ /dev/null @@ -1,41 +0,0 @@ -From d579fb745de0130132beea214edf85e597ffc54d Mon Sep 17 00:00:00 2001 -From: Pavel Moravec -Date: Sun, 20 Jan 2019 12:01:47 +0100 -Subject: [PATCH] [grub2] Enable plugin by grub2-common package also - -Newer Fedora systems, grub2 package is replaced by grub2-common -that needs to enable grub2 plugin by default as well. - -Additionally, collect /boot/loader/entries with boot list entries. 
- -Resolves: #1543 - -Signed-off-by: Pavel Moravec ---- - sos/plugins/grub2.py | 3 ++- - 1 file changed, 2 insertions(+), 1 deletion(-) - -diff --git a/sos/plugins/grub2.py b/sos/plugins/grub2.py -index ca1da620..9786de44 100644 ---- a/sos/plugins/grub2.py -+++ b/sos/plugins/grub2.py -@@ -15,7 +15,7 @@ class Grub2(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin): - - plugin_name = 'grub2' - profiles = ('boot',) -- packages = ('grub2', 'grub2-efi') -+ packages = ('grub2', 'grub2-efi', 'grub2-common') - - def setup(self): - self.add_copy_spec([ -@@ -23,6 +23,7 @@ class Grub2(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin): - "/boot/grub2/grub.cfg", - "/boot/grub2/grubenv", - "/boot/grub/grub.cfg", -+ "/boot/loader/entries", - "/etc/default/grub", - "/etc/grub2.cfg", - "/etc/grub.d" --- -2.17.2 - diff --git a/SOURCES/sos-bz1687954-buildah-index-out-of-range.patch b/SOURCES/sos-bz1687954-buildah-index-out-of-range.patch new file mode 100644 index 0000000..5491201 --- /dev/null +++ b/SOURCES/sos-bz1687954-buildah-index-out-of-range.patch @@ -0,0 +1,31 @@ +From 0100bd129e8fc809cc5b0258f6c0c7c64ad084ab Mon Sep 17 00:00:00 2001 +From: Pavel Moravec +Date: Mon, 15 Apr 2019 11:50:55 +0200 +Subject: [PATCH] [buildah] parse container list properly even for scratch ones + +Scratch containers dont have id, therefore we shall get container name +as the latest string on each line instead of 5th. + +Resolves: #1647 + +Signed-off-by: Pavel Moravec +--- + sos/plugins/buildah.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/sos/plugins/buildah.py b/sos/plugins/buildah.py +index c6c57553..fd8653eb 100644 +--- a/sos/plugins/buildah.py ++++ b/sos/plugins/buildah.py +@@ -47,7 +47,7 @@ class Buildah(Plugin, RedHatPlugin): + if containahs['is_wicked_pissah']: + for containah in containahs['auutput'].splitlines(): + # obligatory Tom Brady +- goat = containah.split()[4] ++ goat = containah.split()[-1] + self.add_cmd_output('buildah inspect -t container %s' % goat) + + pitchez = make_chowdah('buildah images -n') +-- +2.17.2 + diff --git a/SOURCES/sos-bz1695583-postgres-mysql-incorrect-defaults.patch b/SOURCES/sos-bz1695583-postgres-mysql-incorrect-defaults.patch new file mode 100644 index 0000000..a5d1036 --- /dev/null +++ b/SOURCES/sos-bz1695583-postgres-mysql-incorrect-defaults.patch @@ -0,0 +1,122 @@ +From c71b41547442d23daf5c3bf88450151d13903214 Mon Sep 17 00:00:00 2001 +From: Pavel Moravec +Date: Thu, 4 Apr 2019 13:54:18 +0200 +Subject: [PATCH] [maas,mysql,npm,pacemaker,postgresql] fix plugopts data types + +With new enforcement of implicit data type for plugin options, the +plugopts must have proper data types of default values and plugins must work +with them accordingly (in tests or so). 
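(A minimal standalone sketch of why the defaults below change from False or 0 to empty strings, assuming only that an option's type is inferred from its default value, as the message above describes; the option names are illustrative:)

# Standalone sketch (plain Python): a string-valued option should default to
# "" rather than False -- "" keeps the inferred type correct and is still falsy.
def option_type(default):
    return type(default).__name__

print(option_type(False))  # 'bool' -- wrong inferred type for a password/path option
print(option_type(""))     # 'str'  -- correct type, still reads as "not set"

password = ""
if password:               # the simpler truthiness check replaces "is not False"
    print("using supplied password")
else:
    print("no password supplied")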
+ +Resolves: #1635 + +Signed-off-by: Pavel Moravec +--- + sos/plugins/maas.py | 6 +++--- + sos/plugins/mysql.py | 2 +- + sos/plugins/npm.py | 4 ++-- + sos/plugins/pacemaker.py | 4 ++-- + sos/plugins/postgresql.py | 6 +++--- + 5 files changed, 11 insertions(+), 11 deletions(-) + +diff --git a/sos/plugins/maas.py b/sos/plugins/maas.py +index f8305406..ea038e86 100644 +--- a/sos/plugins/maas.py ++++ b/sos/plugins/maas.py +@@ -21,10 +21,10 @@ class Maas(Plugin, UbuntuPlugin): + + option_list = [ + ('profile-name', +- 'The name with which you will later refer to this remote', '', False), +- ('url', 'The URL of the remote API', '', False), ++ 'The name with which you will later refer to this remote', '', ''), ++ ('url', 'The URL of the remote API', '', ''), + ('credentials', +- 'The credentials, also known as the API key', '', False) ++ 'The credentials, also known as the API key', '', '') + ] + + def _has_login_options(self): +diff --git a/sos/plugins/mysql.py b/sos/plugins/mysql.py +index 49bc4168..411d90b8 100644 +--- a/sos/plugins/mysql.py ++++ b/sos/plugins/mysql.py +@@ -22,7 +22,7 @@ class Mysql(Plugin): + + option_list = [ + ("dbuser", "username for database dumps", "", "mysql"), +- ("dbpass", "password for database dumps" + pw_warn_text, "", False), ++ ("dbpass", "password for database dumps" + pw_warn_text, "", ""), + ("dbdump", "collect a database dump", "", False) + ] + +diff --git a/sos/plugins/npm.py b/sos/plugins/npm.py +index 0fc95801..ca00d0c0 100644 +--- a/sos/plugins/npm.py ++++ b/sos/plugins/npm.py +@@ -25,7 +25,7 @@ class Npm(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin, SuSEPlugin): + option_list = [("project_path", + 'List npm modules of a project specified by path', + 'fast', +- 0)] ++ '')] + + # in Fedora, Debian, Ubuntu and Suse the package is called npm + packages = ('npm',) +@@ -79,7 +79,7 @@ class Npm(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin, SuSEPlugin): + self.add_string_as_file(json.dumps(output), outfn) + + def setup(self): +- if self.get_option("project_path") != 0: ++ if self.get_option("project_path"): + project_path = os.path.abspath(os.path.expanduser( + self.get_option("project_path"))) + self._get_npm_output("npm ls --json", "npm_ls_project", +diff --git a/sos/plugins/pacemaker.py b/sos/plugins/pacemaker.py +index a1b64ea5..940389ee 100644 +--- a/sos/plugins/pacemaker.py ++++ b/sos/plugins/pacemaker.py +@@ -25,7 +25,7 @@ class Pacemaker(Plugin): + ) + + option_list = [ +- ("crm_from", "specify the start time for crm_report", "fast", False), ++ ("crm_from", "specify the start time for crm_report", "fast", ''), + ("crm_scrub", "enable password scrubbing for crm_report", "", True), + ] + +@@ -87,7 +87,7 @@ class Pacemaker(Plugin): + # time in order to collect data. 
+ crm_from = (datetime.today() - + timedelta(hours=72)).strftime("%Y-%m-%d %H:%m:%S") +- if self.get_option("crm_from") is not False: ++ if self.get_option("crm_from"): + if re.match(r'\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}', + str(self.get_option("crm_from"))): + crm_from = self.get_option("crm_from") +diff --git a/sos/plugins/postgresql.py b/sos/plugins/postgresql.py +index 1698b62f..a04dca8f 100644 +--- a/sos/plugins/postgresql.py ++++ b/sos/plugins/postgresql.py +@@ -31,7 +31,7 @@ class PostgreSQL(Plugin): + option_list = [ + ('pghome', 'PostgreSQL server home directory.', '', '/var/lib/pgsql'), + ('username', 'username for pg_dump', '', 'postgres'), +- ('password', 'password for pg_dump' + password_warn_text, '', False), ++ ('password', 'password for pg_dump' + password_warn_text, '', ''), + ('dbname', 'database name to dump for pg_dump', '', ''), + ('dbhost', 'database hostname/IP (do not use unix socket)', '', ''), + ('dbport', 'database server port number', '', '5432') +@@ -43,8 +43,8 @@ class PostgreSQL(Plugin): + # We're only modifying this for ourself and our children so + # there is no need to save and restore environment variables if + # the user decided to pass the password on the command line. +- if self.get_option("password") is not False: +- os.environ["PGPASSWORD"] = str(self.get_option("password")) ++ if self.get_option("password"): ++ os.environ["PGPASSWORD"] = self.get_option("password") + + if self.get_option("dbhost"): + cmd = "pg_dump -U %s -h %s -p %s -w -F t %s" % ( +-- +2.17.2 + diff --git a/SOURCES/sos-bz1700780-vdsm-plugin.patch b/SOURCES/sos-bz1700780-vdsm-plugin.patch new file mode 100644 index 0000000..2ae0c5a --- /dev/null +++ b/SOURCES/sos-bz1700780-vdsm-plugin.patch @@ -0,0 +1,335 @@ +From 1b4f8dfb8ac85708441faa3b2c2b9c2624dfa155 Mon Sep 17 00:00:00 2001 +From: "irit.go" +Date: Tue, 24 Jul 2018 11:01:55 +0300 +Subject: [PATCH 1/2] [Plugin] add get_process_pids() to return PIDs by process + name + +Signed-off-by: Irit Goihman igoihman@redhat.com +Signed-off-by: Bryn M. Reeves +--- + sos/plugins/__init__.py | 16 ++++++++++++++++ + 1 file changed, 16 insertions(+) + +diff --git a/sos/plugins/__init__.py b/sos/plugins/__init__.py +index 4c8822b7..cdeda77a 100644 +--- a/sos/plugins/__init__.py ++++ b/sos/plugins/__init__.py +@@ -1389,6 +1389,22 @@ class Plugin(object): + return False + return status + ++ def get_process_pids(self, process): ++ """Returns PIDs of all processes with process name. ++ If the process doesn't exist, returns an empty list""" ++ pids = [] ++ cmd_line_glob = "/proc/[0-9]*/cmdline" ++ cmd_line_paths = glob.glob(cmd_line_glob) ++ for path in cmd_line_paths: ++ try: ++ with open(path, 'r') as f: ++ cmd_line = f.read().strip() ++ if process in cmd_line: ++ pids.append(path.split("/")[2]) ++ except IOError as e: ++ continue ++ return pids ++ + + class RedHatPlugin(object): + """Tagging class for Red Hat's Linux distributions""" +-- +2.17.2 + + +From 0618db904dadb05fde70c181a5940989ac127fe2 Mon Sep 17 00:00:00 2001 +From: Irit Goihman +Date: Thu, 1 Feb 2018 16:44:32 +0200 +Subject: [PATCH 2/2] [plugins] add vdsm plugin + +Add a plugin for vdsm + +Resolves: #1205 + +Signed-off-by: Irit Goihman +Signed-off-by: Bryn M. 
Reeves +--- + sos/plugins/vdsm.py | 146 ++++++++++++++++++++++++++++++++++++++++++++ + 1 file changed, 146 insertions(+) + create mode 100644 sos/plugins/vdsm.py + +diff --git a/sos/plugins/vdsm.py b/sos/plugins/vdsm.py +new file mode 100644 +index 00000000..c648abbf +--- /dev/null ++++ b/sos/plugins/vdsm.py +@@ -0,0 +1,146 @@ ++# Copyright (C) 2018 Red Hat, Inc. ++ ++# This file is part of the sos project: https://github.com/sosreport/sos ++# ++# This copyrighted material is made available to anyone wishing to use, ++# modify, copy, or redistribute it subject to the terms and conditions of ++# version 2 of the GNU General Public License. ++# ++# See the LICENSE file in the source distribution for further information. ++ ++from sos.plugins import Plugin, RedHatPlugin ++ ++import glob ++import json ++import re ++ ++ ++# This configuration is based on vdsm.storage.lvm.LVM_CONF_TEMPLATE. ++# ++# locking_type is set to 0 in order to match lvm sos commands. With this ++# configuration we don't take any locks, so we will never block because ++# there is a stuck lvm command. ++# locking_type=0 ++# ++# use_lvmetad is set to 0 in order not to show cached, old lvm metadata. ++# use_lvmetad=0 ++# ++# preferred_names and filter config values are set to capture Vdsm devices. ++# preferred_names=[ '^/dev/mapper/' ] ++# filter=[ 'a|^/dev/mapper/.*|', 'r|.*|' ] ++LVM_CONFIG = """ ++global { ++ locking_type=0 ++ use_lvmetad=0 ++} ++devices { ++ preferred_names=["^/dev/mapper/"] ++ ignore_suspended_devices=1 ++ write_cache_state=0 ++ disable_after_error_count=3 ++ filter=["a|^/dev/mapper/.*|", "r|.*|"] ++} ++""" ++LVM_CONFIG = re.sub(r"\s+", " ", LVM_CONFIG).strip() ++ ++ ++class Vdsm(Plugin, RedHatPlugin): ++ """vdsm Plugin""" ++ ++ packages = ( ++ 'vdsm', ++ 'vdsm-client', ++ ) ++ ++ plugin_name = 'vdsm' ++ ++ def setup(self): ++ self.add_forbidden_path('/etc/pki/vdsm/keys/*') ++ self.add_forbidden_path('/etc/pki/vdsm/libvirt-spice/*-key.*') ++ self.add_forbidden_path('/etc/pki/libvirt/private/*') ++ ++ self.add_cmd_output('service vdsmd status') ++ ++ self.add_copy_spec([ ++ '/tmp/vds_installer*', ++ '/tmp/vds_bootstrap*', ++ '/etc/vdsm/*' ++ ]) ++ ++ limit = self.get_option('log_size') ++ ++ self.add_copy_spec('/var/log/vdsm/*', sizelimit=limit) ++ ++ self._add_vdsm_forbidden_paths() ++ self.add_copy_spec([ ++ '/var/run/vdsm/*', ++ '/usr/libexec/vdsm/hooks', ++ '/var/lib/vdsm' ++ ]) ++ ++ qemu_pids = self.get_process_pids('qemu-kvm') ++ if qemu_pids: ++ files = ["cmdline", "status", "mountstats"] ++ self.add_copy_spec([ ++ "/proc/%s/%s" % (pid, name) ++ for pid in qemu_pids ++ for name in files ++ ]) ++ self.add_cmd_output([ ++ "ls -ldZ /etc/vdsm", ++ "su vdsm -s sh -c 'tree -l /rhev/data-center'", ++ "su vdsm -s sh -c 'ls -lR /rhev/data-center'" ++ ]) ++ self.add_cmd_output([ ++ "lvm vgs -v -o +tags --config \'%s\'" % LVM_CONFIG, ++ "lvm lvs -v -o +tags --config \'%s\'" % LVM_CONFIG, ++ "lvm pvs -v -o +all --config \'%s\'" % LVM_CONFIG ++ ]) ++ ++ self.add_cmd_output([ ++ 'vdsm-client Host getCapabilities', ++ 'vdsm-client Host getStats', ++ 'vdsm-client Host getAllVmStats', ++ 'vdsm-client Host getVMFullList', ++ 'vdsm-client Host getDeviceList', ++ 'vdsm-client Host hostdevListByCaps', ++ 'vdsm-client Host getAllTasksInfo', ++ 'vdsm-client Host getAllTasksStatuses' ++ ]) ++ ++ try: ++ res = self.call_ext_prog( ++ 'vdsm-client Host getConnectedStoragePools' ++ ) ++ if res['status'] == 0: ++ pools = json.loads(res['output']) ++ for pool in pools: ++ self.add_cmd_output( ++ 'vdsm-client StoragePool 
getSpmStatus' ++ ' storagepoolID={}'.format(pool) ++ ) ++ except ValueError as e: ++ self._log_error( ++ 'vdsm-client Host getConnectedStoragePools: %s' % (e) ++ ) ++ ++ try: ++ res = self.call_ext_prog('vdsm-client Host getStorageDomains') ++ if res['status'] == 0: ++ sd_uuids = json.loads(res['output']) ++ dump_volume_chains_cmd = 'vdsm-tool dump-volume-chains %s' ++ self.add_cmd_output([ ++ dump_volume_chains_cmd % uuid for uuid in sd_uuids ++ ]) ++ except ValueError as e: ++ self._log_error( ++ 'vdsm-client Host getStorageDomains: %s' % (e) ++ ) ++ ++ def _add_vdsm_forbidden_paths(self): ++ """Add confidential sysprep vfds under /var/run/vdsm to ++ forbidden paths """ ++ ++ for file_path in glob.glob("/var/run/vdsm/*"): ++ if file_path.endswith(('.vfd', '/isoUploader', '/storage')): ++ self.add_forbidden_path(file_path) +-- +2.17.2 + +From 7141ebf3b2071c84286ced29154c33502c4da934 Mon Sep 17 00:00:00 2001 +From: Irit goihman +Date: Sun, 7 Apr 2019 14:03:55 +0300 +Subject: [PATCH] [vdsm] fix plugin docstring capitalisation + +--- + sos/plugins/vdsm.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/sos/plugins/vdsm.py b/sos/plugins/vdsm.py +index c648abbf9..4549c372e 100644 +--- a/sos/plugins/vdsm.py ++++ b/sos/plugins/vdsm.py +@@ -45,7 +45,7 @@ + + + class Vdsm(Plugin, RedHatPlugin): +- """vdsm Plugin""" ++ """vdsm plugin""" + + packages = ( + 'vdsm', +From 208a1d9622dfa13d923882793cd19e9e6cf1e488 Mon Sep 17 00:00:00 2001 +From: Irit goihman +Date: Sun, 7 Apr 2019 14:04:48 +0300 +Subject: [PATCH] [vdsm] use metadata_read_only=1 for LVM2 commands + +--- + sos/plugins/vdsm.py | 5 +++++ + 1 file changed, 5 insertions(+) + +diff --git a/sos/plugins/vdsm.py b/sos/plugins/vdsm.py +index 4549c372e..913d49a53 100644 +--- a/sos/plugins/vdsm.py ++++ b/sos/plugins/vdsm.py +@@ -22,6 +22,10 @@ + # there is a stuck lvm command. + # locking_type=0 + # ++# To prevent modifications to volume group metadata (for e.g. due to a ++# automatically detected inconsistency), metadata_read_only is set to 1. ++# metadata_read_only=1 ++# + # use_lvmetad is set to 0 in order not to show cached, old lvm metadata. 
+ # use_lvmetad=0 + # +@@ -31,6 +35,7 @@ + LVM_CONFIG = """ + global { + locking_type=0 ++ metadata_read_only=1 + use_lvmetad=0 + } + devices { +From 97c21901ddb6f7d5e3169d1777983f784b103bc4 Mon Sep 17 00:00:00 2001 +From: Irit goihman +Date: Sun, 7 Apr 2019 14:05:30 +0300 +Subject: [PATCH] [vdsm] drop explicit size limiting + +--- + sos/plugins/vdsm.py | 4 +--- + 1 file changed, 1 insertion(+), 3 deletions(-) + +diff --git a/sos/plugins/vdsm.py b/sos/plugins/vdsm.py +index 913d49a53..2dc4b6bea 100644 +--- a/sos/plugins/vdsm.py ++++ b/sos/plugins/vdsm.py +@@ -72,9 +72,7 @@ def setup(self): + '/etc/vdsm/*' + ]) + +- limit = self.get_option('log_size') +- +- self.add_copy_spec('/var/log/vdsm/*', sizelimit=limit) ++ self.add_copy_spec('/var/log/vdsm/*') + + self._add_vdsm_forbidden_paths() + self.add_copy_spec([ +From cfaf930e58f4996919d0da6c356135cfce26dacb Mon Sep 17 00:00:00 2001 +From: Irit goihman +Date: Sun, 7 Apr 2019 14:13:59 +0300 +Subject: [PATCH] [vdsm] change filter + +--- + sos/plugins/vdsm.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/sos/plugins/vdsm.py b/sos/plugins/vdsm.py +index 2dc4b6bea..ab5c6130b 100644 +--- a/sos/plugins/vdsm.py ++++ b/sos/plugins/vdsm.py +@@ -43,7 +43,7 @@ + ignore_suspended_devices=1 + write_cache_state=0 + disable_after_error_count=3 +- filter=["a|^/dev/mapper/.*|", "r|.*|"] ++ filter=["a|^/dev/disk/by-id/dm-uuid-mpath-|", "r|.+|"] + } + """ + LVM_CONFIG = re.sub(r"\s+", " ", LVM_CONFIG).strip() +From 2ebc04da53dc871c8dd5243567afa4f8592dca29 Mon Sep 17 00:00:00 2001 +From: Irit goihman +Date: Sun, 7 Apr 2019 14:14:32 +0300 +Subject: [PATCH] [vdsm] capture supervdsmd status + +--- + sos/plugins/vdsm.py | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/sos/plugins/vdsm.py b/sos/plugins/vdsm.py +index ab5c6130b..ae9c17c96 100644 +--- a/sos/plugins/vdsm.py ++++ b/sos/plugins/vdsm.py +@@ -65,6 +65,7 @@ def setup(self): + self.add_forbidden_path('/etc/pki/libvirt/private/*') + + self.add_cmd_output('service vdsmd status') ++ self.add_cmd_output('service supervdsmd status') + + self.add_copy_spec([ + '/tmp/vds_installer*', diff --git a/SOURCES/sos-bz1702806-openstack_instack-ansible-log.patch b/SOURCES/sos-bz1702806-openstack_instack-ansible-log.patch new file mode 100644 index 0000000..938273b --- /dev/null +++ b/SOURCES/sos-bz1702806-openstack_instack-ansible-log.patch @@ -0,0 +1,75 @@ +From 1b9c2032149488a2372d188a8ed3251e364f41cf Mon Sep 17 00:00:00 2001 +From: Emilien Macchi +Date: Wed, 24 Apr 2019 20:54:05 -0400 +Subject: [PATCH] [openstack_instack] add ansible.log + +Collect /var/lib/mistral/config-download-latest/ansible.log which is an +important log to be able to debug issues with Ansible playbooks. + +/var/lib/mistral/config-download-latest is a directory that exists +anyway on the undercloud and is the place where the ansible logs is +stored. + +Note: we don't want to collect the whole /var/lib/mistral because it +contains sensitive informations like username/passwords/endpoints. + +rhbz#1702806 +rhbz#1702802 + +Resolves: #1661 + +Signed-off-by: Emilien Macchi +Signed-off-by: Bryn M. 
Reeves +--- + sos/plugins/openstack_instack.py | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/sos/plugins/openstack_instack.py b/sos/plugins/openstack_instack.py +index e3470f5a..15e6c384 100644 +--- a/sos/plugins/openstack_instack.py ++++ b/sos/plugins/openstack_instack.py +@@ -22,7 +22,8 @@ NON_CONTAINERIZED_DEPLOY = [ + CONTAINERIZED_DEPLOY = [ + '/var/log/heat-launcher/', + '/home/stack/install-undercloud.log', +- '/home/stack/undercloud-install-*.tar.bzip2' ++ '/home/stack/undercloud-install-*.tar.bzip2', ++ '/var/lib/mistral/config-download-latest/ansible.log' + ] + + +-- +2.17.2 + +From ba77701624dccf3ba98fee6e9cdb9b9d804068c2 Mon Sep 17 00:00:00 2001 +From: Pavel Moravec +Date: Tue, 9 Jul 2019 13:23:37 +0200 +Subject: [PATCH] [archive] convert absolute symlink targets to relative + +Calling _make_leading_paths for a symlink with absolute symlink target +must create the symlink relative to the source. This will prevent +creating symlinks outside sosreport build dir. + +Resolves: #1710 + +Signed-off-by: Pavel Moravec +--- + sos/archive.py | 5 +++++ + 1 file changed, 5 insertions(+) + +diff --git a/sos/archive.py b/sos/archive.py +index 9e19426c5..dcd6908d1 100644 +--- a/sos/archive.py ++++ b/sos/archive.py +@@ -232,6 +232,11 @@ def in_archive(path): + dest = self._make_leading_paths(target_src, mode=mode) + dest = os.path.normpath(dest) + ++ # In case symlink target is an absolute path, make it ++ # relative to the directory with symlink source ++ if os.path.isabs(target): ++ target = os.path.relpath(target, target_dir) ++ + self.log_debug("Making symlink '%s' -> '%s'" % + (abs_path, target)) + os.symlink(target, abs_path) diff --git a/SOURCES/sos-bz1705377-composer-sources-info.patch b/SOURCES/sos-bz1705377-composer-sources-info.patch deleted file mode 100644 index 5dedf7d..0000000 --- a/SOURCES/sos-bz1705377-composer-sources-info.patch +++ /dev/null @@ -1,107 +0,0 @@ -From 9135d767e6244d370d8cbd59a75e1a56b928d4a3 Mon Sep 17 00:00:00 2001 -From: "Bryn M. Reeves" -Date: Wed, 12 Dec 2018 10:36:09 +0000 -Subject: [PATCH] [composer] avoid running 'blueprints list' twice - -Use get_cmd_output_now() to store the first call to composer-cli's -'blueprints list' command in the report, and then use that file to -find the list of available blueprints. - -Related: #1447 - -Signed-off-by: Bryn M. 
Reeves ---- - sos/plugins/composer.py | 15 ++++++--------- - 1 file changed, 6 insertions(+), 9 deletions(-) - -diff --git a/sos/plugins/composer.py b/sos/plugins/composer.py -index 34901bcee..ff3aa49bf 100644 ---- a/sos/plugins/composer.py -+++ b/sos/plugins/composer.py -@@ -12,11 +12,11 @@ class Composer(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin): - - def _get_blueprints(self): - blueprints = [] -- bp_result = self.get_command_output("composer-cli blueprints list") -- if bp_result['status'] != 0: -- return blueprints -- for line in bp_result['output'].splitlines(): -- blueprints.append(line) -+ bp_file = self.get_cmd_output_now("composer-cli blueprints list") -+ if bp_file: -+ with open(bp_file, "r") as bps: -+ for line in bps.read().splitlines(): -+ blueprints.append(line) - return blueprints - - def setup(self): -@@ -31,9 +31,6 @@ def setup(self): - for blueprint in blueprints: - self.add_cmd_output("composer-cli blueprints show %s" % blueprint) - -- self.add_cmd_output([ -- "composer-cli blueprints list", -- "composer-cli sources list" -- ]) -+ self.add_cmd_output("composer-cli sources list") - - # vim: set et ts=4 sw=4 : -From e456ae4f23e49c36d5efe6b0584c5ec29be21ba5 Mon Sep 17 00:00:00 2001 -From: Jake Hunsaker -Date: Mon, 18 Feb 2019 12:32:11 -0500 -Subject: [PATCH] [composer] Collect sources info for all sources - -Adds collection of info on each source found by composer-cli. The -_get_blueprints() method has been made more generic to accomodate both -blueprints and sources. - -Resolves: #1571 - -Signed-off-by: Jake Hunsaker -Signed-off-by: Bryn M. Reeves ---- - sos/plugins/composer.py | 22 ++++++++++++---------- - 1 file changed, 12 insertions(+), 10 deletions(-) - -diff --git a/sos/plugins/composer.py b/sos/plugins/composer.py -index 0f926398b..e4f30f5f0 100644 ---- a/sos/plugins/composer.py -+++ b/sos/plugins/composer.py -@@ -10,14 +10,14 @@ class Composer(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin): - - packages = ('composer-cli',) - -- def _get_blueprints(self): -- blueprints = [] -- bp_file = self.get_cmd_output_now("composer-cli blueprints list") -- if bp_file: -- with open(bp_file, "r") as bps: -- for line in bps.read().splitlines(): -- blueprints.append(line) -- return blueprints -+ def _get_entries(self, cmd): -+ entries = [] -+ ent_file = self.get_cmd_output_now(cmd) -+ if ent_file: -+ with open(ent_file, "r") as ents: -+ for line in ents.read().splitlines(): -+ entries.append(line) -+ return entries - - def setup(self): - self.add_copy_spec([ -@@ -27,10 +27,12 @@ def setup(self): - "/var/log/lorax-composer/program.log", - "/var/log/lorax-composer/server.log", - ]) -- blueprints = self._get_blueprints() -+ blueprints = self._get_entries("composer-cli blueprints list") - for blueprint in blueprints: - self.add_cmd_output("composer-cli blueprints show %s" % blueprint) - -- self.add_cmd_output("composer-cli sources list") -+ sources = self._get_entries("composer-cli sources list") -+ for src in sources: -+ self.add_cmd_output("composer-cli sources info %s" % src) - - # vim: set et ts=4 sw=4 : diff --git a/SOURCES/sos-bz1709906-frr-plugin.patch b/SOURCES/sos-bz1709906-frr-plugin.patch new file mode 100644 index 0000000..dc43402 --- /dev/null +++ b/SOURCES/sos-bz1709906-frr-plugin.patch @@ -0,0 +1,47 @@ +From 5969fe8ca4dcfd315c3df714c03c7e7344fa8047 Mon Sep 17 00:00:00 2001 +From: Pavel Moravec +Date: Tue, 9 Jul 2019 13:38:34 +0200 +Subject: [PATCH] [frr] FRR plugin + +A plugin for FRR alternative to Quagga. Path to conf is /etc/frr/. 
+ +Resolves: #1666 + +Signed-off-by: Filip Krska +Signed-off-by: Pavel Moravec +--- + sos/plugins/frr.py | 25 +++++++++++++++++++++++++ + 1 file changed, 25 insertions(+) + create mode 100644 sos/plugins/frr.py + +diff --git a/sos/plugins/frr.py b/sos/plugins/frr.py +new file mode 100644 +index 000000000..031b2d935 +--- /dev/null ++++ b/sos/plugins/frr.py +@@ -0,0 +1,25 @@ ++# This file is part of the sos project: https://github.com/sosreport/sos ++# ++# This copyrighted material is made available to anyone wishing to use, ++# modify, copy, or redistribute it subject to the terms and conditions of ++# version 2 of the GNU General Public License. ++# ++# See the LICENSE file in the source distribution for further information. ++ ++from sos.plugins import Plugin, RedHatPlugin ++ ++ ++class Frr(Plugin, RedHatPlugin): ++ """Frr routing service ++ """ ++ ++ plugin_name = 'frr' ++ profiles = ('network',) ++ ++ files = ('/etc/frr/zebra.conf',) ++ packages = ('frr',) ++ ++ def setup(self): ++ self.add_copy_spec("/etc/frr/") ++ ++# vim: set et ts=4 sw=4 : diff --git a/SOURCES/sos-bz1714243-sar-collect-older-data.patch b/SOURCES/sos-bz1714243-sar-collect-older-data.patch new file mode 100644 index 0000000..5984608 --- /dev/null +++ b/SOURCES/sos-bz1714243-sar-collect-older-data.patch @@ -0,0 +1,48 @@ +From b3d9fdf568e25997eb006e575d26fb301b91414b Mon Sep 17 00:00:00 2001 +From: Pavel Moravec +Date: Tue, 9 Jul 2019 13:15:48 +0200 +Subject: [PATCH] [sar] collect whole sar log dir + +Currently, sosreport does not collect files in saYYYYMMDD or +similar format. Assuming sar log dir contains only sar related data +it is safe to collect whole dir (up to size limit, from newest to +oldest files). + +Resolves: #1700 + +Signed-off-by: Pavel Moravec +--- + sos/plugins/sar.py | 19 ++----------------- + 1 file changed, 2 insertions(+), 17 deletions(-) + +diff --git a/sos/plugins/sar.py b/sos/plugins/sar.py +index 492c38a21..5a1b9f30a 100644 +--- a/sos/plugins/sar.py ++++ b/sos/plugins/sar.py +@@ -22,24 +22,9 @@ class Sar(Plugin,): + option_list = [("all_sar", "gather all system activity records", + "", False)] + +- # size-limit SAR data collected by default (MB) +- sa_size = 20 +- + def setup(self): +- if self.get_option("all_sar"): +- self.sa_size = 0 +- +- # Copy all sa??, sar??, sa??.* and sar??.* files, which will net +- # compressed and uncompressed versions, typically. +- for suffix in ('', '.*'): +- self.add_copy_spec( +- os.path.join(self.sa_path, "sa[0-3][0-9]" + suffix), +- sizelimit=self.sa_size, tailit=False +- ) +- self.add_copy_spec( +- os.path.join(self.sa_path, "sar[0-3][0-9]" + suffix), +- sizelimit=self.sa_size, tailit=False +- ) ++ self.add_copy_spec(self.sa_path, ++ sizelimit=0 if self.get_option("all_sar") else None) + + try: + dir_list = os.listdir(self.sa_path) diff --git a/SOURCES/sos-bz1717882-openstack-placement.patch b/SOURCES/sos-bz1717882-openstack-placement.patch new file mode 100644 index 0000000..931c9f9 --- /dev/null +++ b/SOURCES/sos-bz1717882-openstack-placement.patch @@ -0,0 +1,145 @@ +From b5d72bd91c8685e3551d6e796ca8559304b45785 Mon Sep 17 00:00:00 2001 +From: Lee Yarwood +Date: Mon, 24 Dec 2018 10:03:59 +0000 +Subject: [PATCH] [openstack] Extract Placement plugin from Nova + +The OpenStack Placement service is being extracted from Nova [1] +duringthe Stein development cycle [2]. This change extracts the +required plugin logic from the original Nova plugin into a new +Placement plugin ahead of this extraction. 
+ +[1] https://docs.openstack.org/placement/latest/ +[2] https://releases.openstack.org/stein/schedule.html + +Co-Authored-by: Piotr Kopec + +Resolves: #1676 + +Signed-off-by: Lee Yarwood +Signed-off-by: Bryn M. Reeves +--- + sos/plugins/openstack_placement.py | 112 +++++++++++++++++++++++++++++ + 1 file changed, 112 insertions(+) + create mode 100644 sos/plugins/openstack_placement.py + +diff --git a/sos/plugins/openstack_placement.py b/sos/plugins/openstack_placement.py +new file mode 100644 +index 00000000..26b1a520 +--- /dev/null ++++ b/sos/plugins/openstack_placement.py +@@ -0,0 +1,112 @@ ++# Copyright (C) 2019 Red Hat, Inc., Lee Yarwood ++ ++# This file is part of the sos project: https://github.com/sosreport/sos ++# ++# This copyrighted material is made available to anyone wishing to use, ++# modify, copy, or redistribute it subject to the terms and conditions of ++# version 2 of the GNU General Public License. ++# ++# See the LICENSE file in the source distribution for further information. ++ ++from sos.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin ++ ++ ++class OpenStackPlacement(Plugin): ++ """OpenStack Placement ++ """ ++ plugin_name = "openstack_placement" ++ profiles = ('openstack', 'openstack_controller') ++ ++ var_puppet_gen = "/var/lib/config-data/puppet-generated/placement" ++ ++ def setup(self): ++ ++ # collect commands output only if the openstack-placement-api service ++ # is running ++ ++ in_container = self.running_in_container() ++ ++ if self.service_is_running('openstack-placement-api') or in_container: ++ placement_config = "" ++ # if containerized we need to pass the config to the cont. ++ if in_container: ++ placement_config = "--config-dir " + self.var_puppet_gen + \ ++ "/etc/placement/" ++ self.add_cmd_output( ++ "placement-manage " + placement_config + " db version", ++ suggest_filename="placement-manage_db_version" ++ ) ++ ++ if self.get_option("all_logs"): ++ self.add_copy_spec([ ++ "/var/log/placement/", ++ "/var/log/containers/placement/", ++ "/var/log/containers/httpd/placement-api/" ++ ]) ++ else: ++ self.add_copy_spec([ ++ "/var/log/placement/*.log", ++ "/var/log/containers/placement/*.log", ++ "/var/log/containers/httpd/placement-api/*log", ++ ]) ++ ++ self.add_copy_spec([ ++ "/etc/placement/", ++ self.var_puppet_gen + "/etc/placement/", ++ self.var_puppet_gen + "/etc/my.cnf.d/tripleo.cnf", ++ self.var_puppet_gen + "/etc/httpd/conf/", ++ self.var_puppet_gen + "/etc/httpd/conf.d/", ++ self.var_puppet_gen + "/etc/httpd/conf.modules.d/*.conf", ++ ]) ++ ++ def running_in_container(self): ++ for runtime in ["docker", "podman"]: ++ container_status = self.get_command_output(runtime + " ps") ++ if container_status['status'] == 0: ++ for line in container_status['output'].splitlines(): ++ if line.endswith("placement_api"): ++ return True ++ return False ++ ++ def apply_regex_sub(self, regexp, subst): ++ self.do_path_regex_sub("/etc/placement/*", regexp, subst) ++ self.do_path_regex_sub( ++ self.var_puppet_gen + "/etc/placement/*", ++ regexp, subst ++ ) ++ ++ def postproc(self): ++ protect_keys = ["password"] ++ connection_keys = ["database_connection", "slave_connection"] ++ ++ self.apply_regex_sub( ++ r"((?m)^\s*(%s)\s*=\s*)(.*)" % "|".join(protect_keys), ++ r"\1*********" ++ ) ++ self.apply_regex_sub( ++ r"((?m)^\s*(%s)\s*=\s*(.*)://(\w*):)(.*)(@(.*))" % ++ "|".join(connection_keys), ++ r"\1*********\6" ++ ) ++ ++ ++class DebianPlacement(OpenStackPlacement, DebianPlugin, UbuntuPlugin): ++ ++ packages = ('placement') ++ ++ def setup(self): 
++ super(DebianPlacement, self).setup() ++ ++ ++class RedHatPlacement(OpenStackPlacement, RedHatPlugin): ++ ++ packages = ('openstack-selinux') ++ ++ def setup(self): ++ super(RedHatPlacement, self).setup() ++ if self.get_option("all_logs"): ++ self.add_copy_spec("/var/log/httpd/placement*") ++ else: ++ self.add_copy_spec("/var/log/httpd/placement*.log") ++ ++# vim: set et ts=4 sw=4 : +-- +2.21.0 + diff --git a/SOURCES/sos-bz1718087-redhat-local-name-not-rhn.patch b/SOURCES/sos-bz1718087-redhat-local-name-not-rhn.patch new file mode 100644 index 0000000..b3177c1 --- /dev/null +++ b/SOURCES/sos-bz1718087-redhat-local-name-not-rhn.patch @@ -0,0 +1,64 @@ +From 4aa7e6e383fd9a36931b1125a8e8cd8e3ebcf1de Mon Sep 17 00:00:00 2001 +From: Pavel Moravec +Date: Thu, 13 Jun 2019 16:39:54 +0200 +Subject: [PATCH] [policies] redhat policy to use hostname instead of rhn id + for local name + +As rhn is getting obsoleted and each host has its hostname, even redhat policy +should use host name as the local name. + +Resolves: #1702 + +Signed-off-by: Pavel Moravec +--- + sos/policies/redhat.py | 23 ----------------------- + 1 file changed, 23 deletions(-) + +diff --git a/sos/policies/redhat.py b/sos/policies/redhat.py +index 28d48d2d8..5fa1695c5 100644 +--- a/sos/policies/redhat.py ++++ b/sos/policies/redhat.py +@@ -19,14 +19,6 @@ + from sos import _sos as _ + from sos import SoSOptions + +-sys.path.insert(0, "/usr/share/rhn/") +-try: +- from up2date_client import up2dateAuth +- from rhn import rpclib +-except ImportError: +- # might fail if non-RHEL +- pass +- + OS_RELEASE = "/etc/os-release" + + +@@ -174,9 +166,6 @@ def get_tmp_dir(self, opt_tmp_dir): + return self._tmp_dir + return opt_tmp_dir + +- def get_local_name(self): +- return self.host_name() +- + + # Container environment variables on Red Hat systems. 
+ ENV_CONTAINER = 'container' +@@ -305,18 +294,6 @@ def dist_version(self): + pass + return False + +- def rhn_username(self): +- try: +- rhn_username = rpclib.xmlrpclib.loads( +- up2dateAuth.getSystemId())[0][0]['username'] +- return rhn_username.encode('utf-8', 'ignore') +- except Exception: +- # ignore any exception and return an empty username +- return "" +- +- def get_local_name(self): +- return self.rhn_username() or self.host_name() +- + def probe_preset(self): + # Package based checks + if self.pkg_by_name("satellite-common") is not None: diff --git a/SOURCES/sos-bz1719884-pcp-pmlogger-no-limit.patch b/SOURCES/sos-bz1719884-pcp-pmlogger-no-limit.patch new file mode 100644 index 0000000..8c12a1b --- /dev/null +++ b/SOURCES/sos-bz1719884-pcp-pmlogger-no-limit.patch @@ -0,0 +1,30 @@ +From 380737d0bf4021434db4d5e479f0b8a2aece6ec9 Mon Sep 17 00:00:00 2001 +From: Pavel Moravec +Date: Thu, 4 Apr 2019 10:43:24 +0200 +Subject: [PATCH] [pcp] collect pmlogger without a sizelimit + +sizelimit=None defaults to --log-size, use sizelimit=0 instead + +Resolves: #1632 + +Signed-off-by: Pavel Moravec +--- + sos/plugins/pcp.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/sos/plugins/pcp.py b/sos/plugins/pcp.py +index da7158a6..da2a6611 100644 +--- a/sos/plugins/pcp.py ++++ b/sos/plugins/pcp.py +@@ -130,7 +130,7 @@ class Pcp(Plugin, RedHatPlugin, DebianPlugin): + pmlogger_ls = self.get_cmd_output_now("ls -t1 %s" % path) + if pmlogger_ls: + for line in open(pmlogger_ls).read().splitlines(): +- self.add_copy_spec(line, sizelimit=None) ++ self.add_copy_spec(line, sizelimit=0) + files_collected = files_collected + 1 + if self.countlimit and files_collected == self.countlimit: + break +-- +2.17.2 + diff --git a/SOURCES/sos-bz1719885-plugin-vs-command-timeouts.patch b/SOURCES/sos-bz1719885-plugin-vs-command-timeouts.patch new file mode 100644 index 0000000..00e896e --- /dev/null +++ b/SOURCES/sos-bz1719885-plugin-vs-command-timeouts.patch @@ -0,0 +1,45 @@ +From 7c8c45dad3481cfaae3d3af9c188218aa14a3a6a Mon Sep 17 00:00:00 2001 +From: Pavel Moravec +Date: Tue, 9 Apr 2019 09:50:34 +0200 +Subject: [PATCH] [foreman,satellite] increase plugin default timeouts + +Those two plugins call commands with bigger timeouts than the default +plugin timeout is. That can unexpectedly kill the plugin execution when +the commands execution took longer than the plugin timeout (but within +cmd timeout). 
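(A minimal standalone sketch of the race being fixed below, with placeholder timeout values — the actual sos default plugin timeout and the per-command timeouts are not shown in this patch:)

# Standalone sketch (plain Python): a plugin whose individual commands are
# allowed more time than the plugin as a whole can be killed while a command
# is still within its own limit.
DEFAULT_PLUGIN_TIMEOUT = 300   # placeholder, not a value taken from sos
LONG_CMD_TIMEOUT = 600         # placeholder for a long-running command

def plugin_killed_early(cmd_timeout, plugin_timeout):
    # True when a single command may legitimately outlive the whole plugin
    return cmd_timeout > plugin_timeout

print(plugin_killed_early(LONG_CMD_TIMEOUT, DEFAULT_PLUGIN_TIMEOUT))  # True
print(plugin_killed_early(LONG_CMD_TIMEOUT, 1800))  # False with the raised limit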
+ +Resolves: #1642 + +Signed-off-by: Pavel Moravec +--- + sos/plugins/foreman.py | 1 + + sos/plugins/satellite.py | 1 + + 2 files changed, 2 insertions(+) + +diff --git a/sos/plugins/foreman.py b/sos/plugins/foreman.py +index a1e937f3..8bcd26bd 100644 +--- a/sos/plugins/foreman.py ++++ b/sos/plugins/foreman.py +@@ -19,6 +19,7 @@ class Foreman(Plugin): + """ + + plugin_name = 'foreman' ++ plugin_timeout = 1800 + profiles = ('sysmgmt',) + packages = ('foreman', 'foreman-proxy') + option_list = [ +diff --git a/sos/plugins/satellite.py b/sos/plugins/satellite.py +index c50c2ec7..83733076 100644 +--- a/sos/plugins/satellite.py ++++ b/sos/plugins/satellite.py +@@ -14,6 +14,7 @@ class Satellite(Plugin, RedHatPlugin): + """ + + plugin_name = 'satellite' ++ plugin_timeout = 1200 + profiles = ('sysmgmt',) + verify_packages = ('spacewalk.*',) + satellite = False +-- +2.17.2 + diff --git a/SOURCES/sos-bz1719886-sos-conf-disabled-plugins-manpages.patch b/SOURCES/sos-bz1719886-sos-conf-disabled-plugins-manpages.patch new file mode 100644 index 0000000..bed3bde --- /dev/null +++ b/SOURCES/sos-bz1719886-sos-conf-disabled-plugins-manpages.patch @@ -0,0 +1,110 @@ +From 4cb21e2c16b55e7506a3cefd9148ba4bf49dbce1 Mon Sep 17 00:00:00 2001 +From: Pavel Moravec +Date: Wed, 17 Apr 2019 13:17:24 +0200 +Subject: [PATCH] [sosreport] update sos.conf manpages by [general] section + description + +Since PR #1530, sosreport supports all command line options. Man pages +should document the enhancement. + +Resolves: #1652 + +Signed-off-by: Pavel Moravec +--- + man/en/sos.conf.5 | 38 ++++++++++++++++++++++++++++++++++---- + 1 file changed, 34 insertions(+), 4 deletions(-) + +diff --git a/man/en/sos.conf.5 b/man/en/sos.conf.5 +index b40a48e1..ad18d5f2 100644 +--- a/man/en/sos.conf.5 ++++ b/man/en/sos.conf.5 +@@ -6,19 +6,49 @@ sos.conf \- sosreport configuration + sosreport uses a configuration file at /etc/sos.conf. + .SH PARAMETERS + .sp +-There are two sections in the sosreport configuration file: +-plugins, and tunables. Options are set using 'ini'-style +-\fBname = value\fP pairs. ++There are three sections in the sosreport configuration file: ++general, plugins and tunables. Options are set using 'ini'-style ++\fBname = value\fP pairs. Disabling/enabling a boolean option ++is done the same way like on command line (e.g. process.lsof=off). + + Some options accept a comma separated list of values. + ++Using options that dont expect a value (like all-logs or no-report) ++will result in enabling those options, regardless of value set. ++ ++Sections are parsed in the ordering: ++.br ++- \fB[general]\fP ++.br ++- \fB[plugins]\fP (disable) ++.br ++- \fB[plugins]\fP (enable) ++.br ++- \fB[tunables]\fP ++ ++.TP ++\fB[general]\fP ++