diff --git a/.gitignore b/.gitignore
index 737aeda..af17557 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1 +1 @@
-SOURCES/3.5.tar.gz
+SOURCES/3.6.tar.gz
diff --git a/.sos.metadata b/.sos.metadata
index 6563c38..092d6e5 100644
--- a/.sos.metadata
+++ b/.sos.metadata
@@ -1 +1 @@
-645a3c29e0cde8ba07b876dc3698afdaec224c46 SOURCES/3.5.tar.gz
+aa090f917b4f54421e2ad2294a60fc124ef66a85 SOURCES/3.6.tar.gz
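Side note: the `.sos.metadata` entry pairs a 40-hex-digit checksum with the tarball path, which is the format a plain SHA-1 digest produces. Below is a minimal sketch of regenerating such a line; it assumes the recorded value really is an unmodified SHA-1 of the file (the dist-git lookaside tooling may compute and track it differently), and `metadata_line` is a hypothetical helper name, not part of any existing tool.

    import hashlib
    import sys

    def metadata_line(path):
        """Return a '<sha1> <path>' line in the style of .sos.metadata above."""
        digest = hashlib.sha1()
        with open(path, "rb") as f:
            # Hash in 1 MiB chunks so large tarballs do not need to fit in memory.
            for chunk in iter(lambda: f.read(1 << 20), b""):
                digest.update(chunk)
        return "%s %s" % (digest.hexdigest(), path)

    if __name__ == "__main__":
        # e.g. python metadata_line.py SOURCES/3.6.tar.gz
        print(metadata_line(sys.argv[1]))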
diff --git a/SOURCES/skip-generating-doc.patch b/SOURCES/skip-generating-doc.patch
index 6508f3c..faedf3b 100644
--- a/SOURCES/skip-generating-doc.patch
+++ b/SOURCES/skip-generating-doc.patch
@@ -21,5 +21,5 @@ diff -rup a/Makefile b/Makefile
 +#docs:
 +#	make -C docs html man
  
+ .PHONY: build
  build:
- 	for d in $(SUBDIRS); do make -C $$d; [ $$? = 0 ] || exit 1 ; done
diff --git a/SOURCES/sos-bz1353873-pcp-logsize.patch b/SOURCES/sos-bz1353873-pcp-logsize.patch
deleted file mode 100644
index 9a816bd..0000000
--- a/SOURCES/sos-bz1353873-pcp-logsize.patch
+++ /dev/null
@@ -1,27 +0,0 @@
-From 1932d96dcf9071d1180286b96abfd998e20a09fd Mon Sep 17 00:00:00 2001
-From: Pavel Moravec <pmoravec@redhat.com>
-Date: Thu, 11 Jan 2018 12:52:34 +0100
-Subject: [PATCH] [pcp] really apply sizelimit to logs collected
-
-add_copy_spec must be called to files and not dirs, to apply sizelimit
-
-Resolves: #1187
-
-Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
----
- sos/plugins/pcp.py | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/sos/plugins/pcp.py b/sos/plugins/pcp.py
-index 12537526b..8f17a5de5 100644
---- a/sos/plugins/pcp.py
-+++ b/sos/plugins/pcp.py
-@@ -122,7 +122,7 @@ def setup(self):
-         if self.pcp_hostname != '':
-             for pmdir in ('pmlogger', 'pmmgr'):
-                 path = os.path.join(self.pcp_log_dir, pmdir,
--                                    self.pcp_hostname)
-+                                    self.pcp_hostname, '*')
-                 self.add_copy_spec(path, sizelimit=self.limit)
- 
-         self.add_copy_spec([
diff --git a/SOURCES/sos-bz1463509-oc-adm-diagnostics.patch b/SOURCES/sos-bz1463509-oc-adm-diagnostics.patch
deleted file mode 100644
index 64a765d..0000000
--- a/SOURCES/sos-bz1463509-oc-adm-diagnostics.patch
+++ /dev/null
@@ -1,40 +0,0 @@
-From 30285352cad7b393c67a7c7c64a6fce902190bef Mon Sep 17 00:00:00 2001
-From: Pavel Moravec <pmoravec@redhat.com>
-Date: Thu, 16 Nov 2017 16:51:40 +0100
-Subject: [PATCH] [origin] fix typo in oc adm diagnostics
- --prevent-modification
-
-The argument is singular --prevent-modification .
-
-Resolves: #1150
-
-Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
----
- sos/plugins/origin.py | 4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
-diff --git a/sos/plugins/origin.py b/sos/plugins/origin.py
-index d7ba0817..4e50bdee 100644
---- a/sos/plugins/origin.py
-+++ b/sos/plugins/origin.py
-@@ -48,7 +48,7 @@ class OpenShiftOrigin(Plugin):
-     option_list = [
-         ("diag", "run 'oc adm diagnostics' to collect its output",
-          'fast', True),
--        ("diag-prevent", "set --prevent-modifications on 'oc adm diagnostics'",
-+        ("diag-prevent", "set --prevent-modification on 'oc adm diagnostics'",
-          'fast', False),
-     ]
- 
-@@ -137,7 +137,7 @@ class OpenShiftOrigin(Plugin):
-             if self.get_option('diag'):
-                 diag_cmd = "%s adm diagnostics -l 0" % self.oc_cmd_admin
-                 if self.get_option('diag-prevent'):
--                    diag_cmd += " --prevent-modifications=true"
-+                    diag_cmd += " --prevent-modification=true"
-                 self.add_cmd_output(diag_cmd)
-             self.add_journal(units=["atomic-openshift-master",
-                                     "atomic-openshift-master-api",
--- 
-2.13.6
-
diff --git a/SOURCES/sos-bz1474976-regexp-sub.patch b/SOURCES/sos-bz1474976-regexp-sub.patch
new file mode 100644
index 0000000..7ffcd64
--- /dev/null
+++ b/SOURCES/sos-bz1474976-regexp-sub.patch
@@ -0,0 +1,66 @@
+From b96bdab03f06408e162b1733b20e8ba9fbf8e012 Mon Sep 17 00:00:00 2001
+From: "Bryn M. Reeves" <bmr@redhat.com>
+Date: Mon, 2 Jul 2018 12:01:04 +0100
+Subject: [PATCH] [archive] fix add_string()/do_*_sub() regression
+
+A change in the handling of add_string() operations in the archive
+class causes the Plugin string substitution methods to fail (since
+the archive was enforcing a check that the path did not already
+exist - for substitutions this is always the case).
+
+Maintain the check for content that is being copied into the
+archive anew, but make the add_string() method override this and
+disable the existence checks.
+
+Signed-off-by: Bryn M. Reeves <bmr@redhat.com>
+---
+ sos/archive.py         | 14 ++++++++++----
+ tests/archive_tests.py | 12 ++----------
+ 2 files changed, 12 insertions(+), 14 deletions(-)
+
+diff --git a/sos/archive.py b/sos/archive.py
+index d53baf41..e153c09a 100644
+--- a/sos/archive.py
++++ b/sos/archive.py
+@@ -158,7 +158,7 @@ class FileCacheArchive(Archive):
+             name = name.lstrip(os.sep)
+         return (os.path.join(self._archive_root, name))
+ 
+-    def _check_path(self, src, path_type, dest=None):
++    def _check_path(self, src, path_type, dest=None, force=False):
+         """Check a new destination path in the archive.
+ 
+             Since it is possible for multiple plugins to collect the same
+@@ -185,6 +185,7 @@ class FileCacheArchive(Archive):
+             :param src: the source path to be copied to the archive
+             :param path_type: the type of object to be copied
+             :param dest: an optional destination path
++            :param force: force file creation even if the path exists
+             :returns: An absolute destination path if the path should be
+                       copied now or `None` otherwise
+         """
+@@ -208,6 +209,9 @@ class FileCacheArchive(Archive):
+                 stat.S_ISSOCK(mode)
+             ])
+ 
++        if force:
++            return dest
++
+         # Check destination path presence and type
+         if os.path.exists(dest):
+             # Use lstat: we care about the current object, not the referent.
+@@ -274,9 +278,11 @@ class FileCacheArchive(Archive):
+         with self._path_lock:
+             src = dest
+ 
+-            dest = self._check_path(dest, P_FILE)
+-            if not dest:
+-                return
++            # add_string() is a special case: it must always take precedence
++            # over any existing content in the archive, since it is used by
++            # the Plugin postprocessing hooks to perform regex substitution
++            # on file content.
++            dest = self._check_path(dest, P_FILE, force=True)
+ 
+             f = codecs.open(dest, 'w', encoding='utf-8')
+             if isinstance(content, bytes):
diff --git a/SOURCES/sos-bz1483414-opendaylight-plugin.patch b/SOURCES/sos-bz1483414-opendaylight-plugin.patch
deleted file mode 100644
index bc8f746..0000000
--- a/SOURCES/sos-bz1483414-opendaylight-plugin.patch
+++ /dev/null
@@ -1,182 +0,0 @@
-From 6013308e2ebfc45d72ef87c3d18fb2edce61d549 Mon Sep 17 00:00:00 2001
-From: Pavel Moravec <pmoravec@redhat.com>
-Date: Thu, 9 Nov 2017 13:10:15 +0100
-Subject: [PATCH] [opendaylight] new plugin for OpenDaylight
-
-Collecting logs and configs.
-
-Resolves: #1143
-
-Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
----
- sos/plugins/opendaylight.py | 38 ++++++++++++++++++++++++++++++++++++++
- 1 file changed, 38 insertions(+)
- create mode 100644 sos/plugins/opendaylight.py
-
-diff --git a/sos/plugins/opendaylight.py b/sos/plugins/opendaylight.py
-new file mode 100644
-index 00000000..33b4bdb3
---- /dev/null
-+++ b/sos/plugins/opendaylight.py
-@@ -0,0 +1,38 @@
-+# This program is free software; you can redistribute it and/or modify
-+# it under the terms of the GNU General Public License as published by
-+# the Free Software Foundation; either version 2 of the License, or
-+# (at your option) any later version.
-+
-+# This program is distributed in the hope that it will be useful,
-+# but WITHOUT ANY WARRANTY; without even the implied warranty of
-+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+# GNU General Public License for more details.
-+
-+# You should have received a copy of the GNU General Public License along
-+# with this program; if not, write to the Free Software Foundation, Inc.,
-+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-+
-+from sos.plugins import Plugin, RedHatPlugin
-+
-+
-+class OpenDaylight(Plugin, RedHatPlugin):
-+    """OpenDaylight network manager
-+    """
-+
-+    plugin_name = 'opendaylight'
-+    profiles = ('openstack', 'openstack_controller')
-+
-+    packages = ('opendaylight',)
-+
-+    def setup(self):
-+        self.add_copy_spec("/opt/opendaylight/etc/")
-+
-+        self.limit = self.get_option("log_size")
-+        if self.get_option("all_logs"):
-+            self.add_copy_spec("/opt/opendaylight/data/log/",
-+                               sizelimit=self.limit)
-+        else:
-+            self.add_copy_spec("/opt/opendaylight/data/log/*log",
-+                               sizelimit=self.limit)
-+
-+# vim: set et ts=4 sw=4 :
--- 
-2.13.6
-
-From e7552dd922d262ad13441f050b5223aea68d44ac Mon Sep 17 00:00:00 2001
-From: Pavel Moravec <pmoravec@redhat.com>
-Date: Thu, 9 Nov 2017 13:07:59 +0100
-Subject: [PATCH] [openvswitch] collect few ovs-ofctl dumps and OVS list
-
-Required for/by OpenDaylight.
-
-Related to: #1143
-
-Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
----
- sos/plugins/openvswitch.py | 5 +++++
- 1 file changed, 5 insertions(+)
-
-diff --git a/sos/plugins/openvswitch.py b/sos/plugins/openvswitch.py
-index b3bc4054..a137eda7 100644
---- a/sos/plugins/openvswitch.py
-+++ b/sos/plugins/openvswitch.py
-@@ -76,6 +76,8 @@ class OpenVSwitch(Plugin):
-             "ovs-appctl upcall/show",
-             # Capture DPDK and other parameters
-             "ovs-vsctl -t 5 get Open_vSwitch . other_config",
-+            # Capture OVS list
-+            "ovs-vsctl list Open_vSwitch",
-             # Capture DPDK datapath packet counters and config
-             "ovs-appctl dpctl/show -s",
-             # Capture DPDK queue to pmd mapping
-@@ -126,6 +128,9 @@ class OpenVSwitch(Plugin):
-                 for flow in flow_versions:
-                     if flow in br_protos:
-                         self.add_cmd_output([
-+                            "ovs-ofctl -O %s show %s" % (flow, br),
-+                            "ovs-ofctl -O %s dump-groups %s" % (flow, br),
-+                            "ovs-ofctl -O %s dump-group-stats %s" % (flow, br),
-                             "ovs-ofctl -O %s dump-flows %s" % (flow, br),
-                             "ovs-ofctl -O %s dump-ports-desc %s" % (flow, br)
-                         ])
--- 
-2.13.6
-
-From c68f2daf4751e0547743b207aa8e07b4df0759ab Mon Sep 17 00:00:00 2001
-From: Pavel Moravec <pmoravec@redhat.com>
-Date: Tue, 6 Feb 2018 12:07:11 +0100
-Subject: [PATCH] [opendaylight] Enable plugin by puppet-opendaylight package
-
-Required for ODL running in a container.
-
-Resolves: #1207
-
-Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
----
- sos/plugins/opendaylight.py | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/sos/plugins/opendaylight.py b/sos/plugins/opendaylight.py
-index 33b4bdb3..44e12b01 100644
---- a/sos/plugins/opendaylight.py
-+++ b/sos/plugins/opendaylight.py
-@@ -22,7 +22,7 @@ class OpenDaylight(Plugin, RedHatPlugin):
-     plugin_name = 'opendaylight'
-     profiles = ('openstack', 'openstack_controller')
-
--    packages = ('opendaylight',)
-+    packages = ('opendaylight', 'puppet-opendaylight')
-
-     def setup(self):
-         self.add_copy_spec("/opt/opendaylight/etc/")
--- 
-2.13.6
-
-From 86d0855b3c41c77b264b0076b128f22335897f7b Mon Sep 17 00:00:00 2001
-From: Pavel Moravec <pmoravec@redhat.com>
-Date: Wed, 20 Dec 2017 12:13:30 +0100
-Subject: [PATCH] [opendaylight] collect more logs and puppet config
-
-collect puppet-generated config, container and docker logs
-
-Resolves: #1171
-
-Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
----
- sos/plugins/opendaylight.py | 21 ++++++++++++++++-----
- 1 file changed, 16 insertions(+), 5 deletions(-)
-
-diff --git a/sos/plugins/opendaylight.py b/sos/plugins/opendaylight.py
-index 33b4bdb3d..cb9e1e85a 100644
---- a/sos/plugins/opendaylight.py
-+++ b/sos/plugins/opendaylight.py
-@@ -24,15 +24,26 @@ class OpenDaylight(Plugin, RedHatPlugin):
- 
-     packages = ('opendaylight', 'puppet-opendaylight')
- 
-+    var_puppet_gen = "/var/lib/config-data/puppet-generated/opendaylight"
-+
-     def setup(self):
--        self.add_copy_spec("/opt/opendaylight/etc/")
-+        self.add_copy_spec([
-+            "/opt/opendaylight/etc/",
-+            self.var_puppet_gen + "/opt/opendaylight/etc/",
-+        ])
- 
-         self.limit = self.get_option("log_size")
-         if self.get_option("all_logs"):
--            self.add_copy_spec("/opt/opendaylight/data/log/",
--                               sizelimit=self.limit)
-+            self.add_copy_spec([
-+                "/opt/opendaylight/data/log/",
-+                "/var/log/containers/opendaylight/",
-+            ], sizelimit=self.limit)
-         else:
--            self.add_copy_spec("/opt/opendaylight/data/log/*log",
--                               sizelimit=self.limit)
-+            self.add_copy_spec([
-+                "/opt/opendaylight/data/log/*.log*",
-+                "/var/log/containers/opendaylight/*.log*",
-+            ], sizelimit=self.limit)
-+
-+        self.add_cmd_output("docker logs opendaylight_api")
- 
- # vim: set et ts=4 sw=4 :
diff --git a/SOURCES/sos-bz1491042-keystone-domains.patch b/SOURCES/sos-bz1491042-keystone-domains.patch
deleted file mode 100644
index 9ff1339..0000000
--- a/SOURCES/sos-bz1491042-keystone-domains.patch
+++ /dev/null
@@ -1,69 +0,0 @@
-From 6d5c4d23d1a8fef70ab2e6b907104241322b3a49 Mon Sep 17 00:00:00 2001
-From: Pavel Moravec <pmoravec@redhat.com>
-Date: Wed, 15 Nov 2017 17:28:42 +0100
-Subject: [PATCH] [openstack_keystone] Properly collect (non)default keystone
- domains
-
-- call crudini instead of (wrapper and not necessarily installed)
- openstack-config
-- collect default /etc/keystone/domains only when crudini fails
-- scrub passwords in /etc/keystone/domains as well, when collected
-
-Resolves: #1147
-
-Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
----
- sos/plugins/openstack_keystone.py | 21 ++++++++++-----------
- 1 file changed, 10 insertions(+), 11 deletions(-)
-
-diff --git a/sos/plugins/openstack_keystone.py b/sos/plugins/openstack_keystone.py
-index 72c7f8e2..631e02b2 100644
---- a/sos/plugins/openstack_keystone.py
-+++ b/sos/plugins/openstack_keystone.py
-@@ -34,7 +34,6 @@ class OpenStackKeystone(Plugin):
-             "/etc/keystone/keystone.conf",
-             "/etc/keystone/logging.conf",
-             "/etc/keystone/policy.json",
--            "/etc/keystone/domains",
-             self.var_puppet_gen + "/etc/keystone/*.conf",
-             self.var_puppet_gen + "/etc/keystone/*.json",
-             self.var_puppet_gen + "/etc/httpd/conf/",
-@@ -58,14 +57,15 @@ class OpenStackKeystone(Plugin):
-                 "/var/log/containers/httpd/keystone/*log"
-             ], sizelimit=self.limit)
- 
--        # collect domain config directory, if exists
--        self.domain_config_dir_added = False
-+        # collect domain config directory, if specified
-+        # if not, collect default /etc/keystone/domains
-         self.domain_config_dir = self.get_cmd_output_now(
--                "openstack-config --get /etc/keystone/keystone.conf "
-+                "crudini --get /etc/keystone/keystone.conf "
-                 "identity domain_config_dir")
--        if self.domain_config_dir and os.path.isdir(self.domain_config_dir):
--            self.add_copy_spec(self.domain_config_dir)
--            self.domain_config_dir_added = True
-+        if self.domain_config_dir is None or \
-+                not(os.path.isdir(self.domain_config_dir)):
-+            self.domain_config_dir = "/etc/keystone/domains"
-+        self.add_copy_spec(self.domain_config_dir)
- 
-         if self.get_option("verify"):
-             self.add_cmd_output("rpm -V %s" % ' '.join(self.packages))
-@@ -98,10 +98,9 @@ class OpenStackKeystone(Plugin):
-             regexp, r"\1*********"
-         )
- 
--        # obfuscate LDAP plaintext passwords in domain config dir, if collected
--        if self.domain_config_dir_added:
--            self.do_path_regex_sub(self.domain_config_dir,
--                                   r"((?m)^\s*(%s)\s*=\s*)(.*)", r"\1********")
-+        # obfuscate LDAP plaintext passwords in domain config dir
-+        self.do_path_regex_sub(self.domain_config_dir,
-+                               r"((?m)^\s*(%s)\s*=\s*)(.*)", r"\1********")
- 
- 
- class DebianKeystone(OpenStackKeystone, DebianPlugin, UbuntuPlugin):
--- 
-2.13.6
-
diff --git a/SOURCES/sos-bz1494420-postgresql-scl-path.patch b/SOURCES/sos-bz1494420-postgresql-scl-path.patch
deleted file mode 100644
index 0262a16..0000000
--- a/SOURCES/sos-bz1494420-postgresql-scl-path.patch
+++ /dev/null
@@ -1,467 +0,0 @@
-From 0b93d1f69ccfcc76e1896ea0e5ff7854be69be13 Mon Sep 17 00:00:00 2001
-From: Pavel Moravec <pmoravec@redhat.com>
-Date: Sat, 25 Nov 2017 12:47:35 +0100
-Subject: [PATCH] [plugins] set proper PATH for SCL commands
-
-As SCL packages are deployed under /opt/${provider}/${scl}/,
-calling a SCL command needs that prefix in any path in PATH.
-
-Consequently, distro-specific SCL default path prefix of the provider must be
-defined in sos policies.
-
-Relevant to: #1154
-
-Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
----
- sos/plugins/__init__.py  | 37 ++++++++++++++++++++++++++++++-------
- sos/policies/__init__.py |  4 ++++
- sos/policies/redhat.py   |  1 +
- 3 files changed, 35 insertions(+), 7 deletions(-)
-
-diff --git a/sos/plugins/__init__.py b/sos/plugins/__init__.py
-index aa69b19d..2a8bc516 100644
---- a/sos/plugins/__init__.py
-+++ b/sos/plugins/__init__.py
-@@ -1066,25 +1066,48 @@ class SCLPlugin(RedHatPlugin):
-         output = sos_get_command_output("scl -l")["output"]
-         return [scl.strip() for scl in output.splitlines()]
- 
-+    def convert_cmd_scl(self, scl, cmd):
-+        """wrapping command in "scl enable" call and adds proper PATH
-+        """
-+        # load default SCL prefix to PATH
-+        prefix = self.policy().get_default_scl_prefix()
-+        # read prefix from /etc/scl/prefixes/${scl} and strip trailing '\n'
-+        try:
-+            prefix = open('/etc/scl/prefixes/%s' % scl, 'r').read()\
-+                     .rstrip('\n')
-+        except Exception as e:
-+            self._log_error("Failed to find prefix for SCL %s, using %s"
-+                            % (scl, prefix))
-+
-+        # expand PATH by equivalent prefixes under the SCL tree
-+        path = os.environ["PATH"]
-+        for p in path.split(':'):
-+            path = '%s/%s%s:%s' % (prefix, scl, p, path)
-+
-+        scl_cmd = "scl enable %s \"PATH=%s %s\"" % (scl, path, cmd)
-+        return scl_cmd
-+
-     def add_cmd_output_scl(self, scl, cmds, **kwargs):
-         """Same as add_cmd_output, except that it wraps command in
--        "scl enable" call.
-+        "scl enable" call and sets proper PATH.
-         """
-         if isinstance(cmds, six.string_types):
-             cmds = [cmds]
-         scl_cmds = []
--        scl_cmd_tpl = "scl enable %s \"%s\""
-         for cmd in cmds:
--            scl_cmds.append(scl_cmd_tpl % (scl, cmd))
-+            scl_cmds.append(convert_cmd_scl(scl, cmd))
-         self.add_cmd_output(scl_cmds, **kwargs)
- 
--    # config files for Software Collections are under /etc/opt/rh/${scl} and
--    # var files are under /var/opt/rh/${scl}. So we need to insert the paths
--    # after the appropriate root dir.
-+    # config files for Software Collections are under /etc/${prefix}/${scl} and
-+    # var files are under /var/${prefix}/${scl} where the ${prefix} is distro
-+    # specific path. So we need to insert the paths after the appropriate root
-+    # dir.
-     def convert_copyspec_scl(self, scl, copyspec):
-+        scl_prefix = self.policy().get_default_scl_prefix()
-         for rootdir in ['etc', 'var']:
-             p = re.compile('^/%s/' % rootdir)
--            copyspec = p.sub('/%s/opt/rh/%s/' % (rootdir, scl), copyspec)
-+            copyspec = p.sub('/%s/%s/%s/' % (rootdir, scl_prefix, scl),
-+                             copyspec)
-         return copyspec
- 
-     def add_copy_spec_scl(self, scl, copyspecs):
-diff --git a/sos/policies/__init__.py b/sos/policies/__init__.py
-index dffd801c..dc043105 100644
---- a/sos/policies/__init__.py
-+++ b/sos/policies/__init__.py
-@@ -194,6 +194,7 @@ No changes will be made to system configuration.
-     vendor_url = "http://www.example.com/"
-     vendor_text = ""
-     PATH = ""
-+    default_scl_prefix = ""
- 
-     _in_container = False
-     _host_sysroot = '/'
-@@ -271,6 +272,9 @@ No changes will be made to system configuration.
-             return tempfile.gettempdir()
-         return opt_tmp_dir
- 
-+    def get_default_scl_prefix(self):
-+        return self.default_scl_prefix
-+
-     def match_plugin(self, plugin_classes):
-         if len(plugin_classes) > 1:
-             for p in plugin_classes:
-diff --git a/sos/policies/redhat.py b/sos/policies/redhat.py
-index c7449439..2dfe0589 100644
---- a/sos/policies/redhat.py
-+++ b/sos/policies/redhat.py
-@@ -44,6 +44,7 @@ class RedHatPolicy(LinuxPolicy):
-     _rpmv_filter = ["debuginfo", "-devel"]
-     _in_container = False
-     _host_sysroot = '/'
-+    default_scl_prefix = '/opt/rh'
- 
-     def __init__(self, sysroot=None):
-         super(RedHatPolicy, self).__init__(sysroot=sysroot)
--- 
-2.13.6
-
-From 419ebe48ea408b6596ff4d7d9837079dc3057fcf Mon Sep 17 00:00:00 2001
-From: Pavel Moravec <pmoravec@redhat.com>
-Date: Sat, 25 Nov 2017 12:58:16 +0100
-Subject: [PATCH] [postgresql] Call SCL pg_dump with proper path
-
-Also stop storing pg_dump in an auxiliary tempdir but under regular
-sos_commands/postgresql directory.
-
-Resolves: #1154
-
-Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
----
- sos/plugins/postgresql.py | 43 ++++++++-----------------------------------
- 1 file changed, 8 insertions(+), 35 deletions(-)
-
-diff --git a/sos/plugins/postgresql.py b/sos/plugins/postgresql.py
-index 45c87e89..9ba696be 100644
---- a/sos/plugins/postgresql.py
-+++ b/sos/plugins/postgresql.py
-@@ -34,8 +34,6 @@ class PostgreSQL(Plugin):
- 
-     packages = ('postgresql',)
- 
--    tmp_dir = None
--
-     password_warn_text = " (password visible in process listings)"
- 
-     option_list = [
-@@ -47,11 +45,9 @@ class PostgreSQL(Plugin):
-         ('dbport', 'database server port number', '', '5432')
-     ]
- 
--    def pg_dump(self, pg_dump_command="pg_dump", filename="sos_pgdump.tar"):
-+    def do_pg_dump(self, scl=None, filename="pgdump.tar"):
-         if self.get_option("dbname"):
-             if self.get_option("password") or "PGPASSWORD" in os.environ:
--                self.tmp_dir = tempfile.mkdtemp()
--                dest_file = os.path.join(self.tmp_dir, filename)
-                 # We're only modifying this for ourself and our children so
-                 # there is no need to save and restore environment variables if
-                 # the user decided to pass the password on the command line.
-@@ -59,30 +55,21 @@ class PostgreSQL(Plugin):
-                     os.environ["PGPASSWORD"] = str(self.get_option("password"))
- 
-                 if self.get_option("dbhost"):
--                    cmd = "%s -U %s -h %s -p %s -w -f %s -F t %s" % (
--                        pg_dump_command,
-+                    cmd = "pg_dump -U %s -h %s -p %s -w -F t %s" % (
-                         self.get_option("username"),
-                         self.get_option("dbhost"),
-                         self.get_option("dbport"),
--                        dest_file,
-                         self.get_option("dbname")
-                     )
-                 else:
--                    cmd = "%s -C -U %s -w -f %s -F t %s " % (
--                        pg_dump_command,
-+                    cmd = "pg_dump -C -U %s -w -F t %s " % (
-                         self.get_option("username"),
--                        dest_file,
-                         self.get_option("dbname")
-                     )
- 
--                result = self.call_ext_prog(cmd)
--                if (result['status'] == 0):
--                    self.add_copy_spec(dest_file)
--                else:
--                    self._log_info(
--                        "Unable to execute pg_dump. Error(%s)" %
--                        (result['output'])
--                    )
-+                if scl is not None:
-+                    cmd = self.convert_cmd_scl(scl, cmd)
-+                self.add_cmd_output(cmd, suggest_filename=filename)
-             else:  # no password in env or options
-                 self.soslog.warning(
-                     "password must be supplied to dump a database."
-@@ -92,18 +79,7 @@ class PostgreSQL(Plugin):
-                 )
- 
-     def setup(self):
--        self.pg_dump()
--
--    def postproc(self):
--        import shutil
--        if self.tmp_dir:
--            try:
--                shutil.rmtree(self.tmp_dir)
--            except shutil.Error:
--                self.soslog.exception(
--                    "Unable to remove %s." % (self.tmp_dir)
--                )
--                self.add_alert("ERROR: Unable to remove %s." % (self.tmp_dir))
-+        self.do_pg_dump()
- 
- 
- class RedHatPostgreSQL(PostgreSQL, SCLPlugin):
-@@ -140,10 +116,7 @@ class RedHatPostgreSQL(PostgreSQL, SCLPlugin):
-         )
- 
-         if scl in self.scls_matched:
--            self.pg_dump(
--                pg_dump_command="scl enable rh-postgresql95 -- pg_dump",
--                filename="sos_scl_pgdump.tar"
--            )
-+            self.do_pg_dump(scl=scl, filename="pgdump-scl-%s.tar" % scl)
- 
- 
- class DebianPostgreSQL(PostgreSQL, DebianPlugin, UbuntuPlugin):
--- 
-2.13.6
-
-From 3f0fa8ef20bcc8ec2fb1ff54815141813d07b033 Mon Sep 17 00:00:00 2001
-From: Pavel Moravec <pmoravec@redhat.com>
-Date: Wed, 20 Dec 2017 11:47:33 +0100
-Subject: [PATCH] [plugins] allow add_cmd_output to collect binary output
-
-If a command output is a true binary data, allow add_cmd_output to
-collect the raw content and dont try to decode it as UTF-8.
-
-Resolves: #1169
-
-Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
----
- sos/archive.py            | 11 ++++++++++
- sos/plugins/__init__.py   | 51 +++++++++++++++++++++++++++++++----------------
- sos/plugins/postgresql.py |  4 +++-
- sos/utilities.py          |  5 +++--
- 4 files changed, 51 insertions(+), 20 deletions(-)
-
-diff --git a/sos/archive.py b/sos/archive.py
-index 607312a71..80e27b846 100644
---- a/sos/archive.py
-+++ b/sos/archive.py
-@@ -85,6 +85,9 @@ def add_file(self, src, dest=None):
-     def add_string(self, content, dest):
-         raise NotImplementedError
- 
-+    def add_binary(self, content, dest):
-+        raise NotImplementedError
-+
-     def add_link(self, source, link_name):
-         raise NotImplementedError
- 
-@@ -215,6 +218,14 @@ def add_string(self, content, dest):
-         self.log_debug("added string at '%s' to FileCacheArchive '%s'"
-                        % (src, self._archive_root))
- 
-+    def add_binary(self, content, dest):
-+        dest = self.dest_path(dest)
-+        self._check_path(dest)
-+        f = codecs.open(dest, 'wb', encoding=None)
-+        f.write(content)
-+        self.log_debug("added binary content at '%s' to FileCacheArchive '%s'"
-+                       % (dest, self._archive_root))
-+
-     def add_link(self, source, link_name):
-         dest = self.dest_path(link_name)
-         self._check_path(dest)
-diff --git a/sos/plugins/__init__.py b/sos/plugins/__init__.py
-index 2a8bc516e..0eccd40a1 100644
---- a/sos/plugins/__init__.py
-+++ b/sos/plugins/__init__.py
-@@ -222,6 +222,11 @@ def do_cmd_private_sub(self, cmd):
-             for called in self.executed_commands:
-                 if called['file'] is None:
-                     continue
-+                if called['binary'] == 'yes':
-+                    self._log_warn(("Command output '%s' collected as " +
-+                                    "binary, output isn't scrubbed despite " +
-+                                    "asked for") % called['exe'])
-+                    continue
-                 if fnmatch.fnmatch(called['exe'], globstr):
-                     path = os.path.join(self.commons['cmddir'], called['file'])
-                     readable = self.archive.open_file(path)
-@@ -260,6 +265,11 @@ def do_cmd_output_sub(self, cmd, regexp, subst):
-                 # was anything collected?
-                 if called['file'] is None:
-                     continue
-+                if called['binary'] == 'yes':
-+                    self._log_warn(("Command output '%s' collected as " +
-+                                    "binary, output isn't scrubbed despite " +
-+                                    "asked for") % called['exe'])
-+                    continue
-                 if fnmatch.fnmatch(called['exe'], globstr):
-                     path = os.path.join(self.commons['cmddir'], called['file'])
-                     self._log_debug("applying substitution to '%s'" % path)
-@@ -587,7 +597,8 @@ def getmtime(path):
-                 self.archive.add_link(link_path, _file)
- 
-     def get_command_output(self, prog, timeout=300, stderr=True,
--                           chroot=True, runat=None, env=None):
-+                           chroot=True, runat=None, env=None,
-+                           binary=False):
-         if chroot or self.commons['cmdlineopts'].chroot == 'always':
-             root = self.sysroot
-         else:
-@@ -595,7 +606,7 @@ def get_command_output(self, prog, timeout=300, stderr=True,
- 
-         result = sos_get_command_output(prog, timeout=timeout, stderr=stderr,
-                                         chroot=root, chdir=runat,
--                                        env=env)
-+                                        env=env, binary=binary)
- 
-         if result['status'] == 124:
-             self._log_warn("command '%s' timed out after %ds"
-@@ -611,7 +622,8 @@ def get_command_output(self, prog, timeout=300, stderr=True,
-                                    % (prog.split()[0], root))
-                     return self.get_command_output(prog, timeout=timeout,
-                                                    chroot=False, runat=runat,
--                                                   env=env)
-+                                                   env=env,
-+                                                   binary=binary)
-             self._log_debug("could not run '%s': command not found" % prog)
-         return result
- 
-@@ -632,14 +644,14 @@ def check_ext_prog(self, prog):
- 
-     def _add_cmd_output(self, cmd, suggest_filename=None,
-                         root_symlink=None, timeout=300, stderr=True,
--                        chroot=True, runat=None, env=None):
-+                        chroot=True, runat=None, env=None, binary=False):
-         """Internal helper to add a single command to the collection list."""
-         cmdt = (
-             cmd, suggest_filename,
-             root_symlink, timeout, stderr,
--            chroot, runat, env
-+            chroot, runat, env, binary
-         )
--        _tuplefmt = "('%s', '%s', '%s', %s, '%s', '%s', '%s', '%s')"
-+        _tuplefmt = "('%s', '%s', '%s', %s, '%s', '%s', '%s', '%s', '%s')"
-         _logstr = "packed command tuple: " + _tuplefmt
-         self._log_debug(_logstr % cmdt)
-         self.collect_cmds.append(cmdt)
-@@ -647,7 +659,7 @@ def _add_cmd_output(self, cmd, suggest_filename=None,
- 
-     def add_cmd_output(self, cmds, suggest_filename=None,
-                        root_symlink=None, timeout=300, stderr=True,
--                       chroot=True, runat=None, env=None):
-+                       chroot=True, runat=None, env=None, binary=False):
-         """Run a program or a list of programs and collect the output"""
-         if isinstance(cmds, six.string_types):
-             cmds = [cmds]
-@@ -656,7 +668,7 @@ def add_cmd_output(self, cmds, suggest_filename=None,
-         for cmd in cmds:
-             self._add_cmd_output(cmd, suggest_filename,
-                                  root_symlink, timeout, stderr,
--                                 chroot, runat, env)
-+                                 chroot, runat, env, binary)
- 
-     def get_cmd_output_path(self, name=None, make=True):
-         """Return a path into which this module should store collected
-@@ -712,14 +724,15 @@ def add_string_as_file(self, content, filename):
- 
-     def get_cmd_output_now(self, exe, suggest_filename=None,
-                            root_symlink=False, timeout=300, stderr=True,
--                           chroot=True, runat=None, env=None):
-+                           chroot=True, runat=None, env=None,
-+                           binary=False):
-         """Execute a command and save the output to a file for inclusion in the
-         report.
-         """
-         start = time()
-         result = self.get_command_output(exe, timeout=timeout, stderr=stderr,
-                                          chroot=chroot, runat=runat,
--                                         env=env)
-+                                         env=env, binary=binary)
-         self._log_debug("collected output of '%s' in %s"
-                         % (exe.split()[0], time() - start))
- 
-@@ -729,13 +742,17 @@ def get_cmd_output_now(self, exe, suggest_filename=None,
-             outfn = self._make_command_filename(exe)
- 
-         outfn_strip = outfn[len(self.commons['cmddir'])+1:]
--        self.archive.add_string(result['output'], outfn)
-+        if binary:
-+            self.archive.add_binary(result['output'], outfn)
-+        else:
-+            self.archive.add_string(result['output'], outfn)
-         if root_symlink:
-             self.archive.add_link(outfn, root_symlink)
- 
-         # save info for later
-         # save in our list
--        self.executed_commands.append({'exe': exe, 'file': outfn_strip})
-+        self.executed_commands.append({'exe': exe, 'file': outfn_strip,
-+                                       'binary': 'yes' if binary else 'no'})
-         self.commons['xmlreport'].add_command(cmdline=exe,
-                                               exitcode=result['status'],
-                                               f_stdout=outfn_strip)
-@@ -839,16 +856,16 @@ def _collect_cmd_output(self):
-                 timeout,
-                 stderr,
-                 chroot, runat,
--                env
-+                env, binary
-             ) = progs[0]
--            self._log_debug("unpacked command tuple: " +
--                            "('%s', '%s', '%s', %s, '%s', '%s', '%s', '%s')" %
--                            progs[0])
-+            self._log_debug(("unpacked command tuple: " +
-+                             "('%s', '%s', '%s', %s, '%s', '%s', '%s', '%s'," +
-+                             "'%s')") % progs[0])
-             self._log_info("collecting output of '%s'" % prog)
-             self.get_cmd_output_now(prog, suggest_filename=suggest_filename,
-                                     root_symlink=root_symlink, timeout=timeout,
-                                     stderr=stderr, chroot=chroot, runat=runat,
--                                    env=env)
-+                                    env=env, binary=binary)
- 
-     def _collect_strings(self):
-         for string, file_name in self.copy_strings:
-diff --git a/sos/plugins/postgresql.py b/sos/plugins/postgresql.py
-index 9ba696be2..2e330c9b5 100644
---- a/sos/plugins/postgresql.py
-+++ b/sos/plugins/postgresql.py
-@@ -69,7 +69,9 @@ def do_pg_dump(self, scl=None, filename="pgdump.tar"):
- 
-                 if scl is not None:
-                     cmd = self.convert_cmd_scl(scl, cmd)
--                self.add_cmd_output(cmd, suggest_filename=filename)
-+                self.add_cmd_output(cmd, suggest_filename=filename,
-+                                    binary=True)
-+
-             else:  # no password in env or options
-                 self.soslog.warning(
-                     "password must be supplied to dump a database."
-diff --git a/sos/utilities.py b/sos/utilities.py
-index 55bb1dc96..b5aa571b7 100644
---- a/sos/utilities.py
-+++ b/sos/utilities.py
-@@ -110,7 +110,8 @@ def is_executable(command):
- 
- 
- def sos_get_command_output(command, timeout=300, stderr=False,
--                           chroot=None, chdir=None, env=None):
-+                           chroot=None, chdir=None, env=None,
-+                           binary=False):
-     """Execute a command and return a dictionary of status and output,
-     optionally changing root or current working directory before
-     executing command.
-@@ -164,7 +165,7 @@ def _child_prep_fn():
- 
-     return {
-         'status': p.returncode,
--        'output': stdout.decode('utf-8', 'ignore')
-+        'output': stdout if binary else stdout.decode('utf-8', 'ignore')
-     }
- 
- 
diff --git a/SOURCES/sos-bz1506908-openstack-containerized.patch b/SOURCES/sos-bz1506908-openstack-containerized.patch
deleted file mode 100644
index 167e435..0000000
--- a/SOURCES/sos-bz1506908-openstack-containerized.patch
+++ /dev/null
@@ -1,503 +0,0 @@
-From e63c17d902f99d96cbd4cb2a06d9cbbf8a4d4c18 Mon Sep 17 00:00:00 2001
-From: Martin Schuppert <mschuppert@redhat.com>
-Date: Tue, 7 Nov 2017 18:07:47 +0100
-Subject: [PATCH] [openstack_nova] added missing nova container config
-
-Tripleo Pike opinionated config+log paths to be collected
-for services, when running in containers.
-
-The nova configuration for the nova and placement container
-was included, but the nova configuration libvirt container
-was missing. Also the httpd configs for the nova contaier
-were added.
-
-This is a change to #1130
-
-Signed-off-by: Martin Schuppert mschuppe@redhat.com
----
- sos/plugins/openstack_nova.py | 6 +++++-
- 1 file changed, 5 insertions(+), 1 deletion(-)
-
-diff --git a/sos/plugins/openstack_nova.py b/sos/plugins/openstack_nova.py
-index 1fbfa76a..cdd29760 100644
---- a/sos/plugins/openstack_nova.py
-+++ b/sos/plugins/openstack_nova.py
-@@ -87,7 +87,10 @@ class OpenStackNova(Plugin):
-             "/etc/nova/",
-             self.var_puppet_gen + "/etc/nova/",
-             self.var_puppet_gen + "/etc/my.cnf.d/tripleo.cnf",
--            self.var_puppet_gen + "_placement/var/spool/cron/nova",
-+            self.var_puppet_gen + "/var/spool/cron/nova",
-+            self.var_puppet_gen + "/etc/httpd/conf/",
-+            self.var_puppet_gen + "/etc/httpd/conf.d/",
-+            self.var_puppet_gen + "/etc/httpd/conf.modules.d/*.conf",
-             self.var_puppet_gen + "_placement/etc/nova/",
-             self.var_puppet_gen + "_placement/etc/httpd/conf/",
-             self.var_puppet_gen + "_placement/etc/httpd/conf.d/",
-@@ -96,6 +99,7 @@ class OpenStackNova(Plugin):
-             self.var_puppet_gen + "/../memcached/etc/sysconfig/memcached",
-             self.var_puppet_gen + "_libvirt/etc/libvirt/",
-             self.var_puppet_gen + "_libvirt/etc/my.cnf.d/tripleo.cnf",
-+            self.var_puppet_gen + "_libvirt/etc/nova/",
-             self.var_puppet_gen + "_libvirt/etc/nova/migration/"
-             "authorized_keys",
-             self.var_puppet_gen + "_libvirt/var/lib/nova/.ssh/config",
--- 
-2.13.6
-
-From 410733862a1f5ea1f9666d1fa41a7b5d3390e3c6 Mon Sep 17 00:00:00 2001
-From: Martin Schuppert <mschuppert@redhat.com>
-Date: Wed, 8 Nov 2017 17:57:54 +0100
-Subject: [PATCH] [openstack_[glance|heat|cinder|nova]] limit command run
-
-Collect "glance-manage db_version" and similar commands from the
-four plugins only if the relevant services or containers are
-running. Otherwise the commands get stuck and timeout.
-
-This is an enhancement to #1124 to check for containers + do the
-same for nova + cinder.
-
-Signed-off-by: Martin Schuppert mschuppe@redhat.com
-
-Edited to remove use of shell syntax.
-
-Fixes: #1139
-
-Signed-off-by: Bryn M. Reeves <bmr@redhat.com>
----
- sos/plugins/openstack_cinder.py | 30 ++++++++++---
- sos/plugins/openstack_glance.py | 33 +++++++++++----
- sos/plugins/openstack_heat.py   | 23 ++++++++--
- sos/plugins/openstack_nova.py   | 94 +++++++++++++++++++++++++++--------------
- 4 files changed, 129 insertions(+), 51 deletions(-)
-
-diff --git a/sos/plugins/openstack_cinder.py b/sos/plugins/openstack_cinder.py
-index abfd267b..a023105c 100644
---- a/sos/plugins/openstack_cinder.py
-+++ b/sos/plugins/openstack_cinder.py
-@@ -27,16 +27,34 @@ class OpenStackCinder(Plugin):
-     plugin_name = "openstack_cinder"
-     profiles = ('openstack', 'openstack_controller')
- 
--    option_list = [("db", "gathers openstack cinder db version", "slow",
--                    False)]
--
-     var_puppet_gen = "/var/lib/config-data/puppet-generated/cinder"
- 
-     def setup(self):
--        if self.get_option("db"):
-+
-+        # collect commands output only if the openstack-cinder-api service
-+        # is running
-+        service_status = self.get_command_output(
-+            "systemctl status openstack-cinder-api.service"
-+        )
-+
-+        container_status = self.get_command_output("docker ps")
-+        in_container = False
-+        if container_status['status'] == 0:
-+            for line in container_status['output'].splitlines():
-+                if line.endswith("cinder_api"):
-+                    in_container = True
-+
-+        if (service_status['status'] == 0) or in_container:
-+            cinder_config = ""
-+            # if containerized we need to pass the config to the cont.
-+            if in_container:
-+                cinder_config = "--config-dir " + self.var_puppet_gen + \
-+                                "/etc/cinder/"
-+
-             self.add_cmd_output(
--                "cinder-manage db version",
--                suggest_filename="cinder_db_version")
-+                "cinder-manage " + cinder_config + " db version",
-+                suggest_filename="cinder_db_version"
-+            )
- 
-         self.add_copy_spec([
-             "/etc/cinder/",
-diff --git a/sos/plugins/openstack_glance.py b/sos/plugins/openstack_glance.py
-index fdd789a8..4cdc6dc6 100644
---- a/sos/plugins/openstack_glance.py
-+++ b/sos/plugins/openstack_glance.py
-@@ -54,22 +54,37 @@ class OpenStackGlance(Plugin):
-         if self.get_option("verify"):
-             self.add_cmd_output("rpm -V %s" % ' '.join(self.packages))
- 
--        vars_all = [p in os.environ for p in [
--                    'OS_USERNAME', 'OS_PASSWORD']]
--
--        vars_any = [p in os.environ for p in [
--                    'OS_TENANT_NAME', 'OS_PROJECT_NAME']]
--
-         # collect commands output only if the openstack-glance-api service
-         # is running
-         service_status = self.get_command_output(
--                "systemctl status openstack-glance-api.service"
-+            "systemctl status openstack-glance-api.service"
-         )
--        if service_status['status'] == 0:
-+
-+        container_status = self.get_command_output("docker ps")
-+        in_container = False
-+        if container_status['status'] == 0:
-+            for line in container_status['output'].splitlines():
-+                if line.endswith("cinder_api"):
-+                    in_container = True
-+
-+        if (service_status['status'] == 0) or in_container:
-+            glance_config = ""
-+            # if containerized we need to pass the config to the cont.
-+            if in_container:
-+                glance_config = "--config-dir " + self.var_puppet_gen + \
-+                                "/etc/glance/"
-+
-             self.add_cmd_output(
--                "glance-manage db_version",
-+                "glance-manage " + glance_config + " db_version",
-                 suggest_filename="glance_db_version"
-             )
-+
-+            vars_all = [p in os.environ for p in [
-+                        'OS_USERNAME', 'OS_PASSWORD']]
-+
-+            vars_any = [p in os.environ for p in [
-+                        'OS_TENANT_NAME', 'OS_PROJECT_NAME']]
-+
-             if not (all(vars_all) and any(vars_any)):
-                 self.soslog.warning("Not all environment variables set. "
-                                     "Source the environment file for the user "
-diff --git a/sos/plugins/openstack_heat.py b/sos/plugins/openstack_heat.py
-index de34ed15..e3395fab 100644
---- a/sos/plugins/openstack_heat.py
-+++ b/sos/plugins/openstack_heat.py
-@@ -32,11 +32,26 @@ class OpenStackHeat(Plugin):
- 
-         # collect commands output only if the openstack-heat-api service
-         # is running
--        service_status = self.get_command_output("systemctl status "
--                                                 "openstack-heat-api.service")
--        if service_status['status'] == 0:
-+        service_status = self.get_command_output(
-+            "systemctl status openstack-heat-api.service"
-+        )
-+
-+        container_status = self.get_command_output("docker ps")
-+        in_container = False
-+        if container_status['status'] == 0:
-+            for line in container_status['output'].splitlines():
-+                if line.endswith("cinder_api"):
-+                    in_container = True
-+
-+        if (service_status['status'] == 0) or in_container:
-+            heat_config = ""
-+            # if containerized we need to pass the config to the cont.
-+            if in_container:
-+                heat_config = "--config-dir " + self.var_puppet_gen + \
-+                                "_api/etc/heat/"
-+
-             self.add_cmd_output(
--                "heat-manage db_version",
-+                "heat-manage " + heat_config + " db_version",
-                 suggest_filename="heat_db_version"
-             )
- 
-diff --git a/sos/plugins/openstack_nova.py b/sos/plugins/openstack_nova.py
-index cdd29760..e8839a2a 100644
---- a/sos/plugins/openstack_nova.py
-+++ b/sos/plugins/openstack_nova.py
-@@ -32,40 +32,70 @@ class OpenStackNova(Plugin):
-     var_puppet_gen = "/var/lib/config-data/puppet-generated/nova"
- 
-     def setup(self):
--        # commands we do not need to source the environment file
--        self.add_cmd_output("nova-manage db version")
--        self.add_cmd_output("nova-manage fixed list")
--        self.add_cmd_output("nova-manage floating list")
- 
--        vars_all = [p in os.environ for p in [
--                    'OS_USERNAME', 'OS_PASSWORD']]
--
--        vars_any = [p in os.environ for p in [
--                    'OS_TENANT_NAME', 'OS_PROJECT_NAME']]
-+        # collect commands output only if the openstack-nova-api service
-+        # is running
-+        service_status = self.get_command_output(
-+            "systemctl status openstack-nova-api.service"
-+        )
- 
--        if not (all(vars_all) and any(vars_any)):
--            self.soslog.warning("Not all environment variables set. Source "
--                                "the environment file for the user intended "
--                                "to connect to the OpenStack environment.")
--        else:
--            self.add_cmd_output("nova service-list")
--            self.add_cmd_output("openstack flavor list --long")
--            self.add_cmd_output("nova network-list")
--            self.add_cmd_output("nova list")
--            self.add_cmd_output("nova agent-list")
--            self.add_cmd_output("nova version-list")
--            self.add_cmd_output("nova host-list")
--            self.add_cmd_output("openstack quota show")
--            self.add_cmd_output("openstack hypervisor stats show")
--            # get details for each nova instance
--            cmd = "openstack server list -f value"
--            nova_instances = self.call_ext_prog(cmd)['output']
--            for instance in nova_instances.splitlines():
--                instance = instance.split()[0]
--                cmd = "openstack server show %s" % (instance)
--                self.add_cmd_output(
--                    cmd,
--                    suggest_filename="instance-" + instance + ".log")
-+        container_status = self.get_command_output("docker ps")
-+        in_container = False
-+        if container_status['status'] == 0:
-+            for line in container_status['output'].splitlines():
-+                if line.endswith("cinder_api"):
-+                    in_container = True
-+
-+        if (service_status['status'] == 0) or in_container:
-+            nova_config = ""
-+            # if containerized we need to pass the config to the cont.
-+            if in_container:
-+                nova_config = "--config-dir " + self.var_puppet_gen + \
-+                                "/etc/nova/"
-+
-+            self.add_cmd_output(
-+                "nova-manage " + nova_config + " db version",
-+                suggest_filename="nova-manage_db_version"
-+            )
-+            self.add_cmd_output(
-+                "nova-manage " + nova_config + " fixed list",
-+                suggest_filename="nova-manage_fixed_list"
-+            )
-+            self.add_cmd_output(
-+                "nova-manage " + nova_config + " floating list",
-+                suggest_filename="nova-manage_floating_list"
-+            )
-+
-+            vars_all = [p in os.environ for p in [
-+                        'OS_USERNAME', 'OS_PASSWORD']]
-+
-+            vars_any = [p in os.environ for p in [
-+                        'OS_TENANT_NAME', 'OS_PROJECT_NAME']]
-+
-+            if not (all(vars_all) and any(vars_any)):
-+                self.soslog.warning("Not all environment variables set. "
-+                                    "Source the environment file for the user "
-+                                    "intended to connect to the OpenStack "
-+                                    "environment.")
-+            else:
-+                self.add_cmd_output("nova service-list")
-+                self.add_cmd_output("openstack flavor list --long")
-+                self.add_cmd_output("nova network-list")
-+                self.add_cmd_output("nova list")
-+                self.add_cmd_output("nova agent-list")
-+                self.add_cmd_output("nova version-list")
-+                self.add_cmd_output("nova hypervisor-list")
-+                self.add_cmd_output("openstack quota show")
-+                self.add_cmd_output("openstack hypervisor stats show")
-+                # get details for each nova instance
-+                cmd = "openstack server list -f value"
-+                nova_instances = self.call_ext_prog(cmd)['output']
-+                for instance in nova_instances.splitlines():
-+                    instance = instance.split()[0]
-+                    cmd = "openstack server show %s" % (instance)
-+                    self.add_cmd_output(
-+                        cmd,
-+                        suggest_filename="instance-" + instance + ".log")
- 
-         self.limit = self.get_option("log_size")
-         if self.get_option("all_logs"):
--- 
-2.13.6
-
-From 404951b99d5e2e46fe0757d27b984eb5ff94cf76 Mon Sep 17 00:00:00 2001
-From: Pavel Moravec <pmoravec@redhat.com>
-Date: Wed, 8 Nov 2017 19:30:57 +0100
-Subject: [PATCH] [etcd] fix typo in etcdctl subcmds
-
-subcmds variable set while "subcmd" referred instead.
-
-Resolves: #1141
-
-Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
----
- sos/plugins/etcd.py | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/sos/plugins/etcd.py b/sos/plugins/etcd.py
-index 4f072a4c..bd5d10d8 100644
---- a/sos/plugins/etcd.py
-+++ b/sos/plugins/etcd.py
-@@ -40,7 +40,7 @@ class etcd(Plugin, RedHatPlugin):
-            'ls --recursive',
-         ]
- 
--        self.add_cmd_output(['%s %s' % (self.cmd, sub) for sub in subcmd])
-+        self.add_cmd_output(['%s %s' % (self.cmd, sub) for sub in subcmds])
- 
-         urls = [
-             '/v2/stats/leader',
--- 
-2.13.6
-
-From 2140b1611565078c4a6536782c013a525722e0da Mon Sep 17 00:00:00 2001
-From: Martin Schuppert <mschuppert@redhat.com>
-Date: Thu, 21 Dec 2017 08:00:41 +0100
-Subject: [PATCH] [openstack_glance|heat|nova] fix api container names
-
-Container names of glance, heat and nova api was not correct
-when verify if a the container is running.
-
-Signed-off-by: Martin Schuppert <mschuppe@redhat.com>
----
- sos/plugins/openstack_glance.py | 2 +-
- sos/plugins/openstack_heat.py   | 2 +-
- sos/plugins/openstack_nova.py   | 2 +-
- 3 files changed, 3 insertions(+), 3 deletions(-)
-
-diff --git a/sos/plugins/openstack_glance.py b/sos/plugins/openstack_glance.py
-index 4cdc6dc62..d7588abe0 100644
---- a/sos/plugins/openstack_glance.py
-+++ b/sos/plugins/openstack_glance.py
-@@ -64,7 +64,7 @@ def setup(self):
-         in_container = False
-         if container_status['status'] == 0:
-             for line in container_status['output'].splitlines():
--                if line.endswith("cinder_api"):
-+                if line.endswith("glance_api"):
-                     in_container = True
- 
-         if (service_status['status'] == 0) or in_container:
-diff --git a/sos/plugins/openstack_heat.py b/sos/plugins/openstack_heat.py
-index e3395fabd..0cf7c8595 100644
---- a/sos/plugins/openstack_heat.py
-+++ b/sos/plugins/openstack_heat.py
-@@ -40,7 +40,7 @@ def setup(self):
-         in_container = False
-         if container_status['status'] == 0:
-             for line in container_status['output'].splitlines():
--                if line.endswith("cinder_api"):
-+                if line.endswith("heat_api"):
-                     in_container = True
- 
-         if (service_status['status'] == 0) or in_container:
-diff --git a/sos/plugins/openstack_nova.py b/sos/plugins/openstack_nova.py
-index e8839a2a6..951e69cba 100644
---- a/sos/plugins/openstack_nova.py
-+++ b/sos/plugins/openstack_nova.py
-@@ -43,7 +43,7 @@ def setup(self):
-         in_container = False
-         if container_status['status'] == 0:
-             for line in container_status['output'].splitlines():
--                if line.endswith("cinder_api"):
-+                if line.endswith("nova_api"):
-                     in_container = True
- 
-         if (service_status['status'] == 0) or in_container:
-From 6f5295056cbea8220407fe42159b15ea1a135e46 Mon Sep 17 00:00:00 2001
-From: Martin Schuppert <mschuppert@redhat.com>
-Date: Wed, 10 Jan 2018 20:50:46 +0100
-Subject: [PATCH] [plugins] add method to check process list for a named
- process
-
-In openstack plugins we collect data depending if processes with
-different names are there. This introduces a check_process_by_name
-Plugin method to have a consistent way to do this from any plugin
-where needed.
-
-Signed-off-by: Martin Schuppert mschuppe@redhat.com
----
- sos/plugins/__init__.py | 16 ++++++++++++++++
- 1 file changed, 16 insertions(+)
-
-diff --git a/sos/plugins/__init__.py b/sos/plugins/__init__.py
-index 2a8bc516e..3f3a19558 100644
---- a/sos/plugins/__init__.py
-+++ b/sos/plugins/__init__.py
-@@ -1026,6 +1026,22 @@ def report(self):
-         else:
-             return html
- 
-+    def check_process_by_name(self, process):
-+        """Checks if a named process is found in /proc/[0-9]*/cmdline.
-+        Returns either True or False."""
-+        status = False
-+        cmd_line_glob = "/proc/[0-9]*/cmdline"
-+        try:
-+            cmd_line_paths = glob.glob(cmd_line_glob)
-+            for path in cmd_line_paths:
-+                f = open(path, 'r')
-+                cmd_line = f.read().strip()
-+                if process in cmd_line:
-+                    status = True
-+        except IOError as e:
-+            return False
-+        return status
-+
- 
- class RedHatPlugin(object):
-     """Tagging class for Red Hat's Linux distributions"""
-From 12f1c4f851c771a0173f6e00657e1a983af8451c Mon Sep 17 00:00:00 2001
-From: Martin Schuppert <mschuppert@redhat.com>
-Date: Fri, 29 Dec 2017 09:20:33 +0100
-Subject: [PATCH] [openstack_cinder] check for api service running via
- cinder_wsgi
-
-With OSP11 cinder api changed to run via https wsgi. To check for
-running cinder-manage command we also need to take this situation.
-The change checks for cinder_wsgi process.
-
-Signed-off-by: Martin Schuppert <mschuppert@redhat.com>
----
- sos/plugins/__init__.py         | 11 +++++++++++
- sos/plugins/openstack_cinder.py | 27 +++++++++++++++------------
- 2 files changed, 26 insertions(+), 12 deletions(-)
-
-diff --git a/sos/plugins/openstack_cinder.py b/sos/plugins/openstack_cinder.py
-index a023105c8..cc9181efa 100644
---- a/sos/plugins/openstack_cinder.py
-+++ b/sos/plugins/openstack_cinder.py
-@@ -31,26 +31,29 @@ class OpenStackCinder(Plugin):
- 
-     def setup(self):
- 
--        # collect commands output only if the openstack-cinder-api service
--        # is running
--        service_status = self.get_command_output(
--            "systemctl status openstack-cinder-api.service"
--        )
-+        # check if either standalone (cinder-api) or httpd wsgi (cinder_wsgi)
-+        # is up and running
-+        cinder_process = ["cinder_wsgi", "cinder-api"]
-+        in_ps = False
-+        for process in cinder_process:
-+            in_ps = self.check_process_by_name(process)
-+            if in_ps:
-+                break
- 
-         container_status = self.get_command_output("docker ps")
-         in_container = False
-+        cinder_config = ""
-         if container_status['status'] == 0:
-             for line in container_status['output'].splitlines():
-                 if line.endswith("cinder_api"):
-                     in_container = True
-+                    # if containerized we need to pass the config to the cont.
-+                    cinder_config = "--config-dir " + self.var_puppet_gen + \
-+                                    "/etc/cinder/"
-+                    break
- 
--        if (service_status['status'] == 0) or in_container:
--            cinder_config = ""
--            # if containerized we need to pass the config to the cont.
--            if in_container:
--                cinder_config = "--config-dir " + self.var_puppet_gen + \
--                                "/etc/cinder/"
--
-+        # collect commands output if the standalone, wsgi or container is up
-+        if in_ps or in_container:
-             self.add_cmd_output(
-                 "cinder-manage " + cinder_config + " db version",
-                 suggest_filename="cinder_db_version"
diff --git a/SOURCES/sos-bz1509079-vdo.patch b/SOURCES/sos-bz1509079-vdo.patch
deleted file mode 100644
index 3aafd7a..0000000
--- a/SOURCES/sos-bz1509079-vdo.patch
+++ /dev/null
@@ -1,33 +0,0 @@
-From 2f51f6b7478e69d8f9e8e3a31bddccf6af88c720 Mon Sep 17 00:00:00 2001
-From: Pavel Moravec <pmoravec@redhat.com>
-Date: Tue, 7 Nov 2017 14:28:33 +0100
-Subject: [PATCH] [vdo] collect proper sys paths
-
-VDO plugin shall collect /sys/kvdo and /sys/uds
-instead of original /sys/vdo and /sys/albireo
-
-Resolves: #1134
-
-Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
----
- sos/plugins/vdo.py | 4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
-diff --git a/sos/plugins/vdo.py b/sos/plugins/vdo.py
-index e6eff4ad..bdf79886 100644
---- a/sos/plugins/vdo.py
-+++ b/sos/plugins/vdo.py
-@@ -27,8 +27,8 @@ class Vdo(Plugin, RedHatPlugin):
-     profiles = ('storage',)
-     packages = ('vdo',)
-     files = (
--        '/sys/vdo',
--        '/sys/albireo',
-+        '/sys/kvdo',
-+        '/sys/uds',
-         '/etc/vdoconf.yml',
-         '/etc/vdoconf.xml'
-     )
--- 
-2.13.6
-
diff --git a/SOURCES/sos-bz1517767-osp-ironic.patch b/SOURCES/sos-bz1517767-osp-ironic.patch
deleted file mode 100644
index 697cf3e..0000000
--- a/SOURCES/sos-bz1517767-osp-ironic.patch
+++ /dev/null
@@ -1,160 +0,0 @@
-From 24dc645e5f09af921bd74bf2808ef8c99dd4cfb9 Mon Sep 17 00:00:00 2001
-From: Dmitry Tantsur <divius.inside@gmail.com>
-Date: Thu, 26 Oct 2017 11:59:26 +0000
-Subject: [PATCH 1/4] [openstack_ironic] collect drivers, ports and port groups
-
-Existing collection of ports is extended with the --long option.
-
-Signed-off-by: Dmitry Tantsur <divius.inside@gmail.com>
----
- sos/plugins/openstack_ironic.py | 4 +++-
- 1 file changed, 3 insertions(+), 1 deletion(-)
-
-diff --git a/sos/plugins/openstack_ironic.py b/sos/plugins/openstack_ironic.py
-index f4e0a97d3..b25bf81b2 100644
---- a/sos/plugins/openstack_ironic.py
-+++ b/sos/plugins/openstack_ironic.py
-@@ -82,8 +82,10 @@ def setup(self):
-                                 "the environment file for the user intended "
-                                 "to connect to the OpenStack environment.")
-         else:
-+            self.add_cmd_output("openstack baremetal driver list --long")
-             self.add_cmd_output("openstack baremetal node list --long")
--            self.add_cmd_output("openstack baremetal port list")
-+            self.add_cmd_output("openstack baremetal port list --long")
-+            self.add_cmd_output("openstack baremetal port group list --long")
- 
-     def postproc(self):
-         protect_keys = [
-
-From a03031587a3470a92f3c4002c7e645b18867ff61 Mon Sep 17 00:00:00 2001
-From: Dmitry Tantsur <divius.inside@gmail.com>
-Date: Thu, 26 Oct 2017 12:01:46 +0000
-Subject: [PATCH 2/4] [openstack_ironic] collect information about
- ironic-inspector
-
-ironic-discoverd was renamed to ironic-inspector in the Liberty
-release. This change adds support for the new name.
-
-Signed-off-by: Dmitry Tantsur <divius.inside@gmail.com>
----
- sos/plugins/openstack_ironic.py | 24 +++++++++++++++++++++++-
- 1 file changed, 23 insertions(+), 1 deletion(-)
-
-diff --git a/sos/plugins/openstack_ironic.py b/sos/plugins/openstack_ironic.py
-index b25bf81b2..123d4b7cc 100644
---- a/sos/plugins/openstack_ironic.py
-+++ b/sos/plugins/openstack_ironic.py
-@@ -77,7 +77,9 @@ def setup(self):
-         vars_any = [p in os.environ for p in [
-                     'OS_TENANT_NAME', 'OS_PROJECT_NAME']]
- 
--        if not (all(vars_all) and any(vars_any)):
-+        self.osc_available = all(vars_all) and any(vars_any)
-+
-+        if not self.osc_available:
-             self.soslog.warning("Not all environment variables set. Source "
-                                 "the environment file for the user intended "
-                                 "to connect to the OpenStack environment.")
-@@ -137,4 +139,24 @@ def setup(self):
-             self.add_journal(units="openstack-ironic-discoverd")
-             self.add_journal(units="openstack-ironic-discoverd-dnsmasq")
- 
-+        # ironic-discoverd was renamed to ironic-inspector in Liberty
-+        self.conf_list.append('/etc/ironic-inspector/*')
-+        self.conf_list.append(self.var_puppet_gen + '/etc/ironic-inspector/*')
-+        self.add_copy_spec('/etc/ironic-inspector/')
-+        self.add_copy_spec(self.var_puppet_gen + '/etc/ironic-inspector/')
-+        self.add_copy_spec('/var/lib/ironic-inspector/')
-+        if self.get_option("all_logs"):
-+            self.add_copy_spec('/var/log/ironic-inspector/')
-+            self.add_copy_spec('/var/log/containers/ironic-inspector/')
-+        else:
-+            self.add_copy_spec('/var/log/ironic-inspector/*.log')
-+            self.add_copy_spec('/var/log/ironic-inspector/ramdisk/')
-+            self.add_copy_spec('/var/log/containers/ironic-inspector/*.log')
-+            self.add_copy_spec('/var/log/containers/ironic-inspector/ramdisk/')
-+
-+        self.add_journal(units="openstack-ironic-inspector-dnsmasq")
-+
-+        if self.osc_available:
-+            self.add_cmd_output("openstack baremetal introspection list")
-+
- # vim: set et ts=4 sw=4 :
-
-From 9c91e28aa7356b96a198606a74d1b39d7ec66891 Mon Sep 17 00:00:00 2001
-From: Dmitry Tantsur <divius.inside@gmail.com>
-Date: Thu, 26 Oct 2017 12:12:25 +0000
-Subject: [PATCH 3/4] [openstack_ironic] collect PXE environment information
-
-This adds two directories, /httpboot and /tftpboot, as well as the version
-of the iPXE boot images package.
-
-Signed-off-by: Dmitry Tantsur <divius.inside@gmail.com>
----
- sos/plugins/openstack_ironic.py | 7 +++----
- 1 file changed, 3 insertions(+), 4 deletions(-)
-
-diff --git a/sos/plugins/openstack_ironic.py b/sos/plugins/openstack_ironic.py
-index 123d4b7cc..51118634b 100644
---- a/sos/plugins/openstack_ironic.py
-+++ b/sos/plugins/openstack_ironic.py
-@@ -63,10 +63,9 @@ def setup(self):
-                 "/var/log/containers/httpd/ironic-api/*log"
-             ], sizelimit=self.limit)
- 
--        self.add_cmd_output('ls -laRt /var/lib/ironic/')
--        self.add_cmd_output(
--            'ls -laRt ' + self.var_puppet_gen + '/var/lib/ironic/'
--        )
-+        for path in ['/var/lib/ironic', '/httpboot', '/tftpboot']:
-+            self.add_cmd_output('ls -laRt %s' % path)
-+            self.add_cmd_output('ls -laRt %s' % (self.var_puppet_gen + path))
- 
-         if self.get_option("verify"):
-             self.add_cmd_output("rpm -V %s" % ' '.join(self.packages))
-
-From c33d82592b7145c6a26a47280b750eea41eeb63e Mon Sep 17 00:00:00 2001
-From: Dmitry Tantsur <divius.inside@gmail.com>
-Date: Thu, 26 Oct 2017 12:25:08 +0000
-Subject: [PATCH 4/4] [openstack_ironic] collect introspection data for all
- nodes
-
-Signed-off-by: Dmitry Tantsur <divius.inside@gmail.com>
----
- sos/plugins/openstack_ironic.py | 16 ++++++++++++++++
- 1 file changed, 16 insertions(+)
-
-diff --git a/sos/plugins/openstack_ironic.py b/sos/plugins/openstack_ironic.py
-index 51118634b..ddb094e01 100644
---- a/sos/plugins/openstack_ironic.py
-+++ b/sos/plugins/openstack_ironic.py
-@@ -125,6 +125,20 @@ class RedHatIronic(OpenStackIronic, RedHatPlugin):
-         'openstack-ironic-discoverd-ramdisk'
-     ]
- 
-+    def collect_introspection_data(self):
-+        uuids_result = self.call_ext_prog('openstack baremetal node list '
-+                                          '-f value -c UUID')
-+        if uuids_result['status']:
-+            self.soslog.warning('Failed to fetch list of ironic node UUIDs, '
-+                                'introspection data won\'t be collected')
-+            return
-+
-+        uuids = [uuid for uuid in uuids_result['output'].split()
-+                 if uuid.strip()]
-+        for uuid in uuids:
-+            self.add_cmd_output('openstack baremetal introspection '
-+                                'data save %s' % uuid)
-+
-     def setup(self):
-         super(RedHatIronic, self).setup()
- 
-@@ -157,5 +171,7 @@ def setup(self):
- 
-         if self.osc_available:
-             self.add_cmd_output("openstack baremetal introspection list")
-+            if self.get_option("all_logs"):
-+                self.collect_introspection_data()
- 
- # vim: set et ts=4 sw=4 :
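The collect_introspection_data() helper above follows a common sos pattern: run one listing command, then emit a follow-up command per returned ID. A rough standalone sketch of that loop, assuming the openstack CLI is installed and the usual OS_* credentials are exported:

    # Rough standalone version of the per-node loop above: list node UUIDs
    # and build one "introspection data save" command per node.
    import subprocess

    def introspection_commands():
        try:
            out = subprocess.check_output(
                ["openstack", "baremetal", "node", "list", "-f", "value",
                 "-c", "UUID"],
                universal_newlines=True)
        except (OSError, subprocess.CalledProcessError):
            return []
        return ["openstack baremetal introspection data save %s" % uuid
                for uuid in out.split() if uuid.strip()]

    if __name__ == "__main__":
        for cmd in introspection_commands():
            print(cmd)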
diff --git a/SOURCES/sos-bz1519267-haproxy-etcd-tracebacks.patch b/SOURCES/sos-bz1519267-haproxy-etcd-tracebacks.patch
deleted file mode 100644
index c440077..0000000
--- a/SOURCES/sos-bz1519267-haproxy-etcd-tracebacks.patch
+++ /dev/null
@@ -1,158 +0,0 @@
-From 0b30e8f72c3c669455209d15b1eb01de20c7d578 Mon Sep 17 00:00:00 2001
-From: Louis Bouchard <louis@ubuntu.com>
-Date: Wed, 8 Nov 2017 14:15:36 +0100
-Subject: [PATCH] [haproxy] Fix py2 specific import syntax for urlparse
-
-urlparse is now part of urllib in python3. Make sure that
-the proxy behaves correctly on both versions.
-
-Closes: #1137
-
-Signed-off-by: Louis Bouchard <louis@ubuntu.com>
-
-Fixes: #1138
-
-Signed-off-by: Bryn M. Reeves <bmr@redhat.com>
----
- sos/plugins/haproxy.py | 6 +++++-
- 1 file changed, 5 insertions(+), 1 deletion(-)
-
-diff --git a/sos/plugins/haproxy.py b/sos/plugins/haproxy.py
-index 390b6ddb..eb696c9f 100644
---- a/sos/plugins/haproxy.py
-+++ b/sos/plugins/haproxy.py
-@@ -15,9 +15,13 @@
- # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- 
- from sos.plugins import Plugin, RedHatPlugin, DebianPlugin
--from urlparse import urlparse
- from re import match
- 
-+try:
-+    from urllib.parse import urlparse
-+except ImportError:
-+    from urlparse import urlparse
-+
- 
- class HAProxy(Plugin, RedHatPlugin, DebianPlugin):
-     """HAProxy load balancer
--- 
-2.13.6
-
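The try/except import above is the usual shim for code that must run on both Python 2 and 3; a small usage sketch, where the URL is only an example:

    # Minimal illustration of the py2/py3-compatible urlparse import above.
    try:
        from urllib.parse import urlparse   # Python 3
    except ImportError:
        from urlparse import urlparse       # Python 2

    url = urlparse("http://127.0.0.1:1993/haproxy?stats")
    print(url.hostname, url.port, url.path)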
-From ae56ea578fe6f7443d2dce73e2b8fcf2bd5542d1 Mon Sep 17 00:00:00 2001
-From: Pavel Moravec <pmoravec@redhat.com>
-Date: Tue, 5 Dec 2017 12:44:42 +0100
-Subject: [PATCH] [etcd] don't traceback when etcd package isn't installed
-
-Catch the exception raised when the etcd package isn't installed and we
-inspect its version.
-
-Resolves: #1159
-
-Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
----
- sos/plugins/etcd.py | 17 +++++++++++------
- 1 file changed, 11 insertions(+), 6 deletions(-)
-
-diff --git a/sos/plugins/etcd.py b/sos/plugins/etcd.py
-index bd5d10d8..d80bbeeb 100644
---- a/sos/plugins/etcd.py
-+++ b/sos/plugins/etcd.py
-@@ -61,11 +61,16 @@ class etcd(Plugin, RedHatPlugin):
-                         return line.split('=')[1].replace('"', '').strip()
-         # If we can't read etcd.conf, assume defaults by etcd version
-         except:
--            ver = self.policy().package_manager.get_pkg_list()['etcd']
--            ver = ver['version'][0]
--            if ver == '2':
--                return 'http://localhost:4001'
--            if ver == '3':
--                return 'http://localhost:2379'
-+            # assume v3 is the default
-+            url = 'http://localhost:2379'
-+            try:
-+                ver = self.policy().package_manager.get_pkg_list()['etcd']
-+                ver = ver['version'][0]
-+                if ver == '2':
-+                    url = 'http://localhost:4001'
-+            except:
-+                # fallback when etcd is not installed
-+                pass
-+            return url
- 
- # vim: et ts=5 sw=4
--- 
-2.13.6
-
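Outside of sos, the fallback logic above can be sketched roughly as follows; it queries rpm directly where the plugin uses the policy package manager API, and the port numbers mirror the etcd v2/v3 defaults:

    # Sketch of the fallback above: default to the etcd v3 client port and
    # only fall back to the v2 port if an etcd 2.x package is detected.
    import subprocess

    def etcd_client_url():
        url = "http://localhost:2379"          # etcd v3 default
        try:
            ver = subprocess.check_output(
                ["rpm", "-q", "--qf", "%{VERSION}", "etcd"],
                universal_newlines=True)
            if ver.startswith("2"):
                url = "http://localhost:4001"  # etcd v2 default
        except (OSError, subprocess.CalledProcessError):
            pass                               # etcd not installed
        return url

    if __name__ == "__main__":
        print(etcd_client_url())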
-From 119593cff13b1d1d8d34b11fbb92893d70e634d6 Mon Sep 17 00:00:00 2001
-From: Pavel Moravec <pmoravec@redhat.com>
-Date: Tue, 5 Dec 2017 12:52:40 +0100
-Subject: [PATCH] [haproxy] catch exception when parsing haproxy.cfg
-
-Catch the exception raised when the parsed haproxy.cfg file isn't accessible.
-
-Resolves: #1160
-
-Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
----
- sos/plugins/haproxy.py | 14 +++++++++-----
- 1 file changed, 9 insertions(+), 5 deletions(-)
-
-diff --git a/sos/plugins/haproxy.py b/sos/plugins/haproxy.py
-index eb696c9f..1807e6d7 100644
---- a/sos/plugins/haproxy.py
-+++ b/sos/plugins/haproxy.py
-@@ -49,11 +49,15 @@ class HAProxy(Plugin, RedHatPlugin, DebianPlugin):
-         # from the next line
-         matched = None
-         provision_ip = None
--        for line in open("/etc/haproxy/haproxy.cfg").read().splitlines():
--            if matched:
--                provision_ip = line.split()[1]
--                break
--            matched = match(".*haproxy\.stats.*", line)
-+        try:
-+            for line in open("/etc/haproxy/haproxy.cfg").read().splitlines():
-+                if matched:
-+                    provision_ip = line.split()[1]
-+                    break
-+                matched = match(".*haproxy\.stats.*", line)
-+        except:
-+            # fallback when the cfg file is not accessible
-+            pass
- 
-         if not provision_ip:
-             return
--- 
-2.13.6
-
-From 68e149809d5b487d0c5800b5a1a005aaad83c7be Mon Sep 17 00:00:00 2001
-From: Pavel Moravec <pmoravec@redhat.com>
-Date: Wed, 15 Nov 2017 17:43:45 +0100
-Subject: [PATCH] [docker] fix copy&paste error in a for loop
-
-"containers" is an undefined variable; "insp" is the correct one.
-
-Resolves: #1148
-
-Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
----
- sos/plugins/docker.py | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/sos/plugins/docker.py b/sos/plugins/docker.py
-index fea4b96c..4f6c9882 100644
---- a/sos/plugins/docker.py
-+++ b/sos/plugins/docker.py
-@@ -97,7 +97,7 @@ class Docker(Plugin):
-                     )
-                 )
-             if self.get_option('logs'):
--                for container in containers:
-+                for container in insp:
-                     self.add_cmd_output(
-                         "{0} logs {1}".format(
-                             self.docker_cmd,
--- 
-2.13.6
-
diff --git a/SOURCES/sos-bz1525620-rabbitmq-osp12-containerized.patch b/SOURCES/sos-bz1525620-rabbitmq-osp12-containerized.patch
deleted file mode 100644
index 94ca8d2..0000000
--- a/SOURCES/sos-bz1525620-rabbitmq-osp12-containerized.patch
+++ /dev/null
@@ -1,81 +0,0 @@
-From 2780f6d6ec3a4db72ed5a00aea15ac750394314c Mon Sep 17 00:00:00 2001
-From: Michele Baldessari <michele@acksyn.org>
-Date: Wed, 17 Jan 2018 15:07:53 +0100
-Subject: [PATCH] [rabbitmq] Log collection fixes when rabbitmq runs in a
- container
-
-When rabbitmq is running inside a container all the rabbitmqctl
-commands will fail with some obscure (welsh?) error messages like:
-
-$ rabbitmqctl cluster_status
-erno" ienrirto rt elromgigneart ipnrge sienn td)o _booetr"r,o{r: b<a0d.ar2g.,0[>{
-l_prim_loader,check_file_result,3,[]},{init,get_boot,1,[]},{init,get_boot,2,[]},{init,do_boot,3,[]}]}}
-init terminating in do_boot ()
-
-When run inside containers we need to run any rabbitmqctl command inside
-the container, so that output is properly collected:
-$ docker exec -t rabbitmq-bundle-docker-0 rabbitmqctl cluster_status
-Cluster status of node 'rabbit@controller-0' ...
-[{nodes,[{disc,['rabbit@controller-0','rabbit@controller-1',
-                'rabbit@controller-2']}]},
- {running_nodes,['rabbit@controller-2','rabbit@controller-1',
-                 'rabbit@controller-0']},
- {cluster_name,<<"rabbit@controller-0.localdomain">>},
- {partitions,[]},
- {alarms,[{'rabbit@controller-2',[]},
-          {'rabbit@controller-1',[]},
-          {'rabbit@controller-0',[]}]}]
-
-While we're at it we also collect the logs of any rabbitmq container.
-This is particularly useful in containerized openstack deployments
-where short-lived containers are used to set up the cloud.
-
-Since the info output by 'rabbitmqctl cluster_status' and 'rabbitmqctl
-list_policies' is already contained in 'rabbitmqctl report', we just
-remove the former two commands.
-
-First reported via RHBZ#1525620
-
-Signed-off-by: Michele Baldessari <michele@acksyn.org>
-Signed-off-by: John Eckersberg <jeckersb@redhat.com>
----
- sos/plugins/rabbitmq.py | 23 ++++++++++++++++++++---
- 1 file changed, 20 insertions(+), 3 deletions(-)
-
-diff --git a/sos/plugins/rabbitmq.py b/sos/plugins/rabbitmq.py
-index 2c7e428a..8057dd90 100644
---- a/sos/plugins/rabbitmq.py
-+++ b/sos/plugins/rabbitmq.py
-@@ -28,9 +28,26 @@ class RabbitMQ(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin):
-     packages = ('rabbitmq-server',)
- 
-     def setup(self):
--        self.add_cmd_output("rabbitmqctl report")
--        self.add_cmd_output("rabbitmqctl cluster_status")
--        self.add_cmd_output("rabbitmqctl list_policies")
-+        container_status = self.get_command_output(
-+            "docker ps -a --format='{{ .Names }}'")
-+
-+        in_container = False
-+        container_names = []
-+        if container_status['status'] == 0:
-+            for line in container_status['output'].splitlines():
-+                if line.startswith("rabbitmq"):
-+                    in_container = True
-+                    container_names.append(line)
-+
-+        if in_container:
-+            for container in container_names:
-+                self.add_cmd_output('docker logs {0}'.format(container))
-+                self.add_cmd_output(
-+                    'docker exec -t {0} rabbitmqctl report'
-+                    .format(container)
-+                )
-+        else:
-+            self.add_cmd_output("rabbitmqctl report")
- 
-         self.add_copy_spec([
-             "/etc/rabbitmq/*",
--- 
-2.13.6
-
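The approach described in the commit message above, reduced to a standalone sketch: discover rabbitmq containers by name and wrap rabbitmqctl in docker exec, falling back to a plain invocation when no container is found. The docker CLI being usable by the current user is an assumption:

    # Standalone sketch of the containerized rabbitmq collection above.
    import subprocess

    def rabbitmq_commands():
        try:
            names = subprocess.check_output(
                ["docker", "ps", "--format", "{{ .Names }}"],
                universal_newlines=True).splitlines()
        except (OSError, subprocess.CalledProcessError):
            names = []
        containers = [n for n in names if n.startswith("rabbitmq")]
        if not containers:
            return ["rabbitmqctl report"]      # plain, non-containerized case
        cmds = []
        for c in containers:
            cmds.append("docker logs {0}".format(c))
            cmds.append("docker exec -t {0} rabbitmqctl report".format(c))
        return cmds

    if __name__ == "__main__":
        for cmd in rabbitmq_commands():
            print(cmd)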
diff --git a/SOURCES/sos-bz1535390-ipa-logs.patch b/SOURCES/sos-bz1535390-ipa-logs.patch
deleted file mode 100644
index 787c162..0000000
--- a/SOURCES/sos-bz1535390-ipa-logs.patch
+++ /dev/null
@@ -1,205 +0,0 @@
-From 15ba40684bf4dceb0cc5ae535212c005c5bb7f9a Mon Sep 17 00:00:00 2001
-From: Martin Basti <mbasti@redhat.com>
-Date: Wed, 17 May 2017 13:45:41 +0200
-Subject: [PATCH] [ipa] add KRA logs
-
-IPA v4 can be installed with the KRA subsystem. Add the relevant KRA logs
-to the plugin.
-
-Closes: #1010
-
-Signed-off-by: Martin Basti <mbasti@redhat.com>
----
- sos/plugins/ipa.py | 4 ++++
- 1 file changed, 4 insertions(+)
-
-diff --git a/sos/plugins/ipa.py b/sos/plugins/ipa.py
-index dc0eb839..03c601d4 100644
---- a/sos/plugins/ipa.py
-+++ b/sos/plugins/ipa.py
-@@ -60,6 +60,10 @@ class Ipa(Plugin, RedHatPlugin):
-                "/var/log/pki/pki-tomcat/ca/transactions",
-                "/var/log/pki/pki-tomcat/catalina.*",
-                "/var/log/pki/pki-ca-spawn.*"
-+               "/var/log/pki/pki-tomcat/kra/debug",
-+               "/var/log/pki/pki-tomcat/kra/system",
-+               "/var/log/pki/pki-tomcat/kra/transactions",
-+               "/var/log/pki/pki-kra-spawn.*"
-             ])
-         elif ipa_version == "v3":
-             self.add_copy_spec([
--- 
-2.13.6
-
-From 4562b41f0d9dcfc07e7fc0ab3b0b253d609a459f Mon Sep 17 00:00:00 2001
-From: Thorsten Scherf <tscherf@redhat.com>
-Date: Mon, 11 Dec 2017 11:04:17 +0100
-Subject: [PATCH] [ipa] use correct PKI directories for tomcat version
-
-The PKI subsystem uses different folders in IPA v3 and v4 for the NSS DB and
-the configuration files. The plugin needs to take this into account.
-
-Closes: #1163
-
-Signed-off-by: Thorsten Scherf <tscherf@redhat.com>
-Signed-off-by: Bryn M. Reeves <bmr@redhat.com>
----
- sos/plugins/ipa.py | 23 ++++++++++++++++++-----
- 1 file changed, 18 insertions(+), 5 deletions(-)
-
-diff --git a/sos/plugins/ipa.py b/sos/plugins/ipa.py
-index 683f8254..fe6ddf08 100644
---- a/sos/plugins/ipa.py
-+++ b/sos/plugins/ipa.py
-@@ -83,6 +83,9 @@ class Ipa(Plugin, RedHatPlugin):
-         self.pki_tomcat_dir_v4 = "/var/lib/pki/pki-tomcat"
-         self.pki_tomcat_dir_v3 = "/var/lib/pki-ca"
- 
-+        self.pki_tomcat_conf_dir_v4 = "/etc/pki/pki-tomcat/ca"
-+        self.pki_tomcat_conf_dir_v3 = "/etc/pki-ca"
-+
-         if self.ipa_server_installed():
-             self._log_debug("IPA server install detected")
- 
-@@ -111,7 +114,6 @@ class Ipa(Plugin, RedHatPlugin):
-             "/etc/dirsrv/slapd-*/schema/99user.ldif",
-             "/etc/hosts",
-             "/etc/named.*",
--            "/etc/pki-ca/CS.cfg",
-             "/etc/ipa/ca.crt",
-             "/etc/ipa/default.conf",
-             "/var/lib/certmonger/requests/[0-9]*",
-@@ -119,22 +121,33 @@ class Ipa(Plugin, RedHatPlugin):
-         ])
- 
-         self.add_forbidden_path("/etc/pki/nssdb/key*")
--        self.add_forbidden_path("/etc/pki-ca/flatfile.txt")
--        self.add_forbidden_path("/etc/pki-ca/password.conf")
--        self.add_forbidden_path("/var/lib/pki-ca/alias/key*")
-         self.add_forbidden_path("/etc/dirsrv/slapd-*/key*")
-         self.add_forbidden_path("/etc/dirsrv/slapd-*/pin.txt")
-         self.add_forbidden_path("/etc/dirsrv/slapd-*/pwdfile.txt")
-         self.add_forbidden_path("/etc/named.keytab")
- 
-+        #  Make sure to use the right PKI config and NSS DB folders
-+        if ipa_version == "v4":
-+            self.pki_tomcat_dir = self.pki_tomcat_dir_v4
-+            self.pki_tomcat_conf_dir = self.pki_tomcat_conf_dir_v4
-+        else:
-+            self.pki_tomcat_dir = self.pki_tomcat_dir_v3
-+            self.pki_tomcat_conf_dir = self.pki_tomcat_conf_dir_v3
-+
-+        self.add_cmd_output("certutil -L -d %s/alias" % self.pki_tomcat_dir)
-+        self.add_copy_spec("%s/CS.cfg" % self.pki_tomcat_conf_dir)
-+        self.add_forbidden_path("%s/alias/key*" % self.pki_tomcat_dir)
-+        self.add_forbidden_path("%s/flatfile.txt" % self.pki_tomcat_conf_dir)
-+        self.add_forbidden_path("%s/password.conf" % self.pki_tomcat_conf_dir)
-+
-         self.add_cmd_output([
-             "ls -la /etc/dirsrv/slapd-*/schema/",
-             "getcert list",
--            "certutil -L -d /var/lib/pki-ca/alias",
-             "certutil -L -d /etc/httpd/alias/",
-             "klist -ket /etc/dirsrv/ds.keytab",
-             "klist -ket /etc/httpd/conf/ipa.keytab"
-         ])
-+
-         for certdb_directory in glob("/etc/dirsrv/slapd-*/"):
-             self.add_cmd_output(["certutil -L -d %s" % certdb_directory])
-         return
--- 
-2.13.6
-
-From 66ef850794ad250bfe5c72795f442f908e1e3e19 Mon Sep 17 00:00:00 2001
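A condensed sketch of the version-dependent path selection introduced above; the directory constants mirror those in the patch, and the v3 branch is the fallback, as in the plugin:

    # Sketch of choosing PKI/NSS directories per detected IPA version and
    # deriving the command, copy and forbidden paths from them.
    PKI_DIRS = {
        "v4": ("/var/lib/pki/pki-tomcat", "/etc/pki/pki-tomcat/ca"),
        "v3": ("/var/lib/pki-ca", "/etc/pki-ca"),
    }

    def pki_collection(ipa_version):
        key = "v4" if ipa_version == "v4" else "v3"
        tomcat_dir, conf_dir = PKI_DIRS[key]
        return {
            "commands": ["certutil -L -d %s/alias" % tomcat_dir],
            "copy_specs": ["%s/CS.cfg" % conf_dir],
            "forbidden": ["%s/alias/key*" % tomcat_dir,
                          "%s/flatfile.txt" % conf_dir,
                          "%s/password.conf" % conf_dir],
        }

    if __name__ == "__main__":
        print(pki_collection("v3"))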
-From: Pavel Moravec <pmoravec@redhat.com>
-Date: Fri, 26 Jan 2018 15:11:15 +0100
-Subject: [PATCH] [ipa] fix implicit concatenation of one copy_spec
-
-Missing comma between "/var/log/pki/pki-ca-spawn.*"
-and "/var/log/pki/pki-tomcat/kra/debug"
-
-Resolves: #1195
-
-Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
----
- sos/plugins/ipa.py | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/sos/plugins/ipa.py b/sos/plugins/ipa.py
-index fe6ddf08..08f9bcf1 100644
---- a/sos/plugins/ipa.py
-+++ b/sos/plugins/ipa.py
-@@ -59,7 +59,7 @@ class Ipa(Plugin, RedHatPlugin):
-                "/var/log/pki/pki-tomcat/ca/system",
-                "/var/log/pki/pki-tomcat/ca/transactions",
-                "/var/log/pki/pki-tomcat/catalina.*",
--               "/var/log/pki/pki-ca-spawn.*"
-+               "/var/log/pki/pki-ca-spawn.*",
-                "/var/log/pki/pki-tomcat/kra/debug",
-                "/var/log/pki/pki-tomcat/kra/system",
-                "/var/log/pki/pki-tomcat/kra/transactions",
--- 
-2.13.6
-
-From 37c6601ddbc5ab6559a8420ce8f630d00086b1e1 Mon Sep 17 00:00:00 2001
-From: Martin Basti <mbasti@redhat.com>
-Date: Wed, 17 May 2017 13:53:20 +0200
-Subject: [PATCH] [ipa] add apache profile
-
-httpd error_log collected by apache plugin contains useful
-information about IPA API operations
-
-Closes: #1010
-
-Signed-off-by: Martin Basti <mbasti@redhat.com>
----
- sos/plugins/ipa.py | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/sos/plugins/ipa.py b/sos/plugins/ipa.py
-index 3a0565bc..683f8254 100644
---- a/sos/plugins/ipa.py
-+++ b/sos/plugins/ipa.py
-@@ -24,7 +24,7 @@ class Ipa(Plugin, RedHatPlugin):
-     """
- 
-     plugin_name = 'ipa'
--    profiles = ('identity',)
-+    profiles = ('identity', 'apache')
- 
-     ipa_server = False
-     ipa_client = False
--- 
-2.13.6
-
-From 400f61627fe0e45192fd05c7323ee9c96d2cad37 Mon Sep 17 00:00:00 2001
-From: Pavel Moravec <pmoravec@redhat.com>
-Date: Tue, 13 Feb 2018 16:42:59 +0100
-Subject: [PATCH] [ipa] set ipa_version variable before referencing it
-
-In case neither IPA v3 nor v4 is installed, ipa_version remains
-uninitialized before it is referenced.
-
-Resolves: #1214
-
-Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
----
- sos/plugins/ipa.py | 2 ++
- 1 file changed, 2 insertions(+)
-
-diff --git a/sos/plugins/ipa.py b/sos/plugins/ipa.py
-index 08f9bcf1..0d79063f 100644
---- a/sos/plugins/ipa.py
-+++ b/sos/plugins/ipa.py
-@@ -86,6 +86,8 @@ class Ipa(Plugin, RedHatPlugin):
-         self.pki_tomcat_conf_dir_v4 = "/etc/pki/pki-tomcat/ca"
-         self.pki_tomcat_conf_dir_v3 = "/etc/pki-ca"
- 
-+        ipa_version = None
-+
-         if self.ipa_server_installed():
-             self._log_debug("IPA server install detected")
- 
--- 
-2.13.6
-
diff --git a/SOURCES/sos-bz1539038-etcd-private-keys.patch b/SOURCES/sos-bz1539038-etcd-private-keys.patch
deleted file mode 100644
index a764cc2..0000000
--- a/SOURCES/sos-bz1539038-etcd-private-keys.patch
+++ /dev/null
@@ -1,28 +0,0 @@
-From b9ace2788c2f9c327ac519fa007bc08470f4fd2b Mon Sep 17 00:00:00 2001
-From: Jake Hunsaker <jhunsake@redhat.com>
-Date: Thu, 25 Jan 2018 17:43:12 -0500
-Subject: [PATCH] [etcd] Do not collect private etcd keys
-
-Prevents sos from capturing the /etc/etcd/ca directory and its contents,
-which is primarily private keys and the like.
-
-Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
----
- sos/plugins/etcd.py | 1 +
- 1 file changed, 1 insertion(+)
-
-diff --git a/sos/plugins/etcd.py b/sos/plugins/etcd.py
-index d80bbeeb..884aa1bd 100644
---- a/sos/plugins/etcd.py
-+++ b/sos/plugins/etcd.py
-@@ -31,6 +31,7 @@ class etcd(Plugin, RedHatPlugin):
-     def setup(self):
-         etcd_url = self.get_etcd_url()
- 
-+        self.add_forbidden_path('/etc/etcd/ca')
-         self.add_copy_spec('/etc/etcd')
- 
-         subcmds = [
--- 
-2.13.6
-
diff --git a/SOURCES/sos-bz1568882-openstack-octavia-plugin.patch b/SOURCES/sos-bz1568882-openstack-octavia-plugin.patch
deleted file mode 100644
index 8849e15..0000000
--- a/SOURCES/sos-bz1568882-openstack-octavia-plugin.patch
+++ /dev/null
@@ -1,136 +0,0 @@
-From 188b8b70ea5790af3fe1ca9fc7eea28e83a149a6 Mon Sep 17 00:00:00 2001
-From: Pavel Moravec <pmoravec@redhat.com>
-Date: Mon, 2 Apr 2018 15:18:22 +0200
-Subject: [PATCH] [openstack_octavia] Add new plugin
-
-This adds a plugin for OpenStack Octavia, the network load balancing service.
-
-Resolves: #1257
-
-Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
-Signed-off-by: Bryn M. Reeves <bmr@redhat.com>
----
- sos/plugins/openstack_octavia.py | 111 +++++++++++++++++++++++++++++++++++++++
- 1 file changed, 111 insertions(+)
- create mode 100644 sos/plugins/openstack_octavia.py
-
-diff --git a/sos/plugins/openstack_octavia.py b/sos/plugins/openstack_octavia.py
-new file mode 100644
-index 00000000..43bf322a
---- /dev/null
-+++ b/sos/plugins/openstack_octavia.py
-@@ -0,0 +1,111 @@
-+# This program is free software; you can redistribute it and/or modify
-+# it under the terms of the GNU General Public License as published by
-+# the Free Software Foundation; either version 2 of the License, or
-+# (at your option) any later version.
-+
-+# This program is distributed in the hope that it will be useful,
-+# but WITHOUT ANY WARRANTY; without even the implied warranty of
-+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+# GNU General Public License for more details.
-+
-+# You should have received a copy of the GNU General Public License along
-+# with this program; if not, write to the Free Software Foundation, Inc.,
-+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-+
-+from sos.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin
-+
-+
-+class OpenStackOctavia(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin):
-+    """Openstack Octavia"""
-+
-+    plugin_name = "openstack_octavia"
-+    profiles = ('openstack', 'openstack_controller')
-+    packages = ('openstack-octavia-common',)
-+
-+    var_puppet_gen = "/var/lib/config-data/puppet-generated/octavia"
-+
-+    def setup(self):
-+        # configs
-+        self.add_copy_spec([
-+            "/etc/sysconfig/network-scripts/ifcfg-o-hm0",
-+            "/etc/logrotate.d/openstack-octavia",
-+            "/etc/octavia/*",
-+            "/var/lib/octavia",
-+            self.var_puppet_gen + "/etc/octavia/conf.d",
-+            self.var_puppet_gen + "/etc/octavia/octavia.conf",
-+            self.var_puppet_gen + "/etc/my.cnf.d/tripleo.cnf",
-+        ])
-+
-+        # don't collect certificates
-+        self.add_forbidden_path("/etc/octavia/certs/")
-+
-+        # logs
-+        self.limit = self.get_option("log_size")
-+        if self.get_option("all_logs"):
-+            self.add_copy_spec([
-+                "/var/log/containers/httpd/octavia-api/*",
-+                "/var/log/containers/octavia/*",
-+                "/var/log/octavia/*",
-+            ], sizelimit=self.limit)
-+        else:
-+            self.add_copy_spec([
-+                "/var/log/containers/httpd/octavia-api/*.log",
-+                "/var/log/containers/octavia/*.log",
-+                "/var/log/octavia/*.log",
-+            ], sizelimit=self.limit)
-+
-+        # commands
-+        self.add_cmd_output([
-+            "openstack loadbalancer list",
-+            "openstack loadbalancer amphora list",
-+            "openstack loadbalancer healthmonitor list",
-+            "openstack loadbalancer l7policy list",
-+            "openstack loadbalancer listener list",
-+            "openstack loadbalancer pool list",
-+            "openstack loadbalancer quota list",
-+        ])
-+
-+        # get details from each loadbalancer
-+        cmd = "openstack loadbalancer list -f value -c id"
-+        loadbalancers = self.call_ext_prog(cmd)['output']
-+        for loadbalancer in loadbalancers.splitlines():
-+            loadbalancer = loadbalancer.split()[0]
-+            self.add_cmd_output(
-+                "openstack loadbalancer show %s" % (loadbalancer)
-+            )
-+
-+        # get details from each l7policy
-+        cmd = "openstack loadbalancer l7policy list -f value -c id"
-+        l7policies = self.call_ext_prog(cmd)['output']
-+        for l7policy in l7policies.splitlines():
-+            l7policy = l7policy.split()[0]
-+            self.add_cmd_output(
-+                "openstack loadbalancer l7rule list %s" % (l7policy)
-+            )
-+
-+        # get details from each pool
-+        cmd = "openstack loadbalancer pool list -f value -c id"
-+        pools = self.call_ext_prog(cmd)['output']
-+        for pool in pools.splitlines():
-+            pool = pool.split()[0]
-+            self.add_cmd_output(
-+                "openstack loadbalancer member list %s" % (pool)
-+            )
-+
-+        if self.get_option("verify"):
-+            self.add_cmd_output("rpm -V %s" % ' '.join(self.packages))
-+
-+    def postproc(self):
-+        protect_keys = [
-+            "ca_private_key_passphrase", "heartbeat_key", "password",
-+            "connection"
-+        ]
-+        regexp = r"((?m)^\s*(%s)\s*=\s*)(.*)" % "|".join(protect_keys)
-+
-+        self.do_path_regex_sub("/etc/octavia/*", regexp, r"\1*********")
-+        self.do_path_regex_sub(
-+            self.var_puppet_gen + "/etc/octavia/*",
-+            regexp, r"\1*********"
-+        )
-+
-+# vim: set et ts=4 sw=4 :
--- 
-2.13.6
-
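The postproc() scrubbing above relies on a single regular expression built from the list of protected keys. A small in-memory sketch of the same idea; the sample config text is invented, and the MULTILINE flag is passed explicitly here rather than being embedded in the pattern:

    # Sketch of the secret-scrubbing approach used in postproc() above,
    # applied to an in-memory config snippet instead of files on disk.
    import re

    protect_keys = ["ca_private_key_passphrase", "heartbeat_key", "password",
                    "connection"]
    regexp = r"(^\s*(%s)\s*=\s*)(.*)" % "|".join(protect_keys)

    sample = ("password = secret123\n"
              "bind_host = 0.0.0.0\n"
              "heartbeat_key = abc\n")
    print(re.sub(regexp, r"\1*********", sample, flags=re.M))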
diff --git a/SOURCES/sos-bz1568884-kernel-dont-collect-timer.patch b/SOURCES/sos-bz1568884-kernel-dont-collect-timer.patch
deleted file mode 100644
index 7aa61e9..0000000
--- a/SOURCES/sos-bz1568884-kernel-dont-collect-timer.patch
+++ /dev/null
@@ -1,69 +0,0 @@
-From 04dce26bd12888b924425beefa449a07b683021a Mon Sep 17 00:00:00 2001
-From: =?UTF-8?q?Renaud=20M=C3=A9trich?= <rmetrich@redhat.com>
-Date: Fri, 13 Apr 2018 09:24:30 +0200
-Subject: [PATCH] [kernel] Disable gathering /proc/timer* statistics
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-Disable gathering /proc/timer* statistics by default; a new option
-'kernel.with-timer' enables gathering these.
-
-If /proc/timer_list is huge, then the kernel will experience issues
-processing all the timers, since it needs to spin in a tight loop inside
-the kernel.
-
-We have tried to fix it from the kernel side, adding touch_nmi_watchdog()
-to silence softlockups and cond_resched() to fix the RCU stall issue, but
-with such a huge number of timers the RHEL7 kernel still hangs.
-It can be reproduced somehow on an upstream kernel (however, there will be
-workqueue lockups).
-
-We came to the conclusion that reading /proc/timer_list should be disabled
-in sosreport. Since /proc/timer_stats is tied to /proc/timer_list, both
-are disabled at the same time.
-
-Resolves: #1268
-
-Signed-off-by: Renaud Métrich <rmetrich@redhat.com>
-Signed-off-by: Bryn M. Reeves <bmr@redhat.com>
----
- sos/plugins/kernel.py | 10 +++++++++-
- 1 file changed, 9 insertions(+), 1 deletion(-)
-
-diff --git a/sos/plugins/kernel.py b/sos/plugins/kernel.py
-index 97ef7862..6c2f509c 100644
---- a/sos/plugins/kernel.py
-+++ b/sos/plugins/kernel.py
-@@ -27,6 +27,10 @@ class Kernel(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin):
- 
-     sys_module = '/sys/module'
- 
-+    option_list = [
-+        ("with-timer", "gather /proc/timer* statistics", "slow", False)
-+    ]
-+
-     def setup(self):
-         # compat
-         self.add_cmd_output("uname -a", root_symlink="uname")
-@@ -83,7 +87,6 @@ class Kernel(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin):
-             "/proc/driver",
-             "/proc/sys/kernel/tainted",
-             "/proc/softirqs",
--            "/proc/timer*",
-             "/proc/lock*",
-             "/proc/misc",
-             "/var/log/dmesg",
-@@ -92,4 +95,9 @@ class Kernel(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin):
-             clocksource_path + "current_clocksource"
-         ])
- 
-+        if self.get_option("with-timer"):
-+            # This can be very slow, depending on the number of timers,
-+            # and may also cause softlockups
-+            self.add_copy_spec("/proc/timer*")
-+
- # vim: set et ts=4 sw=4 :
--- 
-2.13.6
-
diff --git a/SOURCES/sos-bz1568960-ovirt-provider-ovn.patch b/SOURCES/sos-bz1568960-ovirt-provider-ovn.patch
deleted file mode 100644
index 5f343bc..0000000
--- a/SOURCES/sos-bz1568960-ovirt-provider-ovn.patch
+++ /dev/null
@@ -1,70 +0,0 @@
-From 523cdb9393059534f97b5b249498aca67c2c45d6 Mon Sep 17 00:00:00 2001
-From: Leon Goldberg <lgoldber@redhat.com>
-Date: Sun, 25 Feb 2018 17:01:11 +0200
-Subject: [PATCH] [ovirt-provider-ovn] Introducing a plugin for
- ovirt-provider-ovn.
-
-Resolves: #1227.
-
-Signed-off-by: Leon Goldberg <leon.otium@gmail.com>
-
-Fixed member formatting & added VIM mode line.
-
-Signed-off-by: Bryn M. Reeves <bmr@redhat.com>
----
- sos/plugins/ovirt_provider_ovn.py | 43 +++++++++++++++++++++++++++++++++++++++
- 1 file changed, 43 insertions(+)
- create mode 100644 sos/plugins/ovirt_provider_ovn.py
-
-diff --git a/sos/plugins/ovirt_provider_ovn.py b/sos/plugins/ovirt_provider_ovn.py
-new file mode 100644
-index 00000000..a075509e
---- /dev/null
-+++ b/sos/plugins/ovirt_provider_ovn.py
-@@ -0,0 +1,43 @@
-+# Copyright (C) 2018 Red Hat, Inc.,
-+
-+# This program is free software; you can redistribute it and/or modify
-+# it under the terms of the GNU General Public License as published by
-+# the Free Software Foundation; either version 2 of the License, or
-+# (at your option) any later version.
-+
-+# This program is distributed in the hope that it will be useful,
-+# but WITHOUT ANY WARRANTY; without even the implied warranty of
-+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+# GNU General Public License for more details.
-+
-+# You should have received a copy of the GNU General Public License along
-+# with this program; if not, write to the Free Software Foundation, Inc.,
-+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-+from sos.plugins import Plugin, RedHatPlugin
-+
-+
-+class OvirtProviderOvn(Plugin, RedHatPlugin):
-+    """oVirt OVN Provider
-+    """
-+
-+    packages = ('ovirt-provider-ovn',)
-+    plugin_name = 'ovirt_provider_ovn'
-+    profiles = ('virt',)
-+
-+    provider_conf = '/etc/ovirt-provider-ovn/ovirt-provider-ovn.conf'
-+
-+    def setup(self):
-+        self.add_copy_spec(self.provider_conf)
-+        self.add_copy_spec('/etc/ovirt-provider-ovn/conf.d/*')
-+
-+        spec = '/var/log/ovirt-provider-ovn.log'
-+        if self.get_option('all_logs'):
-+            spec += '*'
-+        self.add_copy_spec(spec, sizelimit=self.get_option('log_size'))
-+
-+    def postproc(self):
-+        self.do_file_sub(self.provider_conf,
-+                         r'(ovirt-sso-client-secret\s*=\s*)(.*)',
-+                         r'\1*************')
-+
-+# vim: set et ts=4 sw=4 :
--- 
-2.13.6
-
diff --git a/SOURCES/sos-bz1580525-ovn-plugins.patch b/SOURCES/sos-bz1580525-ovn-plugins.patch
deleted file mode 100644
index 2f479a8..0000000
--- a/SOURCES/sos-bz1580525-ovn-plugins.patch
+++ /dev/null
@@ -1,262 +0,0 @@
-From 10bb3b2d6f6817bb4ae96ba58865bff294e54f8d Mon Sep 17 00:00:00 2001
-From: Mark Michelson <mmichels@redhat.com>
-Date: Thu, 17 May 2018 16:43:58 -0400
-Subject: [PATCH 1/2] [openvswitch] Add additional logging paths.
-
-Openvswitch's logs can be located in alternate paths depending on the
-installation. OpenStack installations, for instance, do not use the same
-directories for logs as typical package installations.
-
-Related: #1259
-
-Signed-off-by: Mark Michelson <mmichels@redhat.com>
-Signed-off-by: Bryn M. Reeves <bmr@redhat.com>
----
- sos/plugins/openvswitch.py | 19 ++++++++++++++++---
- 1 file changed, 16 insertions(+), 3 deletions(-)
-
-diff --git a/sos/plugins/openvswitch.py b/sos/plugins/openvswitch.py
-index 6f1b41ac..ab908fbc 100644
---- a/sos/plugins/openvswitch.py
-+++ b/sos/plugins/openvswitch.py
-@@ -16,6 +16,9 @@
- 
- from sos.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin
- 
-+from os.path import join as path_join
-+from os import environ
-+
- 
- class OpenVSwitch(Plugin):
-     """ OpenVSwitch networking
-@@ -28,12 +31,22 @@ class OpenVSwitch(Plugin):
-         all_logs = self.get_option("all_logs")
-         limit = self.get_option("log_size")
- 
-+        log_dirs = [
-+            '/var/log/containers/openvswitch/',
-+            '/var/log/openvswitch/',
-+            '/usr/local/var/log/openvswitch/',
-+        ]
-+
-+        if environ.get('OVS_LOGDIR'):
-+            log_dirs.append(environ.get('OVS_LOGDIR'))
-+
-         if not all_logs:
--            self.add_copy_spec("/var/log/openvswitch/*.log",
-+            self.add_copy_spec([path_join(ld, '*.log') for ld in log_dirs],
-                                sizelimit=limit)
--        else:
--            self.add_copy_spec("/var/log/openvswitch/",
-+            self.add_copy_spec([path_join(ld, '*.log') for ld in log_dirs],
-                                sizelimit=limit)
-+        else:
-+            self.add_copy_spec(log_dirs, sizelimit=limit)
- 
-         self.add_copy_spec([
-             "/var/run/openvswitch/ovsdb-server.pid",
--- 
-2.13.6
-
-
-From ac33925bac828246229a93da0f9b4e9218bca6b8 Mon Sep 17 00:00:00 2001
-From: Mark Michelson <mmichels@redhat.com>
-Date: Thu, 17 May 2018 16:50:40 -0400
-Subject: [PATCH 2/2] [ovn] Add new plugins for Open Virtual Network
-
-OVN is a sub-project of Openvswitch used to define logical networks for
-a cluster of OVS instances. The two plugins defined here are
-"ovn-central", which runs on a single server, and "ovn-host" which runs
-on each of the hypervisors running OVS.
-
-These plugins gather runtime information about the configured virtual
-networks.
-
-Resolves: #1259
-
-Signed-off-by: Mark Michelson <mmichels@redhat.com>
-Signed-off-by: Bryn M. Reeves <bmr@redhat.com>
----
- sos/plugins/ovn_central.py | 105 +++++++++++++++++++++++++++++++++++++++++++++
- sos/plugins/ovn_host.py    |  57 ++++++++++++++++++++++++
- 2 files changed, 162 insertions(+)
- create mode 100644 sos/plugins/ovn_central.py
- create mode 100644 sos/plugins/ovn_host.py
-
-diff --git a/sos/plugins/ovn_central.py b/sos/plugins/ovn_central.py
-new file mode 100644
-index 00000000..23c1faeb
---- /dev/null
-+++ b/sos/plugins/ovn_central.py
-@@ -0,0 +1,105 @@
-+# Copyright (C) 2018 Mark Michelson <mmichels@redhat.com>
-+
-+# This program is free software; you can redistribute it and/or modify
-+# it under the terms of the GNU General Public License as published by
-+# the Free Software Foundation; either version 2 of the License, or
-+# (at your option) any later version.
-+
-+# This program is distributed in the hope that it will be useful,
-+# but WITHOUT ANY WARRANTY; without even the implied warranty of
-+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+# GNU General Public License for more details.
-+
-+# You should have received a copy of the GNU General Public License along
-+# with this program; if not, write to the Free Software Foundation, Inc.,
-+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-+
-+from sos.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin
-+import json
-+import os
-+import six
-+
-+
-+class OVNCentral(Plugin):
-+    """ OVN Northd
-+    """
-+    plugin_name = "ovn_central"
-+    profiles = ('network', 'virt')
-+
-+    def add_database_output(self, filename, cmds, ovn_cmd, skip=[]):
-+        try:
-+            with open(filename, 'r') as f:
-+                try:
-+                    db = json.load(f)
-+                except:
-+                    # If json can't be parsed, then exit early
-+                    self._log_error("Cannot parse JSON file %s" % filename)
-+                    return
-+                try:
-+                    for table in six.iterkeys(db['tables']):
-+                        if table not in skip:
-+                            cmds.append('%s list %s' % (ovn_cmd, table))
-+                except AttributeError:
-+                    self._log_error("DB schema %s has no 'tables' key" %
-+                                    filename)
-+                    return
-+        except IOError as ex:
-+            self._log_error("Could not open DB schema file %s: %s" % (filename,
-+                                                                      ex))
-+            return
-+
-+    def setup(self):
-+        ovs_rundir = os.environ.get('OVS_RUNDIR')
-+        for pidfile in ['ovnnb_db.pid', 'ovnsb_db.pid', 'ovn-northd.pid']:
-+            self.add_copy_spec([
-+                os.path.join('/var/lib/openvswitch/ovn', pidfile),
-+                os.path.join('/usr/local/var/run/openvswitch', pidfile),
-+                os.path.join('/var/run/openvswitch/', pidfile),
-+                os.path.join('/run/openvswitch/', pidfile),
-+            ])
-+
-+            if ovs_rundir:
-+                self.add_copy_spec(os.path.join(ovs_rundir, pidfile))
-+
-+        # Some user-friendly versions of DB output
-+        cmds = [
-+            'ovn-sbctl lflow-list',
-+            'ovn-nbctl get-ssl',
-+            'ovn-nbctl get-connection',
-+            'ovn-sbctl get-ssl',
-+            'ovn-sbctl get-connection',
-+        ]
-+
-+        schema_dir = '/usr/share/openvswitch'
-+
-+        self.add_database_output(os.path.join(schema_dir, 'ovn-nb.ovsschema'),
-+                                 cmds, 'ovn-nbctl')
-+        self.add_database_output(os.path.join(schema_dir, 'ovn-sb.ovsschema'),
-+                                 cmds, 'ovn-sbctl', ['Logical_Flow'])
-+
-+        self.add_cmd_output(cmds)
-+
-+        self.add_copy_spec("/etc/sysconfig/ovn-northd")
-+
-+        ovs_dbdir = os.environ.get('OVS_DBDIR')
-+        for dbfile in ['ovnnb_db.db', 'ovnsb_db.db']:
-+            self.add_copy_spec([
-+                os.path.join('/var/lib/openvswitch/ovn', dbfile),
-+                os.path.join('/usr/local/etc/openvswitch', dbfile),
-+                os.path.join('/etc/openvswitch', dbfile),
-+                os.path.join('/var/lib/openvswitch', dbfile),
-+            ])
-+            if ovs_dbdir:
-+                self.add_copy_spec(os.path.join(ovs_dbdir, dbfile))
-+
-+        self.add_journal(units="ovn-northd")
-+
-+
-+class RedHatOVNCentral(OVNCentral, RedHatPlugin):
-+
-+    packages = ('openvswitch-ovn-central', )
-+
-+
-+class DebianOVNCentral(OVNCentral, DebianPlugin, UbuntuPlugin):
-+
-+    packages = ('ovn-central', )
-diff --git a/sos/plugins/ovn_host.py b/sos/plugins/ovn_host.py
-new file mode 100644
-index 00000000..496f35bb
---- /dev/null
-+++ b/sos/plugins/ovn_host.py
-@@ -0,0 +1,57 @@
-+# Copyright (C) 2018 Mark Michelson <mmichels@redhat.com>
-+
-+# This program is free software; you can redistribute it and/or modify
-+# it under the terms of the GNU General Public License as published by
-+# the Free Software Foundation; either version 2 of the License, or
-+# (at your option) any later version.
-+
-+# This program is distributed in the hope that it will be useful,
-+# but WITHOUT ANY WARRANTY; without even the implied warranty of
-+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+# GNU General Public License for more details.
-+
-+# You should have received a copy of the GNU General Public License along
-+# with this program; if not, write to the Free Software Foundation, Inc.,
-+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-+
-+import os
-+from sos.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin
-+
-+
-+class OVNHost(Plugin):
-+    """ OVN Controller
-+    """
-+    plugin_name = "ovn_host"
-+    profiles = ('network', 'virt')
-+
-+    def setup(self):
-+        pidfile = 'ovn-controller.pid'
-+        pid_paths = [
-+                '/var/lib/openvswitch/ovn',
-+                '/usr/local/var/run/openvswitch',
-+                '/var/run/openvswitch',
-+                '/run/openvswitch'
-+        ]
-+        if os.environ.get('OVS_RUNDIR'):
-+            pid_paths.append(os.environ.get('OVS_RUNDIR'))
-+        self.add_copy_spec([os.path.join(pp, pidfile) for pp in pid_paths])
-+
-+        self.add_copy_spec('/etc/sysconfig/ovn-controller')
-+
-+        self.add_cmd_output([
-+            'ovs-ofctl -O OpenFlow13 dump-flows br-int',
-+            'ovs-vsctl list-br',
-+            'ovs-vsctl list OpenVswitch',
-+        ])
-+
-+        self.add_journal(units="ovn-controller")
-+
-+
-+class RedHatOVNHost(OVNHost, RedHatPlugin):
-+
-+    packages = ('openvswitch-ovn-host', )
-+
-+
-+class DebianOVNHost(OVNHost, DebianPlugin, UbuntuPlugin):
-+
-+    packages = ('ovn-host', )
--- 
-2.13.6
-
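The add_database_output() method above derives its "list <table>" commands from the OVSDB schema file shipped with the package. A standalone sketch of that idea, assuming the schema sits at the usual packaged path:

    # Standalone sketch of add_database_output(): read an OVSDB schema file
    # and emit one "ovn-nbctl list <table>" command per table.
    import json

    def schema_list_commands(schema="/usr/share/openvswitch/ovn-nb.ovsschema",
                             ovn_cmd="ovn-nbctl", skip=()):
        try:
            with open(schema) as f:
                db = json.load(f)
        except (IOError, ValueError):
            return []                       # missing or unparsable schema
        return ["%s list %s" % (ovn_cmd, table)
                for table in db.get("tables", {}) if table not in skip]

    if __name__ == "__main__":
        for cmd in schema_list_commands():
            print(cmd)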
diff --git a/SOURCES/sos-bz1580526-docker-backport.patch b/SOURCES/sos-bz1580526-docker-backport.patch
deleted file mode 100644
index 38fa6b1..0000000
--- a/SOURCES/sos-bz1580526-docker-backport.patch
+++ /dev/null
@@ -1,104 +0,0 @@
-From 5f67a7f6cd532a4dea57a0c8ece51f1734ed8cbe Mon Sep 17 00:00:00 2001
-From: Takayoshi Kimura <takayoshi@gmail.com>
-Date: Mon, 22 May 2017 10:45:13 +0900
-Subject: [PATCH] [docker] Enable log timestamp in docker logs
-
-Enable the docker logs -t option to make container troubleshooting
-easier. Container logs sometimes carry timestamps from different
-timezones without TZ info, or no timestamps at all.
-
-Fixes: #1013
-
-Signed-off-by: Takayoshi Kimura <tkimura@redhat.com>
-Signed-off-by: Bryn M. Reeves <bmr@redhat.com>
----
- sos/plugins/docker.py | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/sos/plugins/docker.py b/sos/plugins/docker.py
-index 4f6c98828..50fdc9c28 100644
---- a/sos/plugins/docker.py
-+++ b/sos/plugins/docker.py
-@@ -99,7 +99,7 @@ def setup(self):
-             if self.get_option('logs'):
-                 for container in insp:
-                     self.add_cmd_output(
--                        "{0} logs {1}".format(
-+                        "{0} logs -t {1}".format(
-                             self.docker_cmd,
-                             container
-                         )
-
-From 9fc6eb234682ce695c2b1b1608f755928d441585 Mon Sep 17 00:00:00 2001
-From: Jake Hunsaker <jhunsake@redhat.com>
-Date: Wed, 6 Sep 2017 14:44:20 -0400
-Subject: [PATCH] [docker] Collect new registry config location
-
-For the Red Hat release of docker, registries are now configured in
-/etc/containers/registries.conf instead of /etc/sysconfig/docker.
-
-This patch adds collection of /etc/containers to collect registry
-configuration files. /etc/sysconfig/docker can still be used for daemon
-configuration, so that is still collected.
-
-Resolves: #1096
-
-Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
-Signed-off-by: Bryn M. Reeves <bmr@redhat.com>
----
- sos/plugins/docker.py | 3 ++-
- 1 file changed, 2 insertions(+), 1 deletion(-)
-
-diff --git a/sos/plugins/docker.py b/sos/plugins/docker.py
-index 50fdc9c28..80b6af30a 100644
---- a/sos/plugins/docker.py
-+++ b/sos/plugins/docker.py
-@@ -114,7 +114,8 @@ def setup(self):
-         super(RedHatDocker, self).setup()
- 
-         self.add_copy_spec([
--            "/etc/udev/rules.d/80-docker.rules"
-+            "/etc/udev/rules.d/80-docker.rules",
-+            "/etc/containers/"
-         ])
- 
- 
-From 15d7237527fe26da95070b48e7aafd0597dc245f Mon Sep 17 00:00:00 2001
-From: Jake Hunsaker <jhunsake@redhat.com>
-Date: Fri, 15 Dec 2017 12:11:57 -0500
-Subject: [PATCH] [docker] Collect daemon.json for all OSes
-
-The Red Hat packaging of docker now also supports using
-/etc/docker/daemon.json for configuring docker.
-
-This moves collection of daemon.json from being Ubuntu-only to being for
-any OS installation.
-
-Resolves: #1168
-
-Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
-Signed-off-by: Bryn M. Reeves <bmr@redhat.com>
----
- sos/plugins/docker.py | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/sos/plugins/docker.py b/sos/plugins/docker.py
-index 80b6af30a..f372845c3 100644
---- a/sos/plugins/docker.py
-+++ b/sos/plugins/docker.py
-@@ -36,6 +36,7 @@ class Docker(Plugin):
- 
-     def setup(self):
-         self.add_copy_spec([
-+            "/etc/docker/daemon.json",
-             "/var/lib/docker/repositories-*"
-         ])
- 
-@@ -127,7 +128,6 @@ def setup(self):
-         super(UbuntuDocker, self).setup()
-         self.add_copy_spec([
-             "/etc/default/docker",
--            "/etc/docker/daemon.json",
-             "/var/run/docker/libcontainerd/containerd/events.log"
-         ])
- 
diff --git a/SOURCES/sos-bz1584548-traceback-memory.patch b/SOURCES/sos-bz1584548-traceback-memory.patch
deleted file mode 100644
index 23f10eb..0000000
--- a/SOURCES/sos-bz1584548-traceback-memory.patch
+++ /dev/null
@@ -1,38 +0,0 @@
-From 2c04f56675b36f59ab4d4cb455efeb7d71badb74 Mon Sep 17 00:00:00 2001
-From: Kazuhito Hagio <k-hagio@ab.jp.nec.com>
-Date: Tue, 22 May 2018 15:29:47 -0400
-Subject: [PATCH] [logs] rework: collect journalctl verbosed logs with
- --all-logs only
-
-commit 7bc90f618f0549279544d26effae2e5197d85e2b ("[logs] collect
-journalctl verbosed logs with --all-logs only") did not suppress
-the verbose journalctl logs by default. Let's rework it.
-
-Related: #1225
-Resolves: #1310
-
-Signed-off-by: Kazuhito Hagio <k-hagio@ab.jp.nec.com>
-Signed-off-by: Bryn M. Reeves <bmr@redhat.com>
----
- sos/plugins/logs.py | 3 +--
- 1 file changed, 1 insertion(+), 2 deletions(-)
-
-diff --git a/sos/plugins/logs.py b/sos/plugins/logs.py
-index 1d18bad8..c67d3e66 100644
---- a/sos/plugins/logs.py
-+++ b/sos/plugins/logs.py
-@@ -34,10 +34,10 @@ class Logs(Plugin):
-         self.add_copy_spec("/var/log/boot.log", sizelimit=self.limit)
-         self.add_copy_spec("/var/log/cloud-init*", sizelimit=self.limit)
-         self.add_journal(boot="this")
--        self.add_journal(boot="this", allfields=True, output="verbose")
-         self.add_cmd_output("journalctl --disk-usage")
- 
-         if self.get_option('all_logs'):
-+            self.add_journal(boot="this", allfields=True, output="verbose")
-             syslog_conf = self.join_sysroot("/etc/syslog.conf")
-             logs = self.do_regex_find_all("^\S+\s+(-?\/.*$)\s+", syslog_conf)
-             if self.is_installed("rsyslog") \
--- 
-2.13.6
-
diff --git a/SOURCES/sos-bz1594327-archive-encryption.patch b/SOURCES/sos-bz1594327-archive-encryption.patch
new file mode 100644
index 0000000..51c419f
--- /dev/null
+++ b/SOURCES/sos-bz1594327-archive-encryption.patch
@@ -0,0 +1,262 @@
+From 7b475f1da0f843b20437896737be04cc1c7bbc0a Mon Sep 17 00:00:00 2001
+From: Jake Hunsaker <jhunsake@redhat.com>
+Date: Fri, 25 May 2018 13:38:27 -0400
+Subject: [PATCH] [sosreport] Add mechanism to encrypt final archive
+
+Adds an option to encrypt the resulting archive that sos generates.
+There are two methods for doing so:
+
+	--encrypt-key	Uses a key-pair for asymmetric encryption
+	--encrypt-pass  Uses a password for symmetric encryption
+
+For key-pair encryption, the key-to-be-used must be imported into the
+root user's keyring, as gpg does not allow for the use of keyfiles.
+
+If the encryption process fails, sos will not abort, as the unencrypted
+archive will have already been created. The assumption is that the
+archive is still of use and/or the user has another means of encrypting
+it.
+
+Resolves: #1320
+
+Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
+Signed-off-by: Bryn M. Reeves <bmr@redhat.com>
+---
+ man/en/sosreport.1     | 28 ++++++++++++++++++++++
+ sos/__init__.py        | 10 ++++----
+ sos/archive.py         | 63 ++++++++++++++++++++++++++++++++++++++++++++++----
+ sos/sosreport.py       | 20 ++++++++++++++--
+ tests/archive_tests.py |  3 ++-
+ 5 files changed, 113 insertions(+), 11 deletions(-)
+
+diff --git a/man/en/sosreport.1 b/man/en/sosreport.1
+index b0adcd8bb..b6051edc1 100644
+--- a/man/en/sosreport.1
++++ b/man/en/sosreport.1
+@@ -22,6 +22,8 @@ sosreport \- Collect and package diagnostic and support data
+           [--log-size]\fR
+           [--all-logs]\fR
+           [-z|--compression-type method]\fR
++          [--encrypt-key KEY]\fR
++          [--encrypt-pass PASS]\fR
+           [--experimental]\fR
+           [-h|--help]\fR
+ 
+@@ -120,6 +122,32 @@ increase the size of reports.
+ .B \-z, \--compression-type METHOD
+ Override the default compression type specified by the active policy.
+ .TP
++.B \--encrypt-key KEY
++Encrypts the resulting archive that sosreport produces using GPG. KEY must be
++an existing key in the user's keyring as GPG does not allow for keyfiles.
++KEY can be any value accepted by gpg's 'recipient' option.
++
++Note that the user running sosreport must match the user owning the keyring
++from which keys will be obtained. In particular this means that if sudo is
++used to run sosreport, the keyring must also be set up using sudo
++(or direct shell access to the account).
++
++Users should be aware that encrypting the final archive will result in sos
++using double the amount of temporary disk space - the encrypted archive must be
++written as a separate, rather than replacement, file within the temp directory
++that sos writes the archive to. However, since the encrypted archive will be
++the same size as the original archive, there is no additional space consumption
++once the temporary directory is removed at the end of execution.
++
++This means that only the encrypted archive is present on disk after sos
++finishes running.
++
++If encryption fails for any reason, the original unencrypted archive is
++preserved instead.
++.TP
++.B \--encrypt-pass PASS
++The same as \--encrypt-key, but use the provided PASS for symmetric encryption
++rather than key-pair encryption.
+ .TP
+ .B \--batch
+ Generate archive without prompting for interactive input.
+diff --git a/sos/__init__.py b/sos/__init__.py
+index ef4524c60..cd9779bdc 100644
+--- a/sos/__init__.py
++++ b/sos/__init__.py
+@@ -45,10 +45,10 @@ def _default(msg):
+ _arg_names = [
+     'add_preset', 'alloptions', 'all_logs', 'batch', 'build', 'case_id',
+     'chroot', 'compression_type', 'config_file', 'desc', 'debug', 'del_preset',
+-    'enableplugins', 'experimental', 'label', 'list_plugins', 'list_presets',
+-    'list_profiles', 'log_size', 'noplugins', 'noreport', 'note',
+-    'onlyplugins', 'plugopts', 'preset', 'profiles', 'quiet', 'sysroot',
+-    'threads', 'tmp_dir', 'verbosity', 'verify'
++    'enableplugins', 'encrypt_key', 'encrypt_pass', 'experimental', 'label',
++    'list_plugins', 'list_presets', 'list_profiles', 'log_size', 'noplugins',
++    'noreport', 'note', 'onlyplugins', 'plugopts', 'preset', 'profiles',
++    'quiet', 'sysroot', 'threads', 'tmp_dir', 'verbosity', 'verify'
+ ]
+ 
+ #: Arguments with non-zero default values
+@@ -84,6 +84,8 @@ class SoSOptions(object):
+     del_preset = ""
+     desc = ""
+     enableplugins = []
++    encrypt_key = None
++    encrypt_pass = None
+     experimental = False
+     label = ""
+     list_plugins = False
+diff --git a/sos/archive.py b/sos/archive.py
+index e153c09ad..263e3dd3f 100644
+--- a/sos/archive.py
++++ b/sos/archive.py
+@@ -142,11 +142,12 @@ class FileCacheArchive(Archive):
+     _archive_root = ""
+     _archive_name = ""
+ 
+-    def __init__(self, name, tmpdir, policy, threads):
++    def __init__(self, name, tmpdir, policy, threads, enc_opts):
+         self._name = name
+         self._tmp_dir = tmpdir
+         self._policy = policy
+         self._threads = threads
++        self.enc_opts = enc_opts
+         self._archive_root = os.path.join(tmpdir, name)
+         with self._path_lock:
+             os.makedirs(self._archive_root, 0o700)
+@@ -384,12 +385,65 @@ def finalize(self, method):
+                       os.stat(self._archive_name).st_size))
+         self.method = method
+         try:
+-            return self._compress()
++            res = self._compress()
+         except Exception as e:
+             exp_msg = "An error occurred compressing the archive: "
+             self.log_error("%s %s" % (exp_msg, e))
+             return self.name()
+ 
++        if self.enc_opts['encrypt']:
++            try:
++                return self._encrypt(res)
++            except Exception as e:
++                exp_msg = "An error occurred encrypting the archive:"
++                self.log_error("%s %s" % (exp_msg, e))
++                return res
++        else:
++            return res
++
++    def _encrypt(self, archive):
++        """Encrypts the compressed archive using GPG.
++
++        If encryption fails for any reason, it should be logged by sos but not
++        cause execution to stop. The assumption is that the unencrypted archive
++        would still be of use to the user, and/or that the end user has another
++        means of securing the archive.
++
++        Returns the name of the encrypted archive, or raises an exception to
++        signal that encryption failed and the unencrypted archive name should
++        be used.
++        """
++        arc_name = archive.replace("sosreport-", "secured-sosreport-")
++        arc_name += ".gpg"
++        enc_cmd = "gpg --batch -o %s " % arc_name
++        env = None
++        if self.enc_opts["key"]:
++            # need to assume a trusted key here to be able to encrypt the
++            # archive non-interactively
++            enc_cmd += "--trust-model always -e -r %s " % self.enc_opts["key"]
++            enc_cmd += archive
++        if self.enc_opts["password"]:
++            # prevent change of gpg options using a long password, but also
++            # prevent the addition of quote characters to the passphrase
++            passwd = "%s" % self.enc_opts["password"].replace('\'"', '')
++            env = {"sos_gpg": passwd}
++            enc_cmd += "-c --passphrase-fd 0 "
++            enc_cmd = "/bin/bash -c \"echo $sos_gpg | %s\"" % enc_cmd
++            enc_cmd += archive
++        r = sos_get_command_output(enc_cmd, timeout=0, env=env)
++        if r["status"] == 0:
++            return arc_name
++        elif r["status"] == 2:
++            if self.enc_opts["key"]:
++                msg = "Specified key not in keyring"
++            else:
++                msg = "Could not read passphrase"
++        else:
++            # TODO: report the actual error from gpg. Currently, we cannot as
++            # sos_get_command_output() does not capture stderr
++            msg = "gpg exited with code %s" % r["status"]
++        raise Exception(msg)
++
+ 
+ # Compatibility version of the tarfile.TarFile class. This exists to allow
+ # compatibility with PY2 runtimes that lack the 'filter' parameter to the
+@@ -468,8 +522,9 @@ class TarFileArchive(FileCacheArchive):
+     method = None
+     _with_selinux_context = False
+ 
+-    def __init__(self, name, tmpdir, policy, threads):
+-        super(TarFileArchive, self).__init__(name, tmpdir, policy, threads)
++    def __init__(self, name, tmpdir, policy, threads, enc_opts):
++        super(TarFileArchive, self).__init__(name, tmpdir, policy, threads,
++                                             enc_opts)
+         self._suffix = "tar"
+         self._archive_name = os.path.join(tmpdir, self.name())
+ 
+diff --git a/sos/sosreport.py b/sos/sosreport.py
+index 60802617c..00c3e8110 100644
+--- a/sos/sosreport.py
++++ b/sos/sosreport.py
+@@ -316,6 +316,13 @@ def _parse_args(args):
+     preset_grp.add_argument("--del-preset", type=str, action="store",
+                             help="Delete the named command line preset")
+ 
++    encrypt_grp = parser.add_mutually_exclusive_group()
++    encrypt_grp.add_argument("--encrypt-key",
++                             help="Encrypt the final archive using a GPG "
++                                  "key-pair")
++    encrypt_grp.add_argument("--encrypt-pass",
++                             help="Encrypt the final archive using a password")
++
+     return parser.parse_args(args)
+ 
+ 
+@@ -431,16 +438,25 @@ def get_temp_file(self):
+         return self.tempfile_util.new()
+ 
+     def _set_archive(self):
++        enc_opts = {
++            'encrypt': True if (self.opts.encrypt_pass or
++                                self.opts.encrypt_key) else False,
++            'key': self.opts.encrypt_key,
++            'password': self.opts.encrypt_pass
++        }
++
+         archive_name = os.path.join(self.tmpdir,
+                                     self.policy.get_archive_name())
+         if self.opts.compression_type == 'auto':
+             auto_archive = self.policy.get_preferred_archive()
+             self.archive = auto_archive(archive_name, self.tmpdir,
+-                                        self.policy, self.opts.threads)
++                                        self.policy, self.opts.threads,
++                                        enc_opts)
+ 
+         else:
+             self.archive = TarFileArchive(archive_name, self.tmpdir,
+-                                          self.policy, self.opts.threads)
++                                          self.policy, self.opts.threads,
++                                          enc_opts)
+ 
+         self.archive.set_debug(True if self.opts.debug else False)
+ 
+diff --git a/tests/archive_tests.py b/tests/archive_tests.py
+index b4dd8d0ff..e5b329b5f 100644
+--- a/tests/archive_tests.py
++++ b/tests/archive_tests.py
+@@ -19,7 +19,8 @@ class TarFileArchiveTest(unittest.TestCase):
+ 
+     def setUp(self):
+         self.tmpdir = tempfile.mkdtemp()
+-        self.tf = TarFileArchive('test', self.tmpdir, Policy(), 1)
++        enc = {'encrypt': False}
++        self.tf = TarFileArchive('test', self.tmpdir, Policy(), 1, enc)
+ 
+     def tearDown(self):
+         shutil.rmtree(self.tmpdir)
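
A minimal standalone sketch of the two encryption modes added above. The
patch shells out to gpg, feeding a symmetric passphrase through an
environment variable and a bash pipe so it stays off the command line;
the helper below mirrors that behaviour with subprocess instead. The
function name, file names and the usage line are illustrative only and
are not part of sos.

    import subprocess

    def encrypt_archive(archive, key=None, password=None):
        """Return the path of an encrypted copy of ``archive``.

        Asymmetric mode uses a key already imported into the invoking
        user's keyring; symmetric mode feeds the passphrase on stdin so
        it never appears in the process list. GnuPG 2.1+ may also need
        --pinentry-mode loopback for the symmetric case.
        """
        enc_name = archive + ".gpg"
        if key:
            cmd = ["gpg", "--batch", "--trust-model", "always",
                   "-o", enc_name, "-e", "-r", key, archive]
            subprocess.run(cmd, check=True)
        elif password:
            cmd = ["gpg", "--batch", "-o", enc_name, "-c",
                   "--passphrase-fd", "0", archive]
            subprocess.run(cmd, input=password.encode(), check=True)
        else:
            raise ValueError("either key or password is required")
        return enc_name

    # e.g. encrypt_archive("sosreport-example.tar.xz", password="secret")
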
diff --git a/SOURCES/sos-bz1596494-cds-on-rhui3.patch b/SOURCES/sos-bz1596494-cds-on-rhui3.patch
new file mode 100644
index 0000000..5c55040
--- /dev/null
+++ b/SOURCES/sos-bz1596494-cds-on-rhui3.patch
@@ -0,0 +1,33 @@
+From 62f4affbc9fb6da06dd5707e9aa659d206352e87 Mon Sep 17 00:00:00 2001
+From: Pavel Moravec <pmoravec@redhat.com>
+Date: Tue, 3 Jul 2018 13:02:09 +0200
+Subject: [PATCH] [rhui] Fix detection of CDS for RHUI3
+
+Detection of a CDS node on RHUI 3 can't rely on the deprecated pulp-cds
+package; it must check for the rhui-mirrorlist package instead.
+
+Resolves: #1375
+
+Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
+Signed-off-by: Bryn M. Reeves <bmr@redhat.com>
+---
+ sos/plugins/rhui.py | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/sos/plugins/rhui.py b/sos/plugins/rhui.py
+index 2b1e2baa7..459a89831 100644
+--- a/sos/plugins/rhui.py
++++ b/sos/plugins/rhui.py
+@@ -22,7 +22,11 @@ class Rhui(Plugin, RedHatPlugin):
+     files = [rhui_debug_path]
+ 
+     def setup(self):
+-        if self.is_installed("pulp-cds"):
++        cds_installed = [
++            self.is_installed("pulp-cds"),
++            self.is_installed("rhui-mirrorlist")
++        ]
++        if any(cds_installed):
+             cds = "--cds"
+         else:
+             cds = ""
diff --git a/SOURCES/sos-bz1597532-stat-isblk.patch b/SOURCES/sos-bz1597532-stat-isblk.patch
new file mode 100644
index 0000000..6200ffd
--- /dev/null
+++ b/SOURCES/sos-bz1597532-stat-isblk.patch
@@ -0,0 +1,36 @@
+From 4127d02f00561b458398ce2b5ced7ae853b23227 Mon Sep 17 00:00:00 2001
+From: Bryan Quigley <bryan.quigley@canonical.com>
+Date: Mon, 2 Jul 2018 16:48:21 -0400
+Subject: [PATCH] [archive] fix stat typo
+
+The stat checks are just missing the S_ prefix, so if that code path
+is reached it fails.
+
+Fixes: #1373
+Resolves: #1374
+
+Signed-off-by: Bryan Quigley <bryan.quigley@canonical.com>
+Signed-off-by: Bryn M. Reeves <bmr@redhat.com>
+---
+ sos/archive.py | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/sos/archive.py b/sos/archive.py
+index 263e3dd3f..fdf6f9a80 100644
+--- a/sos/archive.py
++++ b/sos/archive.py
+@@ -204,10 +204,10 @@ def _check_path(self, src, path_type, dest=None, force=False):
+ 
+         def is_special(mode):
+             return any([
+-                stat.ISBLK(mode),
+-                stat.ISCHR(mode),
+-                stat.ISFIFO(mode),
+-                stat.ISSOCK(mode)
++                stat.S_ISBLK(mode),
++                stat.S_ISCHR(mode),
++                stat.S_ISFIFO(mode),
++                stat.S_ISSOCK(mode)
+             ])
+ 
+         if force:
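
For context, the corrected check relies on the stat module's S_IS* mode
tests. A self-contained equivalent of the fixed code (the helper name
and the sample paths are illustrative, not sos code):

    import os
    import stat

    def is_special(path):
        """True for block/char devices, FIFOs and sockets."""
        mode = os.lstat(path).st_mode
        return any([
            stat.S_ISBLK(mode),
            stat.S_ISCHR(mode),
            stat.S_ISFIFO(mode),
            stat.S_ISSOCK(mode),
        ])

    # e.g. is_special("/dev/null") -> True, is_special("/etc/hosts") -> False
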
diff --git a/SOURCES/sos-bz1600158-rhv-log-collector-analyzer.patch b/SOURCES/sos-bz1600158-rhv-log-collector-analyzer.patch
new file mode 100644
index 0000000..6930786
--- /dev/null
+++ b/SOURCES/sos-bz1600158-rhv-log-collector-analyzer.patch
@@ -0,0 +1,66 @@
+From d297b2116fd864c65dba76b343f5101466c0eeb7 Mon Sep 17 00:00:00 2001
+From: Douglas Schilling Landgraf <dougsland@gmail.com>
+Date: Tue, 10 Jul 2018 12:03:41 -0400
+Subject: [PATCH] [rhv-log-collector-analyzer] Add new plugin for RHV
+
+This commit adds the rhv-log-collector-analyzer plugin, which will
+collect:
+
+- Output of rhv-log-collector-analyzer --json
+- The HTML report generated by --live
+
+Signed-off-by: Douglas Schilling Landgraf <dougsland@redhat.com>
+---
+ sos/plugins/rhv_analyzer.py | 40 +++++++++++++++++++++++++++++++++++++
+ 1 file changed, 40 insertions(+)
+ create mode 100644 sos/plugins/rhv_analyzer.py
+
+diff --git a/sos/plugins/rhv_analyzer.py b/sos/plugins/rhv_analyzer.py
+new file mode 100644
+index 00000000..7c233a0b
+--- /dev/null
++++ b/sos/plugins/rhv_analyzer.py
+@@ -0,0 +1,40 @@
++# Copyright (C) 2018 Red Hat, Inc.
++#
++# This file is part of the sos project: https://github.com/sosreport/sos
++#
++# This copyrighted material is made available to anyone wishing to use,
++# modify, copy, or redistribute it subject to the terms and conditions of
++# version 2 of the GNU General Public License.
++#
++# See the LICENSE file in the source distribution for further information.
++
++from sos.plugins import Plugin, RedHatPlugin
++
++
++class RhvLogCollectorAnalyzer(Plugin, RedHatPlugin):
++    """RHV Log Collector Analyzer"""
++
++    packages = ('rhv-log-collector-analyzer',)
++
++    plugin_name = 'RhvLogCollectorAnalyzer'
++    profiles = ('virt',)
++
++    def setup(self):
++        tool_name = 'rhv-log-collector-analyzer'
++        report = "{dircmd}/analyzer-report.html".format(
++            dircmd=self.get_cmd_output_path()
++        )
++
++        self.add_cmd_output(
++            "{tool_name}"
++            " --live"
++            " --html={report}".format(
++                report=report, tool_name=tool_name)
++        )
++
++        self.add_cmd_output(
++            "{tool_name}"
++            " --json".format(tool_name=tool_name)
++        )
++
++# vim: expandtab tabstop=4 shiftwidth=4
+-- 
+2.17.1
+
diff --git a/SOURCES/sos-bz1608384-archive-name-sanitize.patch b/SOURCES/sos-bz1608384-archive-name-sanitize.patch
new file mode 100644
index 0000000..4c48384
--- /dev/null
+++ b/SOURCES/sos-bz1608384-archive-name-sanitize.patch
@@ -0,0 +1,52 @@
+From bc650cd161548159e551ddc201596bf19b1865d0 Mon Sep 17 00:00:00 2001
+From: Pavel Moravec <pmoravec@redhat.com>
+Date: Fri, 27 Jul 2018 08:56:37 +0200
+Subject: [PATCH] [policies] sanitize report label
+
+Just as we sanitize the case id, we should sanitize the report label,
+e.g. to exclude spaces from the final tarball name.
+
+Resolves: #1389
+
+Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
+---
+ sos/policies/__init__.py | 9 +++------
+ 1 file changed, 3 insertions(+), 6 deletions(-)
+
+diff --git a/sos/policies/__init__.py b/sos/policies/__init__.py
+index 7b301dec..65d8aac6 100644
+--- a/sos/policies/__init__.py
++++ b/sos/policies/__init__.py
+@@ -408,7 +408,7 @@ No changes will be made to system configuration.
+             date=date,
+             rand=rand
+         )
+-        return time.strftime(nstr)
++        return self.sanitize_filename(time.strftime(nstr))
+ 
+     # for some specific binaries like "xz", we need to determine package
+     # providing it; that is policy specific. By default return the binary
+@@ -726,8 +726,8 @@ class LinuxPolicy(Policy):
+         """Returns the name usd in the pre_work step"""
+         return self.host_name()
+ 
+-    def sanitize_case_id(self, case_id):
+-        return re.sub(r"[^-a-z,A-Z.0-9]", "", case_id)
++    def sanitize_filename(self, name):
++        return re.sub(r"[^-a-z,A-Z.0-9]", "", name)
+ 
+     def lsmod(self):
+         """Return a list of kernel module names as strings.
+@@ -755,9 +755,6 @@ class LinuxPolicy(Policy):
+         if cmdline_opts.case_id:
+             self.case_id = cmdline_opts.case_id
+ 
+-        if self.case_id:
+-            self.case_id = self.sanitize_case_id(self.case_id)
+-
+         return
+ 
+ 
+-- 
+2.17.1
+
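
The sanitizer keeps only characters matching [-a-z,A-Z.0-9], so spaces
and shell metacharacters are dropped from both the case id and the
report label before they reach the tarball name. A quick illustration
(the inputs are made up):

    import re

    def sanitize_filename(name):
        # same pattern as the patch: keep only alphanumerics, '-', ',', '.'
        return re.sub(r"[^-a-z,A-Z.0-9]", "", name)

    print(sanitize_filename("my customer label 01234"))  # mycustomerlabel01234
    print(sanitize_filename("label with spaces!"))       # labelwithspaces
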
diff --git a/SOURCES/sos-bz1609135-ceph-dont-collect-tmp-mnt.patch b/SOURCES/sos-bz1609135-ceph-dont-collect-tmp-mnt.patch
new file mode 100644
index 0000000..400c654
--- /dev/null
+++ b/SOURCES/sos-bz1609135-ceph-dont-collect-tmp-mnt.patch
@@ -0,0 +1,44 @@
+From dfed1abf3cac691cfc669bbf4e07e58e2e637776 Mon Sep 17 00:00:00 2001
+From: Pavel Moravec <pmoravec@redhat.com>
+Date: Fri, 27 Jul 2018 08:27:45 +0200
+Subject: [PATCH] [apparmor,ceph] fix typo in add_forbidden_path
+
+commit 29a40b7 removed leading '/' from two forbidden paths
+
+Resolves: #1388
+
+Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
+---
+ sos/plugins/apparmor.py | 2 +-
+ sos/plugins/ceph.py     | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/sos/plugins/apparmor.py b/sos/plugins/apparmor.py
+index c4c64baf..e239c0b5 100644
+--- a/sos/plugins/apparmor.py
++++ b/sos/plugins/apparmor.py
+@@ -26,7 +26,7 @@ class Apparmor(Plugin, UbuntuPlugin):
+         self.add_forbidden_path([
+             "/etc/apparmor.d/cache",
+             "/etc/apparmor.d/libvirt/libvirt*",
+-            "etc/apparmor.d/abstractions"
++            "/etc/apparmor.d/abstractions"
+         ])
+ 
+         self.add_cmd_output([
+diff --git a/sos/plugins/ceph.py b/sos/plugins/ceph.py
+index 10e48b62..ed6816b2 100644
+--- a/sos/plugins/ceph.py
++++ b/sos/plugins/ceph.py
+@@ -77,7 +77,7 @@ class Ceph(Plugin, RedHatPlugin, UbuntuPlugin):
+             "/var/lib/ceph/mon/*",
+             # Excludes temporary ceph-osd mount location like
+             # /var/lib/ceph/tmp/mnt.XXXX from sos collection.
+-            "var/lib/ceph/tmp/*mnt*",
++            "/var/lib/ceph/tmp/*mnt*",
+             "/etc/ceph/*bindpass*"
+         ])
+ 
+-- 
+2.17.1
+
diff --git a/SOURCES/sos-bz1613806-rhosp-lsof-optional.patch b/SOURCES/sos-bz1613806-rhosp-lsof-optional.patch
new file mode 100644
index 0000000..9a555bb
--- /dev/null
+++ b/SOURCES/sos-bz1613806-rhosp-lsof-optional.patch
@@ -0,0 +1,113 @@
+From a55680e6c8ac87fdf4ee3100717001c1f6f6a08b Mon Sep 17 00:00:00 2001
+From: Pavel Moravec <pmoravec@redhat.com>
+Date: Thu, 9 Aug 2018 08:59:53 +0200
+Subject: [PATCH 1/3] [process] make lsof execution optional
+
+Make calling of the lsof command optional (but enabled by default).
+
+Also remove the "collect lsof-threads when --all-logs" behaviour, as
+all-logs has nothing to do with lsof-threads.
+
+Resolves: #1394
+
+Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
+---
+ sos/plugins/process.py | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/sos/plugins/process.py b/sos/plugins/process.py
+index 755eec8d..d1c455a5 100644
+--- a/sos/plugins/process.py
++++ b/sos/plugins/process.py
+@@ -17,6 +17,7 @@ class Process(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin):
+     profiles = ('system',)
+ 
+     option_list = [
++        ("lsof", "gathers information on all open files", "slow", True),
+         ("lsof-threads", "gathers threads' open file info if supported",
+          "slow", False)
+     ]
+@@ -35,9 +36,10 @@ class Process(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin):
+ 
+         self.add_cmd_output("ps auxwww", root_symlink="ps")
+         self.add_cmd_output("pstree", root_symlink="pstree")
+-        self.add_cmd_output("lsof -b +M -n -l -c ''", root_symlink="lsof")
++        if self.get_option("lsof"):
++            self.add_cmd_output("lsof -b +M -n -l -c ''", root_symlink="lsof")
+ 
+-        if self.get_option("lsof-threads") or self.get_option("all_logs"):
++        if self.get_option("lsof-threads"):
+             self.add_cmd_output("lsof -b +M -n -l")
+ 
+         self.add_cmd_output([
+-- 
+2.17.1
+
+From 48a1a00685c680ba9fbd5c9b10377e8d0551a926 Mon Sep 17 00:00:00 2001
+From: Pavel Moravec <pmoravec@redhat.com>
+Date: Thu, 9 Aug 2018 18:11:38 +0200
+Subject: [PATCH 2/3] [policies] RHOSP preset with -k process.lsof=off
+
+Make lsof calls on OSP systems disabled by default.
+
+Relevant to: #1395
+
+Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
+---
+ sos/policies/redhat.py | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/sos/policies/redhat.py b/sos/policies/redhat.py
+index cfbf7808..ee687d46 100644
+--- a/sos/policies/redhat.py
++++ b/sos/policies/redhat.py
+@@ -192,6 +192,8 @@ ENV_HOST_SYSROOT = 'HOST'
+ _opts_verify = SoSOptions(verify=True)
+ _opts_all_logs = SoSOptions(all_logs=True)
+ _opts_all_logs_verify = SoSOptions(all_logs=True, verify=True)
++_opts_all_logs_no_lsof = SoSOptions(all_logs=True,
++                                    plugopts=['process.lsof=off'])
+ 
+ RHEL_RELEASE_STR = "Red Hat Enterprise Linux"
+ 
+@@ -219,7 +221,7 @@ rhel_presets = {
+                         opts=_opts_verify),
+     RHEL: PresetDefaults(name=RHEL, desc=RHEL_DESC),
+     RHOSP: PresetDefaults(name=RHOSP, desc=RHOSP_DESC, note=NOTE_SIZE,
+-                          opts=_opts_all_logs),
++                          opts=_opts_all_logs_no_lsof),
+     RHOCP: PresetDefaults(name=RHOCP, desc=RHOCP_DESC, note=NOTE_SIZE_TIME,
+                           opts=_opts_all_logs_verify),
+     RH_SATELLITE: PresetDefaults(name=RH_SATELLITE, desc=RH_SATELLITE_DESC,
+-- 
+2.17.1
+
+From 84c30742254a536f70bb4217756416bcf0e8a51b Mon Sep 17 00:00:00 2001
+From: Pavel Moravec <pmoravec@redhat.com>
+Date: Thu, 9 Aug 2018 18:14:56 +0200
+Subject: [PATCH 3/3] [policies] enable RHOSP preset by presence of
+ rhosp-release package
+
+Resolves: #1395
+
+Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
+---
+ sos/policies/redhat.py | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/sos/policies/redhat.py b/sos/policies/redhat.py
+index ee687d46..5bfbade2 100644
+--- a/sos/policies/redhat.py
++++ b/sos/policies/redhat.py
+@@ -315,6 +315,8 @@ No changes will be made to system configuration.
+         # Package based checks
+         if self.pkg_by_name("satellite-common") is not None:
+             return self.find_preset(RH_SATELLITE)
++        if self.pkg_by_name("rhosp-release") is not None:
++            return self.find_preset(RHOSP)
+ 
+         # Vanilla RHEL is default
+         return self.find_preset(RHEL)
+-- 
+2.17.1
+
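
The preset machinery used above is essentially a named bundle of option
defaults selected by a package probe at policy load time. A loose,
self-contained sketch of that idea (the dict and helper below are
illustrative only and are not the sos PresetDefaults API):

    # Each preset maps to the option defaults it should apply.
    PRESETS = {
        "rhosp": {"all_logs": True, "plugopts": ["process.lsof=off"]},
        "rhel": {},
    }

    def probe_preset(installed_packages):
        """Pick a preset by package presence, as the policy patch does."""
        if "rhosp-release" in installed_packages:
            return "rhosp"
        return "rhel"

    opts = PRESETS[probe_preset({"bash", "rhosp-release"})]
    print(opts)  # {'all_logs': True, 'plugopts': ['process.lsof=off']}
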
diff --git a/SOURCES/sos-bz1616030-etcd-kube-osp-3-10.patch b/SOURCES/sos-bz1616030-etcd-kube-osp-3-10.patch
new file mode 100644
index 0000000..b08251c
--- /dev/null
+++ b/SOURCES/sos-bz1616030-etcd-kube-osp-3-10.patch
@@ -0,0 +1,325 @@
+From 6372a7f7f09511d864aa6bd894109d937f4fda65 Mon Sep 17 00:00:00 2001
+From: Jake Hunsaker <jhunsake@redhat.com>
+Date: Thu, 12 Jul 2018 12:36:25 -0400
+Subject: [PATCH 1/3] [kubernetes|etcd] Support OpenShift 3.10 deployments
+
+The 3.10 version of OCP changes the deployment configurations for etcd
+and kubernetes components, and additionally changes the way the etcdctl
+command is called when running in a static pod. Update these plugins to
+support this new deployment style.
+
+Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
+---
+ sos/plugins/etcd.py       |  11 ++-
+ sos/plugins/kubernetes.py | 148 +++++++++++++++++++-------------------
+ 2 files changed, 83 insertions(+), 76 deletions(-)
+
+diff --git a/sos/plugins/etcd.py b/sos/plugins/etcd.py
+index c343f750..c8ee3849 100644
+--- a/sos/plugins/etcd.py
++++ b/sos/plugins/etcd.py
+@@ -10,6 +10,7 @@
+ # See the LICENSE file in the source distribution for further information.
+ 
+ from sos.plugins import Plugin, RedHatPlugin
++from os import path
+ 
+ 
+ class etcd(Plugin, RedHatPlugin):
+@@ -19,10 +20,14 @@ class etcd(Plugin, RedHatPlugin):
+     plugin_name = 'etcd'
+     packages = ('etcd',)
+     profiles = ('container', 'system', 'services', 'cluster')
+-
+-    cmd = 'etcdctl'
++    files = ('/etc/origin/node/pods/etcd.yaml',)
+ 
+     def setup(self):
++        if path.exists('/etc/origin/node/pods/etcd.yaml'):
++            etcd_cmd = 'master-exec etcd etcd etcdctl'
++        else:
++            etcd_cmd = 'etcdctl'
++
+         etcd_url = self.get_etcd_url()
+ 
+         self.add_forbidden_path('/etc/etcd/ca')
+@@ -35,7 +40,7 @@ class etcd(Plugin, RedHatPlugin):
+            'ls --recursive',
+         ]
+ 
+-        self.add_cmd_output(['%s %s' % (self.cmd, sub) for sub in subcmds])
++        self.add_cmd_output(['%s %s' % (etcd_cmd, sub) for sub in subcmds])
+ 
+         urls = [
+             '/v2/stats/leader',
+diff --git a/sos/plugins/kubernetes.py b/sos/plugins/kubernetes.py
+index e75c7a37..21cb51df 100644
+--- a/sos/plugins/kubernetes.py
++++ b/sos/plugins/kubernetes.py
+@@ -18,11 +18,16 @@ class kubernetes(Plugin, RedHatPlugin):
+     """Kubernetes plugin
+     """
+ 
+-    # Red Hat Atomic Platform and OpenShift Enterprise use the
+-    # atomic-openshift-master package to provide kubernetes
++    # OpenShift Container Platform uses the atomic-openshift-master package
++    # to provide kubernetes
+     packages = ('kubernetes', 'kubernetes-master', 'atomic-openshift-master')
+     profiles = ('container',)
+-    files = ("/etc/origin/master/master-config.yaml",)
++    # use files only for masters, rely on package list for nodes
++    files = (
++        "/var/run/kubernetes/apiserver.key",
++        "/etc/origin/master/",
++        "/etc/origin/node/pods/master-config.yaml"
++    )
+ 
+     option_list = [
+         ("all", "also collect all namespaces output separately",
+@@ -33,12 +38,7 @@ class kubernetes(Plugin, RedHatPlugin):
+     ]
+ 
+     def check_is_master(self):
+-        if any([
+-            path.exists("/var/run/kubernetes/apiserver.key"),
+-            path.exists("/etc/origin/master/master-config.yaml")
+-        ]):
+-            return True
+-        return False
++        return any([path.exists(f) for f in self.files])
+ 
+     def setup(self):
+         self.add_copy_spec("/etc/kubernetes")
+@@ -56,74 +56,76 @@ class kubernetes(Plugin, RedHatPlugin):
+             self.add_journal(units=svc)
+ 
+         # We can only grab kubectl output from the master
+-        if self.check_is_master():
+-            kube_cmd = "kubectl "
+-            if path.exists('/etc/origin/master/admin.kubeconfig'):
+-                kube_cmd += "--config=/etc/origin/master/admin.kubeconfig"
+-
+-            kube_get_cmd = "get -o json "
+-            for subcmd in ['version', 'config view']:
+-                self.add_cmd_output('%s %s' % (kube_cmd, subcmd))
+-
+-            # get all namespaces in use
+-            kn = self.get_command_output('%s get namespaces' % kube_cmd)
+-            knsps = [n.split()[0] for n in kn['output'].splitlines()[1:] if n]
+-
+-            resources = [
+-                'limitrange',
+-                'pods',
+-                'pvc',
+-                'rc',
+-                'resourcequota',
+-                'services'
+-            ]
+-
+-            # nodes and pvs are not namespaced, must pull separately.
+-            # Also collect master metrics
+-            self.add_cmd_output([
+-                "{} get -o json nodes".format(kube_cmd),
+-                "{} get -o json pv".format(kube_cmd),
+-                "{} get --raw /metrics".format(kube_cmd)
+-            ])
+-
+-            for n in knsps:
+-                knsp = '--namespace=%s' % n
+-                if self.get_option('all'):
+-                    k_cmd = '%s %s %s' % (kube_cmd, kube_get_cmd, knsp)
+-
+-                    self.add_cmd_output('%s events' % k_cmd)
++        if not self.check_is_master():
++            return
++
++        kube_cmd = "kubectl "
++        if path.exists('/etc/origin/master/admin.kubeconfig'):
++            kube_cmd += "--config=/etc/origin/master/admin.kubeconfig"
++
++        kube_get_cmd = "get -o json "
++        for subcmd in ['version', 'config view']:
++            self.add_cmd_output('%s %s' % (kube_cmd, subcmd))
++
++        # get all namespaces in use
++        kn = self.get_command_output('%s get namespaces' % kube_cmd)
++        knsps = [n.split()[0] for n in kn['output'].splitlines()[1:] if n]
++
++        resources = [
++            'limitrange',
++            'pods',
++            'pvc',
++            'rc',
++            'resourcequota',
++            'services'
++        ]
++
++        # nodes and pvs are not namespaced, must pull separately.
++        # Also collect master metrics
++        self.add_cmd_output([
++            "{} get -o json nodes".format(kube_cmd),
++            "{} get -o json pv".format(kube_cmd),
++            "{} get --raw /metrics".format(kube_cmd)
++        ])
++
++        for n in knsps:
++            knsp = '--namespace=%s' % n
++            if self.get_option('all'):
++                k_cmd = '%s %s %s' % (kube_cmd, kube_get_cmd, knsp)
++
++                self.add_cmd_output('%s events' % k_cmd)
+ 
+-                    for res in resources:
+-                        self.add_cmd_output('%s %s' % (k_cmd, res))
+-
+-                    if self.get_option('describe'):
+-                        # need to drop json formatting for this
+-                        k_cmd = '%s get %s' % (kube_cmd, knsp)
+-                        for res in resources:
+-                            r = self.get_command_output(
+-                                '%s %s' % (k_cmd, res))
+-                            if r['status'] == 0:
+-                                k_list = [k.split()[0] for k in
+-                                          r['output'].splitlines()[1:]]
+-                                for k in k_list:
+-                                    k_cmd = '%s %s' % (kube_cmd, knsp)
+-                                    self.add_cmd_output(
+-                                        '%s describe %s %s' % (k_cmd, res, k))
+-
+-                if self.get_option('podlogs'):
+-                    k_cmd = '%s %s' % (kube_cmd, knsp)
+-                    r = self.get_command_output('%s get pods' % k_cmd)
+-                    if r['status'] == 0:
+-                        pods = [p.split()[0] for p in
+-                                r['output'].splitlines()[1:]]
+-                        for pod in pods:
+-                            self.add_cmd_output('%s logs %s' % (k_cmd, pod))
+-
+-            if not self.get_option('all'):
+-                k_cmd = '%s get --all-namespaces=true' % kube_cmd
+                 for res in resources:
+                     self.add_cmd_output('%s %s' % (k_cmd, res))
+ 
++                if self.get_option('describe'):
++                    # need to drop json formatting for this
++                    k_cmd = '%s get %s' % (kube_cmd, knsp)
++                    for res in resources:
++                        r = self.get_command_output(
++                            '%s %s' % (k_cmd, res))
++                        if r['status'] == 0:
++                            k_list = [k.split()[0] for k in
++                                      r['output'].splitlines()[1:]]
++                            for k in k_list:
++                                k_cmd = '%s %s' % (kube_cmd, knsp)
++                                self.add_cmd_output(
++                                    '%s describe %s %s' % (k_cmd, res, k))
++
++            if self.get_option('podlogs'):
++                k_cmd = '%s %s' % (kube_cmd, knsp)
++                r = self.get_command_output('%s get pods' % k_cmd)
++                if r['status'] == 0:
++                    pods = [p.split()[0] for p in
++                            r['output'].splitlines()[1:]]
++                    for pod in pods:
++                        self.add_cmd_output('%s logs %s' % (k_cmd, pod))
++
++        if not self.get_option('all'):
++            k_cmd = '%s get --all-namespaces=true' % kube_cmd
++            for res in resources:
++                self.add_cmd_output('%s %s' % (k_cmd, res))
++
+     def postproc(self):
+         # First, clear sensitive data from the json output collected.
+         # This will mask values when the "name" looks susceptible of
+-- 
+2.17.1
+
+
+From 63ad6c251ab88ab2f0e07ae9e3f1b2771d5e90ca Mon Sep 17 00:00:00 2001
+From: Jake Hunsaker <jhunsake@redhat.com>
+Date: Thu, 12 Jul 2018 13:07:34 -0400
+Subject: [PATCH 2/3] [kubernetes] Correct config option syntax
+
+Versions of kubernetes after 1.5 use --kubeconfig instead of --config to
+specify a configuration file to use for kubectl commands. Update the
+kubernetes plugin to use the proper syntax.
+
+Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
+---
+ sos/plugins/kubernetes.py | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/sos/plugins/kubernetes.py b/sos/plugins/kubernetes.py
+index 21cb51df..c14e078e 100644
+--- a/sos/plugins/kubernetes.py
++++ b/sos/plugins/kubernetes.py
+@@ -61,7 +61,7 @@ class kubernetes(Plugin, RedHatPlugin):
+ 
+         kube_cmd = "kubectl "
+         if path.exists('/etc/origin/master/admin.kubeconfig'):
+-            kube_cmd += "--config=/etc/origin/master/admin.kubeconfig"
++            kube_cmd += "--kubeconfig=/etc/origin/master/admin.kubeconfig"
+ 
+         kube_get_cmd = "get -o json "
+         for subcmd in ['version', 'config view']:
+-- 
+2.17.1
+
+
+From 46fffd469f4f3d07337dc335cfc24341e836f23b Mon Sep 17 00:00:00 2001
+From: Jake Hunsaker <jhunsake@redhat.com>
+Date: Thu, 12 Jul 2018 13:11:44 -0400
+Subject: [PATCH 3/3] [origin] Collect statistics information
+
+Adds collection of 'oc adm top' output for images and imagestreams.
+
+Resolves: #1165
+Closes: #1383
+
+Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
+---
+ sos/plugins/origin.py | 26 ++++++++++++++++++++------
+ 1 file changed, 20 insertions(+), 6 deletions(-)
+
+diff --git a/sos/plugins/origin.py b/sos/plugins/origin.py
+index 02bc047a..0e384117 100644
+--- a/sos/plugins/origin.py
++++ b/sos/plugins/origin.py
+@@ -124,14 +124,28 @@ class OpenShiftOrigin(Plugin):
+             #
+             # Note: Information about nodes, events, pods, and services
+             # is already collected by the Kubernetes plugin
++
++            subcmds = [
++                "describe projects",
++                "adm top images",
++                "adm top imagestreams"
++            ]
++
+             self.add_cmd_output([
+-                "%s describe projects" % oc_cmd_admin,
+-                "%s get -o json hostsubnet" % oc_cmd_admin,
+-                "%s get -o json clusternetwork" % oc_cmd_admin,
+-                "%s get -o json netnamespaces" % oc_cmd_admin,
+-                # Registry and router configs are typically here
+-                "%s get -o json dc -n default" % oc_cmd_admin,
++                '%s %s' % (oc_cmd_admin, subcmd) for subcmd in subcmds
+             ])
++
++            jcmds = [
++                "hostsubnet",
++                "clusternetwork",
++                "netnamespaces",
++                "dc -n default"
++            ]
++
++            self.add_cmd_output([
++                '%s get -o json %s' % (oc_cmd_admin, jcmd) for jcmd in jcmds
++            ])
++
+             if self.get_option('diag'):
+                 diag_cmd = "%s adm diagnostics -l 0" % oc_cmd_admin
+                 if self.get_option('diag-prevent'):
+-- 
+2.17.1
+
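
The etcd change keys the collection command off the presence of the
static pod manifest introduced in OCP 3.10. A standalone sketch of that
decision (the manifest path comes from the patch; the helper itself is
illustrative):

    from os import path

    def build_etcdctl_cmds(subcmds):
        """Prefix etcdctl subcommands the way the patched plugin does."""
        if path.exists('/etc/origin/node/pods/etcd.yaml'):
            # OCP 3.10: etcd runs as a static pod, go through master-exec
            etcd_cmd = 'master-exec etcd etcd etcdctl'
        else:
            etcd_cmd = 'etcdctl'
        return ['%s %s' % (etcd_cmd, sub) for sub in subcmds]

    print(build_etcdctl_cmds(['member list', 'cluster-health']))
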
diff --git a/SOURCES/sos-bz1623070-pipe-returncode.patch b/SOURCES/sos-bz1623070-pipe-returncode.patch
new file mode 100644
index 0000000..66c7c95
--- /dev/null
+++ b/SOURCES/sos-bz1623070-pipe-returncode.patch
@@ -0,0 +1,37 @@
+From 17bcd2bcdb8de4818b361582ac4d833ff324f4ff Mon Sep 17 00:00:00 2001
+From: "Bryn M. Reeves" <bmr@redhat.com>
+Date: Mon, 10 Sep 2018 18:06:00 +0100
+Subject: [PATCH] [utilities] wait until AsyncReader p.poll() returns None
+
+On some systems the pipe used by the AsyncReader() class and the
+sos_get_command_output() function may still be open at the time
+the p.poll() call returns. At this time the command exit status
+is undefined, leading to errors and collection failures for code
+that tests the command's exit code.
+
+Wait explicitly until poll() returns None to avoid this.
+
+Resolves: #1417
+
+Signed-off-by: Bryn M. Reeves <bmr@redhat.com>
+---
+ sos/utilities.py | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/sos/utilities.py b/sos/utilities.py
+index d112e15a..25e10429 100644
+--- a/sos/utilities.py
++++ b/sos/utilities.py
+@@ -155,7 +155,8 @@ def sos_get_command_output(command, timeout=300, stderr=False,
+ 
+         reader = AsyncReader(p.stdout, sizelimit, binary)
+         stdout = reader.get_contents()
+-        p.poll()
++        while p.poll() is None:
++            pass
+ 
+     except OSError as e:
+         if e.errno == errno.ENOENT:
+-- 
+2.17.1
+
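
The fix drains the pipe and then loops on Popen.poll() until the child's
exit status is actually available, since the returncode is None until
then. Outside of sos the same pattern looks roughly like this (a sketch;
sos's AsyncReader adds size limiting on top):

    import subprocess

    p = subprocess.Popen(["true"], stdout=subprocess.PIPE)
    out = p.stdout.read()       # drain the pipe first, as AsyncReader does
    while p.poll() is None:     # returncode stays None until the child exits
        pass                    # (p.wait() would block instead of spinning)
    print(p.returncode)         # 0
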
diff --git a/SOURCES/sos-bz1624043-symlinks-not-copied.patch b/SOURCES/sos-bz1624043-symlinks-not-copied.patch
new file mode 100644
index 0000000..8246ec8
--- /dev/null
+++ b/SOURCES/sos-bz1624043-symlinks-not-copied.patch
@@ -0,0 +1,948 @@
+From 2e07f7c4778145d4366476ecc4383d491458b541 Mon Sep 17 00:00:00 2001
+From: "Bryn M. Reeves" <bmr@redhat.com>
+Date: Fri, 31 Aug 2018 12:50:24 +0100
+Subject: [PATCH 1/4] [sosreport] properly raise exceptions when --debug is
+ given
+
+OSError and IOError exceptions were not raised to the terminal
+when --debug is in effect since they were silently caught in the
+generic exception handler.
+
+Signed-off-by: Bryn M. Reeves <bmr@redhat.com>
+---
+ sos/sosreport.py | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/sos/sosreport.py b/sos/sosreport.py
+index 00c3e811..80633966 100644
+--- a/sos/sosreport.py
++++ b/sos/sosreport.py
+@@ -995,7 +995,8 @@ class SoSReport(object):
+                 print(" %s while setting up archive" % e.strerror)
+                 print("")
+             else:
+-                raise e
++                print("Error setting up archive: %s" % e)
++                raise
+         except Exception as e:
+             self.ui_log.error("")
+             self.ui_log.error(" Unexpected exception setting up archive:")
+@@ -1467,6 +1468,8 @@ class SoSReport(object):
+             return self.final_work()
+ 
+         except (OSError):
++            if self.opts.debug:
++                raise
+             self._cleanup()
+         except (KeyboardInterrupt):
+             self.ui_log.error("\nExiting on user cancel")
+-- 
+2.17.1
+
+
+From c496d2bec8cae175faf986567e73d16d401d8564 Mon Sep 17 00:00:00 2001
+From: "Bryn M. Reeves" <bmr@redhat.com>
+Date: Fri, 31 Aug 2018 12:52:38 +0100
+Subject: [PATCH 2/4] [archive] simplify FileCacheArchive.makedirs()
+
+Simplify the makedirs() method of FileCacheArchive and have it
+bypass _check_path() and directly call os.makedirs(): a subsequent
+patch will restrict the use of the method to setting up the sos_*
+directories in the archive root.
+
+File, directory and other object type add_* methods will use a
+new method that correctly handles symbolic links in intermediate
+path components.
+
+Signed-off-by: Bryn M. Reeves <bmr@redhat.com>
+---
+ sos/archive.py | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/sos/archive.py b/sos/archive.py
+index 5d99170f..ffa54036 100644
+--- a/sos/archive.py
++++ b/sos/archive.py
+@@ -361,11 +361,11 @@ class FileCacheArchive(Archive):
+         return self._archive_root
+ 
+     def makedirs(self, path, mode=0o700):
+-        dest = self._check_path(path, P_DIR)
+-        if not dest:
+-            return
++        """Create path, including leading components.
+ 
+-        self._makedirs(self.dest_path(path))
++            Used by sos.sosreport to set up sos_* directories.
++        """
++        os.makedirs(os.path.join(self._archive_root, path), mode=mode)
+         self.log_debug("created directory at '%s' in FileCacheArchive '%s'"
+                        % (path, self._archive_root))
+ 
+-- 
+2.17.1
+
+
+From ca422720b74181b2433473428e29e90af59b3cf8 Mon Sep 17 00:00:00 2001
+From: "Bryn M. Reeves" <bmr@redhat.com>
+Date: Fri, 31 Aug 2018 12:55:51 +0100
+Subject: [PATCH 3/4] [archive] normalise dest_dir in
+ FileCacheArchive._check_path()
+
+Always set a valid dest_dir in _check_path() and do not assume
+that it can be obtained by splitting the path: in the case of
+a directory it is the unmodified 'dest' value.
+
+Signed-off-by: Bryn M. Reeves <bmr@redhat.com>
+---
+ sos/archive.py | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/sos/archive.py b/sos/archive.py
+index ffa54036..903cc672 100644
+--- a/sos/archive.py
++++ b/sos/archive.py
+@@ -191,7 +191,10 @@ class FileCacheArchive(Archive):
+                       copied now or `None` otherwise
+         """
+         dest = dest or self.dest_path(src)
+-        dest_dir = os.path.split(dest)[0]
++        if path_type == P_DIR:
++            dest_dir = dest
++        else:
++            dest_dir = os.path.split(dest)[0]
+         if not dest_dir:
+             return dest
+ 
+-- 
+2.17.1
+
+
+From 75d759066e8ee0a469abc37f48f7bfcdfe8182b5 Mon Sep 17 00:00:00 2001
+From: "Bryn M. Reeves" <bmr@redhat.com>
+Date: Fri, 31 Aug 2018 12:58:01 +0100
+Subject: [PATCH 4/4] [archive] replace FileCacheArchive._makedirs()
+
+The Python os.makedirs() implementation is inadequate for sos's
+needs: it will create leading directories given an intended path
+destination, but it is not able to reflect cases where some of
+the intermediate paths are actually symbolic links.
+
+Replace the use of os.makedirs() with a method that walks over
+the path, and either creates directories, or symbolic links (and
+their directory target) to better correspond with the content of
+the host file system.
+
+This fixes a situation where two plugins can race in the archive,
+leading to an exception in the plugin that runs last:
+
+ - /foo/bar exists and is a link to /foo/bar.qux
+ - One plugin attempts to collect /foo/bar
+ - Another plugin attempts to collect a link /foo/qux -> /foo/bar/baz
+
+If the 2nd plugin happens to run first it will create the path
+"/foo/bar" as a _directory_ (via _makedirs()). Since the archive
+now checks for matching object types when a path collision occurs,
+the first plugin will arrive at add_dir(), note that "/foo/bar" is
+present and is not a symbolic link, and will raise an exception.
+
+Correct this by ensuring that whichever plugin executes first, the
+correct link/directory path structure will be set up.
+
+Signed-off-by: Bryn M. Reeves <bmr@redhat.com>
+---
+ sos/archive.py | 72 ++++++++++++++++++++++++++++++++++++++++++++------
+ 1 file changed, 64 insertions(+), 8 deletions(-)
+
+diff --git a/sos/archive.py b/sos/archive.py
+index 903cc672..11afa7aa 100644
+--- a/sos/archive.py
++++ b/sos/archive.py
+@@ -159,6 +159,67 @@ class FileCacheArchive(Archive):
+             name = name.lstrip(os.sep)
+         return (os.path.join(self._archive_root, name))
+ 
++    def _make_leading_paths(self, src, mode=0o700):
++        """Create leading path components
++
++            The standard python `os.makedirs` is insufficient for our
++            needs: it will only create directories, and ignores the fact
++            that some path components may be symbolic links.
++        """
++        self.log_debug("Making leading paths for %s" % src)
++        root = self._archive_root
++
++        def in_archive(path):
++            """Test whether path ``path`` is inside the archive.
++            """
++            return path.startswith(os.path.join(root, ""))
++
++        if not src.startswith("/"):
++            # Sos archive path (sos_commands, sos_logs etc.)
++            src_dir = src
++        else:
++            # Host file path
++            src_dir = src if os.path.isdir(src) else os.path.split(src)[0]
++
++        # Build a list of path components in root-to-leaf order.
++        path = src_dir
++        path_comps = []
++        while path != '/' and path != '':
++            head, tail = os.path.split(path)
++            path_comps.append(tail)
++            path = head
++        path_comps.reverse()
++
++        abs_path = root
++        rel_path = ""
++
++        # Check and create components as needed
++        for comp in path_comps:
++            abs_path = os.path.join(abs_path, comp)
++
++            if not in_archive(abs_path):
++                continue
++
++            rel_path = os.path.join(rel_path, comp)
++            src_path = os.path.join("/", rel_path)
++
++            if not os.path.exists(abs_path):
++                self.log_debug("Making path %s" % abs_path)
++                if os.path.islink(src_path) and os.path.isdir(src_path):
++                    target = os.readlink(src_path)
++                    abs_target = os.path.join(root, target)
++
++                    # Recursively create leading components of target
++                    self._make_leading_paths(abs_target, mode=mode)
++
++                    self.log_debug("Making symlink '%s' -> '%s'" %
++                                   (abs_path, target))
++                    target = os.path.relpath(target)
++                    os.symlink(target, abs_path)
++                else:
++                    self.log_debug("Making directory %s" % abs_path)
++                    os.mkdir(abs_path, mode)
++
+     def _check_path(self, src, path_type, dest=None, force=False):
+         """Check a new destination path in the archive.
+ 
+@@ -203,7 +264,8 @@ class FileCacheArchive(Archive):
+             raise ValueError("path '%s' exists and is not a directory" %
+                              dest_dir)
+         elif not os.path.exists(dest_dir):
+-            self._makedirs(dest_dir)
++            src_dir = src if path_type == P_DIR else os.path.split(src)[0]
++            self._make_leading_paths(src_dir)
+ 
+         def is_special(mode):
+             return any([
+@@ -326,10 +388,7 @@ class FileCacheArchive(Archive):
+ 
+     def add_dir(self, path):
+         with self._path_lock:
+-            dest = self._check_path(path, P_DIR)
+-            if not dest:
+-                return
+-            self.makedirs(path)
++            self._check_path(path, P_DIR)
+ 
+     def add_node(self, path, mode, device):
+         dest = self._check_path(path, P_NODE)
+@@ -347,9 +406,6 @@ class FileCacheArchive(Archive):
+                 raise e
+             shutil.copystat(path, dest)
+ 
+-    def _makedirs(self, path, mode=0o700):
+-        os.makedirs(path, mode)
+-
+     def name_max(self):
+         if 'PC_NAME_MAX' in os.pathconf_names:
+             pc_name_max = os.pathconf_names['PC_NAME_MAX']
+-- 
+2.17.1
+
+From 5d6228b85e174dee8abcc4c206a1e9034242c6c6 Mon Sep 17 00:00:00 2001
+From: "Bryn M. Reeves" <bmr@redhat.com>
+Date: Fri, 7 Sep 2018 12:06:34 -0400
+Subject: [PATCH 1/6] [sosreport] ensure ThreadPool exceptions are raised
+
+The ThreadPoolExecutor does not raise exceptions to the parent
+thread immediately: it stores them in-line in the pool's results
+list, and raises them to the caller on access to that slot in
+the results iterator.
+
+Make sure that these exceptions are handled by iterating over all
+results and asserting that they are non-None (in practice, this
+code is never executed since the resulting raise will trap to an
+exception handler, but it is less confusing than a bare 'pass').
+
+Signed-off-by: Bryn M. Reeves <bmr@redhat.com>
+---
+ sos/sosreport.py | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/sos/sosreport.py b/sos/sosreport.py
+index 80633966..44be75a1 100644
+--- a/sos/sosreport.py
++++ b/sos/sosreport.py
+@@ -1065,9 +1065,13 @@ class SoSReport(object):
+         try:
+             self.plugpool = ThreadPoolExecutor(self.opts.threads)
+             # Pass the plugpool its own private copy of self.pluglist
+-            self.plugpool.map(self._collect_plugin, list(self.pluglist),
+-                              chunksize=1)
++            results = self.plugpool.map(self._collect_plugin,
++                                        list(self.pluglist), chunksize=1)
+             self.plugpool.shutdown(wait=True)
++            for res in results:
++                if not res:
++                    self.soslog.debug("Unexpected plugin task result: %s" %
++                                      res)
+             self.ui_log.info("")
+         except KeyboardInterrupt:
+             # We may not be at a newline when the user issues Ctrl-C
+-- 
+2.17.1
+
+
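
The point of the change is that ThreadPoolExecutor.map() stores worker
exceptions and only re-raises them when the corresponding result slot is
consumed. A minimal demonstration, independent of sos:

    from concurrent.futures import ThreadPoolExecutor

    def work(n):
        if n == 2:
            raise RuntimeError("boom")
        return n * n

    with ThreadPoolExecutor(max_workers=2) as pool:
        results = pool.map(work, [1, 2, 3], chunksize=1)
        # Nothing has been raised yet: the exception sits in the second
        # result slot and is only re-raised when that slot is iterated.
        for res in results:
            print(res)  # prints 1, then raises RuntimeError("boom")
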
+From 9aaba972bf6a42c33ea9bca80f07bfb880ba45a1 Mon Sep 17 00:00:00 2001
+From: "Bryn M. Reeves" <bmr@redhat.com>
+Date: Fri, 7 Sep 2018 12:15:10 -0400
+Subject: [PATCH 2/6] [sosreport] trap directly to PDB in handle_exception()
+
+Now that plugins are run in a threadpool, it is not possible to
+defer the call to pdb.post_mortem() to the top-level exception
+handler in the main thread: this is due to the fact that in a pool,
+exceptions are caught and saved to be re-raised to the thread calling
+the pool when results are returned. When the saved exception is
+raised to the top-level handler the execution context it relates
+to is gone: the backtrace and stack frame have been torn down and
+only very limited information is available from the exception
+frame.
+
+Instead, catch these exceptions _inside_ the thread pool context,
+and directly trap to the Python debugger. This allows plugin code
+to be debugged interactively with the full backtrace and with all
+access to local variables and the execution stack. In addition,
+this means that after the debugger has handled the exception it is
+possible to return to the run and continue until report completion.
+
+One side effect of this change is that the *-plugin-errors.txt
+file containing the backtrace is now written into the archive
+whether or not --debug is given.
+
+Signed-off-by: Bryn M. Reeves <bmr@redhat.com>
+---
+ sos/sosreport.py | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+diff --git a/sos/sosreport.py b/sos/sosreport.py
+index 44be75a1..77ae7161 100644
+--- a/sos/sosreport.py
++++ b/sos/sosreport.py
+@@ -30,6 +30,7 @@ from shutil import rmtree
+ import tempfile
+ import hashlib
+ from concurrent.futures import ThreadPoolExecutor, TimeoutError
++import pdb
+ 
+ from sos import _sos as _
+ from sos import __version__
+@@ -504,7 +505,13 @@ class SoSReport(object):
+ 
+     def handle_exception(self, plugname=None, func=None):
+         if self.raise_plugins or self.exit_process:
+-            raise
++            # retrieve exception info for the current thread and stack.
++            (etype, val, tb) = sys.exc_info()
++            # we are NOT in interactive mode, print the exception...
++            traceback.print_exception(etype, val, tb, file=sys.stdout)
++            print_()
++            # ...then start the debugger in post-mortem mode.
++            pdb.post_mortem(tb)
+         if plugname and func:
+             self._log_plugin_exception(plugname, func)
+ 
+-- 
+2.17.1
+
+
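
Stripped of sos specifics, the pattern introduced here is: capture the
exception info while the failing frame is still alive, print the
traceback, then hand that traceback to pdb. A small sketch (the demo
try/except at the bottom is illustrative):

    import pdb
    import sys
    import traceback

    def handle_exception(debug=True):
        etype, val, tb = sys.exc_info()
        traceback.print_exception(etype, val, tb, file=sys.stdout)
        if debug:
            # post-mortem while the failing frame still exists, so locals
            # and the full stack are available to the debugger
            pdb.post_mortem(tb)

    try:
        1 / 0
    except ZeroDivisionError:
        handle_exception(debug=False)  # set True for an interactive session
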
+From 0ea62d1ea57f41c1b75ccb83e69fdda386a7d280 Mon Sep 17 00:00:00 2001
+From: "Bryn M. Reeves" <bmr@redhat.com>
+Date: Fri, 7 Sep 2018 13:00:52 -0400
+Subject: [PATCH 3/6] [Plugin] fix exception raise in Plugin._copy_dir()
+
+Use a naked 'raise' statement rather than raising the already caught
+exception in _copy_dir(), so that the original stack and backtrace
+are available.
+
+Signed-off-by: Bryn M. Reeves <bmr@redhat.com>
+---
+ sos/plugins/__init__.py | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/sos/plugins/__init__.py b/sos/plugins/__init__.py
+index 252de4d0..ac2c0bc8 100644
+--- a/sos/plugins/__init__.py
++++ b/sos/plugins/__init__.py
+@@ -401,7 +401,7 @@ class Plugin(object):
+                 msg = "Too many levels of symbolic links copying"
+                 self._log_error("_copy_dir: %s '%s'" % (msg, srcpath))
+                 return
+-            raise e
++            raise
+ 
+     def _get_dest_for_srcpath(self, srcpath):
+         if self.use_sysroot():
+-- 
+2.17.1
+
+
+From d84c1cd6dedf51a8ed7b1a511585c0ac2db0f083 Mon Sep 17 00:00:00 2001
+From: "Bryn M. Reeves" <bmr@redhat.com>
+Date: Wed, 5 Sep 2018 12:46:16 +0100
+Subject: [PATCH 4/6] [archive] fix leading path creation
+
+Fix the creation of leading path components for both paths that
+contain intermediate components that are symbolic links (with both
+absolute and relative targets), and those that contain only
+directory components.
+
+Since symlinks may link to other files, and other symlinks, it is
+necessary to handle these paths recursively and to include any
+intermediate symlinked directories, or symlink targets in the set
+of paths added to the archive.
+
+Related: #1404
+
+Signed-off-by: Bryn M. Reeves <bmr@redhat.com>
+---
+ sos/archive.py | 41 ++++++++++++++++++++++++++++++++++-------
+ 1 file changed, 34 insertions(+), 7 deletions(-)
+
+diff --git a/sos/archive.py b/sos/archive.py
+index 11afa7aa..c256a01f 100644
+--- a/sos/archive.py
++++ b/sos/archive.py
+@@ -165,9 +165,24 @@ class FileCacheArchive(Archive):
+             The standard python `os.makedirs` is insufficient for our
+             needs: it will only create directories, and ignores the fact
+             that some path components may be symbolic links.
++
++            :param src: The source path in the host file system for which
++                        leading components should be created, or the path
++                        to an sos_* virtual directory inside the archive.
++
++                        Host paths must be absolute (initial '/'), and
++                        sos_* directory paths must be a path relative to
++                        the root of the archive.
++
++            :param mode: An optional mode to be used when creating path
++                         components.
++            :returns: A rewritten destination path in the case that one
++                      or more symbolic links in intermediate components
++                      of the path have altered the path destination.
+         """
+         self.log_debug("Making leading paths for %s" % src)
+         root = self._archive_root
++        dest = src
+ 
+         def in_archive(path):
+             """Test whether path ``path`` is inside the archive.
+@@ -191,34 +206,42 @@ class FileCacheArchive(Archive):
+         path_comps.reverse()
+ 
+         abs_path = root
+-        rel_path = ""
++        src_path = "/"
+ 
+         # Check and create components as needed
+         for comp in path_comps:
+             abs_path = os.path.join(abs_path, comp)
+ 
++            # Do not create components that are above the archive root.
+             if not in_archive(abs_path):
+                 continue
+ 
+-            rel_path = os.path.join(rel_path, comp)
+-            src_path = os.path.join("/", rel_path)
++            src_path = os.path.join(src_path, comp)
+ 
+             if not os.path.exists(abs_path):
+                 self.log_debug("Making path %s" % abs_path)
+                 if os.path.islink(src_path) and os.path.isdir(src_path):
+                     target = os.readlink(src_path)
+-                    abs_target = os.path.join(root, target)
++
++                    # The directory containing the source in the host fs,
++                    # adjusted for the current level of path creation.
++                    target_dir = os.path.split(src_path)[0]
++
++                    # The source path of the target in the host fs to be
++                    # recursively copied.
++                    target_src = os.path.join(target_dir, target)
+ 
+                     # Recursively create leading components of target
+-                    self._make_leading_paths(abs_target, mode=mode)
++                    dest = self._make_leading_paths(target_src, mode=mode)
++                    dest = os.path.normpath(dest)
+ 
+                     self.log_debug("Making symlink '%s' -> '%s'" %
+                                    (abs_path, target))
+-                    target = os.path.relpath(target)
+                     os.symlink(target, abs_path)
+                 else:
+                     self.log_debug("Making directory %s" % abs_path)
+                     os.mkdir(abs_path, mode)
++        return dest
+ 
+     def _check_path(self, src, path_type, dest=None, force=False):
+         """Check a new destination path in the archive.
+@@ -259,13 +282,17 @@ class FileCacheArchive(Archive):
+         if not dest_dir:
+             return dest
+ 
++        # Preserve destination basename for rewritten dest_dir
++        dest_name = os.path.split(src)[1]
++
+         # Check containing directory presence and path type
+         if os.path.exists(dest_dir) and not os.path.isdir(dest_dir):
+             raise ValueError("path '%s' exists and is not a directory" %
+                              dest_dir)
+         elif not os.path.exists(dest_dir):
+             src_dir = src if path_type == P_DIR else os.path.split(src)[0]
+-            self._make_leading_paths(src_dir)
++            src_dir = self._make_leading_paths(src_dir)
++            dest = self.dest_path(os.path.join(src_dir, dest_name))
+ 
+         def is_special(mode):
+             return any([
+-- 
+2.17.1
+
+
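
The heart of this series is that os.makedirs() turns every path
component into a plain directory, whereas the archive needs intermediate
symlinks recreated as symlinks pointing at copies of their targets. A
condensed, standalone sketch of that walk (simplified from the patch;
error handling and the archive-boundary check are omitted):

    import os

    def make_leading_paths(root, src_dir, mode=0o700):
        """Recreate the leading components of src_dir under root,
        mirroring host symlinks instead of flattening them."""
        comps = [c for c in src_dir.strip("/").split("/") if c]
        abs_path, src_path = root, "/"
        for comp in comps:
            abs_path = os.path.join(abs_path, comp)
            src_path = os.path.join(src_path, comp)
            if os.path.exists(abs_path):
                continue
            if os.path.islink(src_path) and os.path.isdir(src_path):
                target = os.readlink(src_path)
                # create the link target's path in the archive first,
                # then mirror the symlink itself
                make_leading_paths(
                    root, os.path.join(os.path.dirname(src_path), target),
                    mode)
                os.symlink(target, abs_path)
            else:
                os.mkdir(abs_path, mode)
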
+From 322f4a517ae336cc1443f9a399a0d15d45ec48b9 Mon Sep 17 00:00:00 2001
+From: "Bryn M. Reeves" <bmr@redhat.com>
+Date: Fri, 7 Sep 2018 13:11:03 -0400
+Subject: [PATCH 5/6] [archive] add link follow-up to
+ FileCacheArchive.add_link()
+
+Creating a link may trigger further actions in the archive: if the
+link target is a regular file, we must copy that file into the
+archive, and if the target is a symbolic link, then we must create
+that link, and copy in the link target.
+
+Handle this by calling add_file() or (recursively) add_link() in
+order to create the missing pieces of the symlink chain.
+
+These operations must take place outside of the path lock: they do
+not modify the archive namespace, and they call methods of the
+Archive object that will attempt to re-acquire the lock.
+
+Resolves: #1404
+
+Signed-off-by: Bryn M. Reeves <bmr@redhat.com>
+---
+ sos/archive.py | 38 +++++++++++++++++++++++++++++++++++---
+ 1 file changed, 35 insertions(+), 3 deletions(-)
+
+diff --git a/sos/archive.py b/sos/archive.py
+index c256a01f..6db398fc 100644
+--- a/sos/archive.py
++++ b/sos/archive.py
+@@ -403,6 +403,7 @@ class FileCacheArchive(Archive):
+                            % (dest, self._archive_root))
+ 
+     def add_link(self, source, link_name):
++        self.log_debug("adding symlink at '%s' -> '%s'" % (link_name, source))
+         with self._path_lock:
+             dest = self._check_path(link_name, P_LINK)
+             if not dest:
+@@ -410,10 +411,41 @@ class FileCacheArchive(Archive):
+ 
+             if not os.path.lexists(dest):
+                 os.symlink(source, dest)
+-            self.log_debug("added symlink at '%s' to '%s' in archive '%s'"
+-                           % (dest, source, self._archive_root))
++                self.log_debug("added symlink at '%s' to '%s' in archive '%s'"
++                               % (dest, source, self._archive_root))
++
++        # Follow-up must be outside the path lock: we recurse into
++        # other monitor methods that will attempt to reacquire it.
++
++        source_dir = os.path.dirname(link_name)
++        host_source = os.path.join(source_dir, source)
++        if not os.path.exists(self.dest_path(host_source)):
++            if os.path.islink(host_source):
++                link_dir = os.path.dirname(link_name)
++                link_name = os.path.normpath(os.path.join(link_dir, source))
++                dest_dir = os.path.dirname(link_name)
++                source = os.path.join(dest_dir, os.readlink(link_name))
++                source = os.path.relpath(source)
++                self.log_debug("Adding link %s -> %s for link follow up" %
++                               (link_name, source))
++                self.add_link(source, link_name)
++            elif os.path.isdir(host_source):
++                self.log_debug("Adding dir %s for link follow up" % source)
++                self.add_dir(host_source)
++            elif os.path.isfile(host_source):
++                self.log_debug("Adding file %s for link follow up" % source)
++                self.add_file(host_source)
++            else:
++                self.log_debug("No link follow up: source=%s link_name=%s" %
++                               (source, link_name))
+ 
+-    def add_dir(self, path):
++
++    def add_dir(self, path, copy=False):
++        """Create a directory in the archive.
++
++            :param path: the path in the host file system to add
++        """
++        # Establish path structure
+         with self._path_lock:
+             self._check_path(path, P_DIR)
+ 
+-- 
+2.17.1
+
+
+From 6e79c4b4a4f32fa549708dbb8c8b9af73ab8ff61 Mon Sep 17 00:00:00 2001
+From: "Bryn M. Reeves" <bmr@redhat.com>
+Date: Mon, 10 Sep 2018 16:33:33 +0100
+Subject: [PATCH 6/6] [archive] remove unused 'copy' arg from
+ FileCacheArchive.add_dir()
+
+Signed-off-by: Bryn M. Reeves <bmr@redhat.com>
+---
+ sos/archive.py | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/sos/archive.py b/sos/archive.py
+index 6db398fc..4b30630b 100644
+--- a/sos/archive.py
++++ b/sos/archive.py
+@@ -439,8 +439,7 @@ class FileCacheArchive(Archive):
+                 self.log_debug("No link follow up: source=%s link_name=%s" %
+                                (source, link_name))
+ 
+-
+-    def add_dir(self, path, copy=False):
++    def add_dir(self, path):
+         """Create a directory in the archive.
+ 
+             :param path: the path in the host file system to add
+-- 
+2.17.1
+
+From 919e8671a6ab9684d59525eb7f3607b3aab08ee1 Mon Sep 17 00:00:00 2001
+From: "Bryn M. Reeves" <bmr@redhat.com>
+Date: Tue, 11 Sep 2018 12:16:57 -0400
+Subject: [PATCH] [archive] fix link rewriting logic in
+ FileCacheArchive.add_link()
+
+When processing link follow-up for an original symbolic link, the
+add_link() logic incorrectly used the _original_ host link name,
+rather than the name of the link to be created, when calculating
+relative path structures. If the prior link is at a greater or
+lesser level of directory nesting, this leads to broken relative
+links in the archive (one level too high or too low).
+
+In some cases (systemd) this behaviour was masked because
+identically named links exist at multiple levels of the path
+hierarchy.
+
+Signed-off-by: Bryn M. Reeves <bmr@redhat.com>
+---
+ sos/archive.py | 30 +++++++++++++++++++-----------
+ 1 file changed, 19 insertions(+), 11 deletions(-)
+
+diff --git a/sos/archive.py b/sos/archive.py
+index 528cfa576..7a7717de7 100644
+--- a/sos/archive.py
++++ b/sos/archive.py
+@@ -417,27 +417,35 @@ def add_link(self, source, link_name):
+         # Follow-up must be outside the path lock: we recurse into
+         # other monitor methods that will attempt to reacquire it.
+ 
++        self.log_debug("Link follow up: source=%s link_name=%s dest=%s" %
++                       (source, link_name, dest))
++
+         source_dir = os.path.dirname(link_name)
+-        host_source = os.path.join(source_dir, source)
+-        if not os.path.exists(self.dest_path(host_source)):
+-            if os.path.islink(host_source):
+-                link_dir = os.path.dirname(link_name)
+-                link_name = os.path.normpath(os.path.join(link_dir, source))
++        host_path_name = os.path.normpath(os.path.join(source_dir, source))
++        dest_path_name = self.dest_path(host_path_name)
++
++        if not os.path.exists(dest_path_name):
++            if os.path.islink(host_path_name):
++                # Normalised path for the new link_name
++                link_name = host_path_name
++                # Containing directory for the new link
+                 dest_dir = os.path.dirname(link_name)
+-                source = os.path.join(dest_dir, os.readlink(link_name))
+-                source = os.path.relpath(source)
++                # Relative source path of the new link
++                source = os.path.join(dest_dir, os.readlink(host_path_name))
++                source = os.path.relpath(source, dest_dir)
+                 self.log_debug("Adding link %s -> %s for link follow up" %
+                                (link_name, source))
+                 self.add_link(source, link_name)
+-            elif os.path.isdir(host_source):
++            elif os.path.isdir(host_path_name):
+                 self.log_debug("Adding dir %s for link follow up" % source)
+-                self.add_dir(host_source)
+-            elif os.path.isfile(host_source):
++                self.add_dir(host_path_name)
++            elif os.path.isfile(host_path_name):
+                 self.log_debug("Adding file %s for link follow up" % source)
+-                self.add_file(host_source)
++                self.add_file(host_path_name)
+             else:
+                 self.log_debug("No link follow up: source=%s link_name=%s" %
+                                (source, link_name))
++        self.log_debug("leaving add_link()")
+ 
+     def add_dir(self, path):
+         """Create a directory in the archive.
+From c065be9715dc845b6411a9a0b2d6171bbeb1c390 Mon Sep 17 00:00:00 2001
+From: "Bryn M. Reeves" <bmr@redhat.com>
+Date: Wed, 12 Sep 2018 12:02:33 +0100
+Subject: [PATCH] [plugin] canonicalize link target path in
+ Plugin._copy_symlink()
+
+Since we may be dealing with paths that contain intermediate
+symlinked directories, it is necessary to canonicalize the path
+for the link target in order to eliminate additional levels of
+symbolic links, and to calculate the correct relative path to
+use within the archive.
+
+Related: #1404
+
+Signed-off-by: Bryn M. Reeves <bmr@redhat.com>
+---
+ sos/plugins/__init__.py | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/sos/plugins/__init__.py b/sos/plugins/__init__.py
+index ac2c0bc8c..7d011a02c 100644
+--- a/sos/plugins/__init__.py
++++ b/sos/plugins/__init__.py
+@@ -353,7 +353,10 @@ def _copy_symlink(self, srcpath):
+         absdest = os.path.normpath(dest)
+         # adjust the target used inside the report to always be relative
+         if os.path.isabs(linkdest):
+-            reldest = os.path.relpath(linkdest, os.path.dirname(srcpath))
++            # Canonicalize the link target path to avoid additional levels
++            # of symbolic links (that would affect the path nesting level).
++            realdir = os.path.realpath(os.path.dirname(srcpath))
++            reldest = os.path.relpath(linkdest, start=realdir)
+             # trim leading /sysroot
+             if self.use_sysroot():
+                 reldest = reldest[len(os.sep + os.pardir):]
+From 868966cd9dbb96ce3635d884e67e738b18658140 Mon Sep 17 00:00:00 2001
+From: "Bryn M. Reeves" <bmr@redhat.com>
+Date: Wed, 12 Sep 2018 16:11:07 +0100
+Subject: [PATCH] [archive] canonicalise paths for link follow up
+
+Ensure that the canonical path is used when processing link follow-up
+actions: the actual link path may contain one or more levels of
+symbolic links, leading to broken links if the link target path is
+assumed to be relative to the containing directory.
+
+Signed-off-by: Bryn M. Reeves <bmr@redhat.com>
+---
+ sos/archive.py | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/sos/archive.py b/sos/archive.py
+index 7a7717de7..483d66f4f 100644
+--- a/sos/archive.py
++++ b/sos/archive.py
+@@ -421,7 +421,7 @@ def add_link(self, source, link_name):
+                        (source, link_name, dest))
+ 
+         source_dir = os.path.dirname(link_name)
+-        host_path_name = os.path.normpath(os.path.join(source_dir, source))
++        host_path_name = os.path.realpath(os.path.join(source_dir, source))
+         dest_path_name = self.dest_path(host_path_name)
+ 
+         if not os.path.exists(dest_path_name):
+From 8e60e299cdfb0027d6b6ea845234ef54ae785186 Mon Sep 17 00:00:00 2001
+From: "Bryn M. Reeves" <bmr@redhat.com>
+Date: Thu, 13 Sep 2018 16:14:12 +0100
+Subject: [PATCH 1/2] [archive, plugin] avoid recursing on symbolic link loops
+
+It's possible that symlink loops exist in the host file system,
+either 'simple' ('a'->'a') or indirect ('a'->'b'->'a'). We must
+avoid recursing on these loops, so as not to exceed the maximum
+link or recursion depths, but we should still represent these
+inodes as accurately as possible in the resulting archive.
+
+Detect loops in both the Plugin link handling code and in the new
+Archive link follow-up code by creating the first requested level
+of the loop, and then skipping the recursive follow-up. This means
+that the looping links are still created in the archive, so long
+as they are referenced in a copy spec, but we do not attempt to
+recurse indefinitely while collecting them.
+
+Resolves: #1430
+
+Signed-off-by: Bryn M. Reeves <bmr@redhat.com>
+---
+ sos/archive.py          | 27 +++++++++++++++++++++++++++
+ sos/plugins/__init__.py | 20 +++++++++++++++-----
+ 2 files changed, 42 insertions(+), 5 deletions(-)
+
+diff --git a/sos/archive.py b/sos/archive.py
+index 483d66f4..e5819432 100644
+--- a/sos/archive.py
++++ b/sos/archive.py
+@@ -424,6 +424,29 @@ class FileCacheArchive(Archive):
+         host_path_name = os.path.realpath(os.path.join(source_dir, source))
+         dest_path_name = self.dest_path(host_path_name)
+ 
++        def is_loop(link_name, source):
++            """Return ``True`` if the symbolic link ``link_name`` is part
++                of a file system loop, or ``False`` otherwise.
++            """
++            link_dir = os.path.dirname(link_name)
++            if not os.path.isabs(source):
++                source = os.path.realpath(os.path.join(link_dir, source))
++            link_name = os.path.realpath(link_name)
++
++            # Simple a -> a loop
++            if link_name == source:
++                return True
++
++            # Find indirect loops (a->b->a) by stat()ing the first step
++            # in the symlink chain
++            try:
++                os.stat(link_name)
++            except OSError as e:
++                if e.errno == 40:
++                    return True
++                raise
++            return False
++
+         if not os.path.exists(dest_path_name):
+             if os.path.islink(host_path_name):
+                 # Normalised path for the new link_name
+@@ -433,6 +456,10 @@ class FileCacheArchive(Archive):
+                 # Relative source path of the new link
+                 source = os.path.join(dest_dir, os.readlink(host_path_name))
+                 source = os.path.relpath(source, dest_dir)
++                if is_loop(link_name, source):
++                    self.log_debug("Link '%s' - '%s' loops: skipping..." %
++                                   (link_name, source))
++                    return
+                 self.log_debug("Adding link %s -> %s for link follow up" %
+                                (link_name, source))
+                 self.add_link(source, link_name)
+diff --git a/sos/plugins/__init__.py b/sos/plugins/__init__.py
+index 7d011a02..7d2a8b2d 100644
+--- a/sos/plugins/__init__.py
++++ b/sos/plugins/__init__.py
+@@ -376,6 +376,21 @@ class Plugin(object):
+             self._log_debug("link '%s' is a directory, skipping..." % linkdest)
+             return
+ 
++        self.copied_files.append({'srcpath': srcpath,
++                                  'dstpath': dstpath,
++                                  'symlink': "yes",
++                                  'pointsto': linkdest})
++
++        # Check for indirect symlink loops by stat()ing the next step
++        # in the link chain.
++        try:
++            os.stat(absdest)
++        except OSError as e:
++            if e.errno == 40:
++                self._log_debug("link '%s' is part of a file system "
++                                "loop, skipping target..." % dstpath)
++                return
++
+         # copy the symlink target translating relative targets
+         # to absolute paths to pass to _do_copy_path.
+         self._log_debug("normalized link target '%s' as '%s'"
+@@ -388,11 +403,6 @@ class Plugin(object):
+             self._log_debug("link '%s' points to itself, skipping target..."
+                             % linkdest)
+ 
+-        self.copied_files.append({'srcpath': srcpath,
+-                                  'dstpath': dstpath,
+-                                  'symlink': "yes",
+-                                  'pointsto': linkdest})
+-
+     def _copy_dir(self, srcpath):
+         try:
+             for afile in os.listdir(srcpath):
+-- 
+2.17.1
+
+
+From e108d7c03834446f8dac66ad69f5eade4f2c5fce Mon Sep 17 00:00:00 2001
+From: Pavel Moravec <pmoravec@redhat.com>
+Date: Fri, 14 Sep 2018 10:42:07 +0200
+Subject: [PATCH 2/2] [archive] fix and simplify directory destination
+ rewriting
+
+Rewriting of the destination path by _make_leading_paths() only
+applies when creating intermediate path components that are
+symbolic links. The final level of path creation must always be
+a directory, and the destination is always the absolute path to
+that directory.
+
+Always return the directory path when creating a new directory,
+and do not attempt to rewrite the destination at the top level
+in FileCacheArchive._check_path() since all intermediate links
+have already been handled inside _make_leading_paths() (i.e.
+the returned/rewritten destination is always equal to the path
+that was passed into the function).
+
+Resolves: #1432
+
+Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
+Signed-off-by: Bryn M. Reeves <bmr@redhat.com>
+---
+ sos/archive.py | 8 +++-----
+ 1 file changed, 3 insertions(+), 5 deletions(-)
+
+diff --git a/sos/archive.py b/sos/archive.py
+index e5819432..b02b75f7 100644
+--- a/sos/archive.py
++++ b/sos/archive.py
+@@ -241,6 +241,8 @@ class FileCacheArchive(Archive):
+                 else:
+                     self.log_debug("Making directory %s" % abs_path)
+                     os.mkdir(abs_path, mode)
++                    dest = src_path
++
+         return dest
+ 
+     def _check_path(self, src, path_type, dest=None, force=False):
+@@ -282,17 +284,13 @@ class FileCacheArchive(Archive):
+         if not dest_dir:
+             return dest
+ 
+-        # Preserve destination basename for rewritten dest_dir
+-        dest_name = os.path.split(src)[1]
+-
+         # Check containing directory presence and path type
+         if os.path.exists(dest_dir) and not os.path.isdir(dest_dir):
+             raise ValueError("path '%s' exists and is not a directory" %
+                              dest_dir)
+         elif not os.path.exists(dest_dir):
+             src_dir = src if path_type == P_DIR else os.path.split(src)[0]
+-            src_dir = self._make_leading_paths(src_dir)
+-            dest = self.dest_path(os.path.join(src_dir, dest_name))
++            self._make_leading_paths(src_dir)
+ 
+         def is_special(mode):
+             return any([
+-- 
+2.17.1
+
diff --git a/SOURCES/sos-bz1626159-atomic-attribute-error.patch b/SOURCES/sos-bz1626159-atomic-attribute-error.patch
new file mode 100644
index 0000000..035c892
--- /dev/null
+++ b/SOURCES/sos-bz1626159-atomic-attribute-error.patch
@@ -0,0 +1,60 @@
+From 4440c9094d853a452cbff6f9801fc7d47352e9b4 Mon Sep 17 00:00:00 2001
+From: Jake Hunsaker <jhunsake@redhat.com>
+Date: Thu, 6 Sep 2018 13:56:20 -0400
+Subject: [PATCH] [atomic] Define valid preset for RHEL Atomic
+
+Defines an 'atomic' preset for use with the RedHatAtomic policy for RHEL
+Atomic Host. Fixes sos being unable to run due to the preset probe
+returning a string rather than a preset.
+
+Resolves: #1418
+
+Signed-off-by: Jake Hunsaker <jhunsake@redhat.com>
+Signed-off-by: Bryn M. Reeves <bmr@redhat.com>
+---
+ sos/policies/redhat.py | 15 ++++++++++++++-
+ 1 file changed, 14 insertions(+), 1 deletion(-)
+
+diff --git a/sos/policies/redhat.py b/sos/policies/redhat.py
+index b494de3c..e1e417f3 100644
+--- a/sos/policies/redhat.py
++++ b/sos/policies/redhat.py
+@@ -325,6 +325,12 @@ No changes will be made to system configuration.
+ 
+ ATOMIC = "atomic"
+ ATOMIC_RELEASE_STR = "Atomic"
++ATOMIC_DESC = "Red Hat Enterprise Linux Atomic Host"
++
++atomic_presets = {
++    ATOMIC: PresetDefaults(name=ATOMIC, desc=ATOMIC_DESC, note=NOTE_TIME,
++                           opts=_opts_verify)
++}
+ 
+ 
+ class RedHatAtomicPolicy(RHELPolicy):
+@@ -347,6 +353,10 @@ organization before being passed to any third party.
+ %(vendor_text)s
+ """)
+ 
++    def __init__(self, sysroot=None):
++        super(RedHatAtomicPolicy, self).__init__(sysroot=sysroot)
++        self.register_presets(atomic_presets)
++
+     @classmethod
+     def check(cls):
+         atomic = False
+@@ -363,7 +373,10 @@ organization before being passed to any third party.
+         return atomic
+ 
+     def probe_preset(self):
+-        return ATOMIC
++        if self.pkg_by_name('atomic-openshift'):
++            return self.find_preset(RHOCP)
++
++        return self.find_preset(ATOMIC)
+ 
+ 
+ class FedoraPolicy(RedHatPolicy):
+-- 
+2.17.1
+
diff --git a/SOURCES/sos-centos-branding.patch b/SOURCES/sos-centos-branding.patch
deleted file mode 100644
index 86ab010..0000000
--- a/SOURCES/sos-centos-branding.patch
+++ /dev/null
@@ -1,1288 +0,0 @@
-diff -uNrp sos-3.0.orig/po/af.po sos-3.0/po/af.po
---- sos-3.0.orig/po/af.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/af.po	2014-06-21 11:15:36.435724571 -0500
-@@ -168,7 +168,7 @@ msgid "Cannot upload to specified URL."
- msgstr ""
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
-+msgid "There was a problem uploading your report to CentOS support."
- msgstr ""
- 
- #: ../sos/policyredhat.py:401
-diff -uNrp sos-3.0.orig/po/am.po sos-3.0/po/am.po
---- sos-3.0.orig/po/am.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/am.po	2014-06-21 11:15:36.436724563 -0500
-@@ -168,7 +168,7 @@ msgid "Cannot upload to specified URL."
- msgstr ""
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
-+msgid "There was a problem uploading your report to CentOS support."
- msgstr ""
- 
- #: ../sos/policyredhat.py:401
-diff -uNrp sos-3.0.orig/po/ar.po sos-3.0/po/ar.po
---- sos-3.0.orig/po/ar.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/ar.po	2014-06-21 11:16:38.081245080 -0500
-@@ -179,8 +179,8 @@ msgid "Cannot upload to specified URL."
- msgstr "لا يمكن الرفع للعنوان المحدّد"
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
--msgstr "صودفت مشكلة برفع تقريرك إلى دعم Red Hat. "
-+msgid "There was a problem uploading your report to CentOS support."
-+msgstr "صودفت مشكلة برفع تقريرك إلى دعم CentOS. "
- 
- #: ../sos/policyredhat.py:401
- #, python-format
-diff -uNrp sos-3.0.orig/po/as.po sos-3.0/po/as.po
---- sos-3.0.orig/po/as.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/as.po	2014-06-21 11:15:36.437724555 -0500
-@@ -85,11 +85,11 @@ msgid ""
- "No changes will be made to your system.\n"
- "\n"
- msgstr ""
--"এই সামগ্ৰীৰ সহায়ত যান্ত্ৰিক সামগ্ৰী আৰু Red Hat Enterprise Linux\n"
-+"এই সামগ্ৰীৰ সহায়ত যান্ত্ৰিক সামগ্ৰী আৰু CentOS Enterprise Linux\n"
- "প্ৰণালীৰ প্ৰতিষ্ঠা সম্পৰ্কে বিশদ তথ্য সংগ্ৰহ কৰা হ'ব ।\n"
- "তথ্য সংগ্ৰহৰ পিছত /tmp পঞ্জিকাৰ অধীন এটা আৰ্কাইভ নিৰ্মিত হয় ।\n"
- "এই আৰ্কাইভ আপুনি সহায়তা প্ৰতিনিধিৰ কাশত পঠায় দিব পাৰে ।\n"
--"Red Hat দ্বাৰা এই তথ্য অকল সমস্যাৰ কাৰণ নিৰ্ণয় কৰাৰ বাবে ব্যৱহাৰ কৰা হ'ব\n"
-+"CentOS দ্বাৰা এই তথ্য অকল সমস্যাৰ কাৰণ নিৰ্ণয় কৰাৰ বাবে ব্যৱহাৰ কৰা হ'ব\n"
- "আৰু ইয়াৰ গোপনীয়তা বজায় ৰাখা হ'ব ।\n"
- "\n"
- "এই কাম সম্পন্ন হ'বলৈ কিছু সময় ব্যয় হ'ব পাৰে ।\n"
-@@ -184,14 +184,14 @@ msgid "Cannot upload to specified URL."
- msgstr "উল্লিখিত URL-এ আপলোড কৰিবলৈ ব্যৰ্থ ।"
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
--msgstr "আপোনাৰ ৰিপোৰ্টটি Red Hat সহায়তা ব্যৱস্থাত আপলোড কৰিবলৈ সমস্যা ।"
-+msgid "There was a problem uploading your report to CentOS support."
-+msgstr "আপোনাৰ ৰিপোৰ্টটি CentOS সহায়তা ব্যৱস্থাত আপলোড কৰিবলৈ সমস্যা ।"
- 
- #: ../sos/policyredhat.py:401
- #, fuzzy, python-format
- msgid "Your report was successfully uploaded to %s with name:"
- msgstr ""
--"আপোনাৰ প্ৰদত্ত ৰিপোৰ্ট সফলতাৰে সৈতে Red Hat-ৰ ftp সেৱকত নিম্নলিখিত নামত আপলোড "
-+"আপোনাৰ প্ৰদত্ত ৰিপোৰ্ট সফলতাৰে সৈতে CentOS-ৰ ftp সেৱকত নিম্নলিখিত নামত আপলোড "
- "কৰা হৈছে:"
- 
- #: ../sos/policyredhat.py:404
-diff -uNrp sos-3.0.orig/po/ast.po sos-3.0/po/ast.po
---- sos-3.0.orig/po/ast.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/ast.po	2014-06-21 11:17:08.318010034 -0500
-@@ -86,10 +86,10 @@ msgid ""
- "\n"
- msgstr ""
- "Esta utilidá recueyerá dalguna información detallada sobro'l\n"
--"hardware y la configuración del to sistema Red Hat Enterprise Linux.\n"
-+"hardware y la configuración del to sistema CentOS Enterprise Linux.\n"
- "La información recuéyese y críase un ficheru baxo /tmp.\n"
- "Ésti puede mandase al to representante de sofitu.\n"
--"Red Hat usará esta información pa diagnosticar el sistema\n"
-+"CentOS usará esta información pa diagnosticar el sistema\n"
- "únicamente y considerará esta información como confidencial.\n"
- "\n"
- "Esti procesu va llevar un tiempu pa completase.\n"
-@@ -184,14 +184,14 @@ msgid "Cannot upload to specified URL."
- msgstr "Nun se puede cargar a la URL especificada."
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
-+msgid "There was a problem uploading your report to CentOS support."
- msgstr ""
--"Hebo un problema al cargar el to informe al equipu d'asistencia de Red Hat"
-+"Hebo un problema al cargar el to informe al equipu d'asistencia de CentOS"
- 
- #: ../sos/policyredhat.py:401
- #, fuzzy, python-format
- msgid "Your report was successfully uploaded to %s with name:"
--msgstr "El to informe cargóse bien a los sirvidores ftp e Red Hat col nome:"
-+msgstr "El to informe cargóse bien a los sirvidores ftp e CentOS col nome:"
- 
- #: ../sos/policyredhat.py:404
- msgid "Please communicate this name to your support representative."
-diff -uNrp sos-3.0.orig/po/be.po sos-3.0/po/be.po
---- sos-3.0.orig/po/be.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/be.po	2014-06-21 11:15:36.438724547 -0500
-@@ -168,7 +168,7 @@ msgid "Cannot upload to specified URL."
- msgstr ""
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
-+msgid "There was a problem uploading your report to CentOS support."
- msgstr ""
- 
- #: ../sos/policyredhat.py:401
-diff -uNrp sos-3.0.orig/po/bg.po sos-3.0/po/bg.po
---- sos-3.0.orig/po/bg.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/bg.po	2014-06-21 11:15:36.439724539 -0500
-@@ -172,9 +172,9 @@ msgid "Cannot upload to specified URL."
- msgstr "Не може да се качи на посочения URL"
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
-+msgid "There was a problem uploading your report to CentOS support."
- msgstr ""
--"Възникна проблем при качването на вашия отчет на проддръжката на Red Hat."
-+"Възникна проблем при качването на вашия отчет на проддръжката на CentOS."
- 
- #: ../sos/policyredhat.py:401
- #, python-format
-diff -uNrp sos-3.0.orig/po/bn_IN.po sos-3.0/po/bn_IN.po
---- sos-3.0.orig/po/bn_IN.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/bn_IN.po	2014-06-21 11:15:36.440724532 -0500
-@@ -184,8 +184,8 @@ msgid "Cannot upload to specified URL."
- msgstr "উল্লিখিত URL-এ আপলোড করতে ব্যর্থ।"
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
--msgstr "আপনার রিপোর্টটি Red Hat সহায়তা ব্যবস্থায় আপলোড করতে সমস্যা।"
-+msgid "There was a problem uploading your report to CentOS support."
-+msgstr "আপনার রিপোর্টটি CentOS সহায়তা ব্যবস্থায় আপলোড করতে সমস্যা।"
- 
- #: ../sos/policyredhat.py:401
- #, python-format
-diff -uNrp sos-3.0.orig/po/bn.po sos-3.0/po/bn.po
---- sos-3.0.orig/po/bn.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/bn.po	2014-06-21 11:15:36.440724532 -0500
-@@ -168,7 +168,7 @@ msgid "Cannot upload to specified URL."
- msgstr ""
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
-+msgid "There was a problem uploading your report to CentOS support."
- msgstr ""
- 
- #: ../sos/policyredhat.py:401
-diff -uNrp sos-3.0.orig/po/bs.po sos-3.0/po/bs.po
---- sos-3.0.orig/po/bs.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/bs.po	2014-06-21 11:15:36.441724524 -0500
-@@ -189,8 +189,8 @@ msgid "Cannot upload to specified URL."
- msgstr "Nije se mogao postaviti specificirani URL,"
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
--msgstr "Bilo je problema u postavljanju vaseg izvjestaja na Red Hat podrsku. "
-+msgid "There was a problem uploading your report to CentOS support."
-+msgstr "Bilo je problema u postavljanju vaseg izvjestaja na CentOS podrsku. "
- 
- #: ../sos/policyredhat.py:401
- #, python-format
-diff -uNrp sos-3.0.orig/po/ca.po sos-3.0/po/ca.po
---- sos-3.0.orig/po/ca.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/ca.po	2014-06-21 11:15:36.442724516 -0500
-@@ -194,8 +194,8 @@ msgid "Cannot upload to specified URL."
- msgstr "No es pot pujar a la URL especificada."
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
--msgstr "Hi ha hagut un problema en pujar l'informe al manteniment de Red Hat."
-+msgid "There was a problem uploading your report to CentOS support."
-+msgstr "Hi ha hagut un problema en pujar l'informe al manteniment de CentOS."
- 
- #: ../sos/policyredhat.py:401
- #, python-format
-diff -uNrp sos-3.0.orig/po/cs.po sos-3.0/po/cs.po
---- sos-3.0.orig/po/cs.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/cs.po	2014-06-21 11:15:36.443724508 -0500
-@@ -183,8 +183,8 @@ msgid "Cannot upload to specified URL."
- msgstr "Nelze uložit na uvedené URL."
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
--msgstr "Při odesílání zprávy do firmy Red Hat vznikla chyba."
-+msgid "There was a problem uploading your report to CentOS support."
-+msgstr "Při odesílání zprávy do firmy CentOS vznikla chyba."
- 
- #: ../sos/policyredhat.py:401
- #, python-format
-diff -uNrp sos-3.0.orig/po/cy.po sos-3.0/po/cy.po
---- sos-3.0.orig/po/cy.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/cy.po	2014-06-21 11:15:36.443724508 -0500
-@@ -168,7 +168,7 @@ msgid "Cannot upload to specified URL."
- msgstr ""
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
-+msgid "There was a problem uploading your report to CentOS support."
- msgstr ""
- 
- #: ../sos/policyredhat.py:401
-diff -uNrp sos-3.0.orig/po/da.po sos-3.0/po/da.po
---- sos-3.0.orig/po/da.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/da.po	2014-06-21 11:15:36.444724501 -0500
-@@ -184,9 +184,9 @@ msgid "Cannot upload to specified URL."
- msgstr "Kan ikke overføre til den angivne URL."
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
-+msgid "There was a problem uploading your report to CentOS support."
- msgstr ""
--"Der opstod et problem under overførsel af din rapport til Red Hat-support."
-+"Der opstod et problem under overførsel af din rapport til CentOS-support."
- 
- #: ../sos/policyredhat.py:401
- #, python-format
-diff -uNrp sos-3.0.orig/po/de_CH.po sos-3.0/po/de_CH.po
---- sos-3.0.orig/po/de_CH.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/de_CH.po	2014-06-21 11:15:36.444724501 -0500
-@@ -87,10 +87,10 @@ msgid ""
- "\n"
- msgstr ""
- "Dieses Dienstprogramm sammelt einige detaillierte Informationen\n"
--"zur Hardware und Einrichtung Ihres Red Hat Enterprise Linux Systems.\n"
-+"zur Hardware und Einrichtung Ihres CentOS Enterprise Linux Systems.\n"
- "Die Informationen werden gesammelt und in einem Archiv unter /tmp\n"
- "zusammengefasst, welches Sie an einen Support-Vertreter schicken\n"
--"können. Red Hat verwendet diese Informationen AUSSCHLIESSLICH zu\n"
-+"können. CentOS verwendet diese Informationen AUSSCHLIESSLICH zu\n"
- "Diagnosezwecken und behandelt sie als vertrauliche Informationen.\n"
- "\n"
- "Die Fertigstellung dieses Prozesses kann eine Weile dauern.\n"
-@@ -188,14 +188,14 @@ msgid "Cannot upload to specified URL."
- msgstr "Hochladen zu speziellem URL scheiterte."
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
--msgstr "Beim Hochladen Ihres Berichts zum Red Hat Support trat ein Fehler auf."
-+msgid "There was a problem uploading your report to CentOS support."
-+msgstr "Beim Hochladen Ihres Berichts zum CentOS Support trat ein Fehler auf."
- 
- #: ../sos/policyredhat.py:401
- #, fuzzy, python-format
- msgid "Your report was successfully uploaded to %s with name:"
- msgstr ""
--"Ihr Bericht wurde erfolgreich auf den Red Hat FTP-Server hochgeladen, mit "
-+"Ihr Bericht wurde erfolgreich auf den CentOS FTP-Server hochgeladen, mit "
- "dem Namen:"
- 
- #: ../sos/policyredhat.py:404
-diff -uNrp sos-3.0.orig/po/de.po sos-3.0/po/de.po
---- sos-3.0.orig/po/de.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/de.po	2014-06-21 11:15:36.445724493 -0500
-@@ -191,8 +191,8 @@ msgid "Cannot upload to specified URL."
- msgstr "Hochladen zu spezieller URL scheiterte."
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
--msgstr "Beim Hochladen Ihres Berichts zum Red Hat Support trat ein Fehler auf."
-+msgid "There was a problem uploading your report to CentOS support."
-+msgstr "Beim Hochladen Ihres Berichts zum CentOS Support trat ein Fehler auf."
- 
- #: ../sos/policyredhat.py:401
- #, python-format
-diff -uNrp sos-3.0.orig/po/el.po sos-3.0/po/el.po
---- sos-3.0.orig/po/el.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/el.po	2014-06-21 11:15:36.445724493 -0500
-@@ -84,10 +84,10 @@ msgid ""
- "\n"
- msgstr ""
- "Αυτό το εργαλείο θα συγκετρώσει ορισμένες πληροφορίες για τον υπολογιστή σας "
--"και την εγκατάσταση του Red Hat Enterprise Linux συστήματος.\n"
-+"και την εγκατάσταση του CentOS Enterprise Linux συστήματος.\n"
- "Οι πληροφορίες συγκετρώνονται και το archive δημιουργήται στο\n"
- "/tmp,το οποίο και μπορείτε να στείλετε σε έναν αντιπρόσωπο υποστήριξης.\n"
--"Η Red Hat θα χρησιμοποιήσει αυτα τα δεδομένα ΜΟΝΟ για διαγνωστικούς σκοπούς\n"
-+"Η CentOS θα χρησιμοποιήσει αυτα τα δεδομένα ΜΟΝΟ για διαγνωστικούς σκοπούς\n"
- "και θα παραμείνουν εμπιστευτηκά.\n"
- "\n"
- 
-@@ -180,8 +180,8 @@ msgid "Cannot upload to specified URL."
- msgstr "Δεν είναι δυνατό το upload στο καθορισμένο URL."
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
--msgstr "Υπήρξε ένα πρόβλημα κατα το upload της αναφοράς σας στην Red Hat."
-+msgid "There was a problem uploading your report to CentOS support."
-+msgstr "Υπήρξε ένα πρόβλημα κατα το upload της αναφοράς σας στην CentOS."
- 
- #: ../sos/policyredhat.py:401
- #, python-format
-diff -uNrp sos-3.0.orig/po/en_GB.po sos-3.0/po/en_GB.po
---- sos-3.0.orig/po/en_GB.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/en_GB.po	2014-06-21 11:15:36.446724485 -0500
-@@ -83,10 +83,10 @@ msgid ""
- "\n"
- msgstr ""
- "This utility will collect some detailed  information about the\n"
--"hardware and  setup of your  Red Hat Enterprise Linux  system.\n"
-+"hardware and  setup of your  CentOS Enterprise Linux  system.\n"
- "The information is collected and an archive is  packaged under\n"
- "/tmp, which you can send to a support representative.\n"
--"Red Hat will use this information for diagnostic purposes ONLY\n"
-+"CentOS will use this information for diagnostic purposes ONLY\n"
- "and it will be considered confidential information.\n"
- "\n"
- "This process may take a while to complete.\n"
-@@ -181,14 +181,14 @@ msgid "Cannot upload to specified URL."
- msgstr "Cannot upload to specified URL."
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
--msgstr "There was a problem uploading your report to Red Hat support."
-+msgid "There was a problem uploading your report to CentOS support."
-+msgstr "There was a problem uploading your report to CentOS support."
- 
- #: ../sos/policyredhat.py:401
- #, fuzzy, python-format
- msgid "Your report was successfully uploaded to %s with name:"
- msgstr ""
--"Your report was successfully uploaded to Red Hat's ftp server with name:"
-+"Your report was successfully uploaded to CentOS's ftp server with name:"
- 
- #: ../sos/policyredhat.py:404
- msgid "Please communicate this name to your support representative."
-diff -uNrp sos-3.0.orig/po/en.po sos-3.0/po/en.po
---- sos-3.0.orig/po/en.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/en.po	2014-06-21 11:15:36.446724485 -0500
-@@ -168,7 +168,7 @@ msgid "Cannot upload to specified URL."
- msgstr ""
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
-+msgid "There was a problem uploading your report to CentOS support."
- msgstr ""
- 
- #: ../sos/policyredhat.py:401
-diff -uNrp sos-3.0.orig/po/es.po sos-3.0/po/es.po
---- sos-3.0.orig/po/es.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/es.po	2014-06-21 11:17:24.153886936 -0500
-@@ -189,9 +189,9 @@ msgid "Cannot upload to specified URL."
- msgstr "No se puede cargar a la URL especificada."
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
-+msgid "There was a problem uploading your report to CentOS support."
- msgstr ""
--"Hubo un problema al cargar su reporte al equipo de asistencia de Red Hat"
-+"Hubo un problema al cargar su reporte al equipo de asistencia de CentOS"
- 
- #: ../sos/policyredhat.py:401
- #, python-format
-diff -uNrp sos-3.0.orig/po/et.po sos-3.0/po/et.po
---- sos-3.0.orig/po/et.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/et.po	2014-06-21 11:15:36.447724477 -0500
-@@ -169,7 +169,7 @@ msgid "Cannot upload to specified URL."
- msgstr ""
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
-+msgid "There was a problem uploading your report to CentOS support."
- msgstr ""
- 
- #: ../sos/policyredhat.py:401
-diff -uNrp sos-3.0.orig/po/eu_ES.po sos-3.0/po/eu_ES.po
---- sos-3.0.orig/po/eu_ES.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/eu_ES.po	2014-06-21 11:15:36.448724469 -0500
-@@ -168,7 +168,7 @@ msgid "Cannot upload to specified URL."
- msgstr ""
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
-+msgid "There was a problem uploading your report to CentOS support."
- msgstr ""
- 
- #: ../sos/policyredhat.py:401
-diff -uNrp sos-3.0.orig/po/fa.po sos-3.0/po/fa.po
---- sos-3.0.orig/po/fa.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/fa.po	2014-06-21 11:15:36.448724469 -0500
-@@ -168,7 +168,7 @@ msgid "Cannot upload to specified URL."
- msgstr ""
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
-+msgid "There was a problem uploading your report to CentOS support."
- msgstr ""
- 
- #: ../sos/policyredhat.py:401
-diff -uNrp sos-3.0.orig/po/fi.po sos-3.0/po/fi.po
---- sos-3.0.orig/po/fi.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/fi.po	2014-06-21 11:17:38.280777198 -0500
-@@ -179,8 +179,8 @@ msgid "Cannot upload to specified URL."
- msgstr "Annettuun osoitteeseen ei voida lähettää."
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
--msgstr "Raportin lähettämisessä Red Hatin käyttötukeen oli ongelmia."
-+msgid "There was a problem uploading your report to CentOS support."
-+msgstr "Raportin lähettämisessä CentOSin käyttötukeen oli ongelmia."
- 
- #: ../sos/policyredhat.py:401
- #, python-format
-diff -uNrp sos-3.0.orig/po/fr.po sos-3.0/po/fr.po
---- sos-3.0.orig/po/fr.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/fr.po	2014-06-21 11:15:36.449724462 -0500
-@@ -188,10 +188,10 @@ msgid "Cannot upload to specified URL."
- msgstr "Impossible de le télécharger vers l'URL spécifié."
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
-+msgid "There was a problem uploading your report to CentOS support."
- msgstr ""
- "Une erreur s'est produite lors du téléchargement de votre rapport vers le "
--"support Red Hat."
-+"support CentOS."
- 
- #: ../sos/policyredhat.py:401
- #, python-format
-diff -uNrp sos-3.0.orig/po/gl.po sos-3.0/po/gl.po
---- sos-3.0.orig/po/gl.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/gl.po	2014-06-21 11:15:36.450724454 -0500
-@@ -168,7 +168,7 @@ msgid "Cannot upload to specified URL."
- msgstr ""
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
-+msgid "There was a problem uploading your report to CentOS support."
- msgstr ""
- 
- #: ../sos/policyredhat.py:401
-diff -uNrp sos-3.0.orig/po/gu.po sos-3.0/po/gu.po
---- sos-3.0.orig/po/gu.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/gu.po	2014-06-21 11:15:36.450724454 -0500
-@@ -186,8 +186,8 @@ msgid "Cannot upload to specified URL."
- msgstr "સ્પષ્ટ કરેલ URL અપલોડ કરી શકતા નથી."
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
--msgstr "તમારા અહેવાલને Red Hat આધારમાં અપલોડ કરવામાં સમસ્યા હતી."
-+msgid "There was a problem uploading your report to CentOS support."
-+msgstr "તમારા અહેવાલને CentOS આધારમાં અપલોડ કરવામાં સમસ્યા હતી."
- 
- #: ../sos/policyredhat.py:401
- #, python-format
-diff -uNrp sos-3.0.orig/po/he.po sos-3.0/po/he.po
---- sos-3.0.orig/po/he.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/he.po	2014-06-21 11:15:36.450724454 -0500
-@@ -169,7 +169,7 @@ msgid "Cannot upload to specified URL."
- msgstr ""
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
-+msgid "There was a problem uploading your report to CentOS support."
- msgstr ""
- 
- #: ../sos/policyredhat.py:401
-diff -uNrp sos-3.0.orig/po/hi.po sos-3.0/po/hi.po
---- sos-3.0.orig/po/hi.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/hi.po	2014-06-21 11:15:36.451724446 -0500
-@@ -187,8 +187,8 @@ msgid "Cannot upload to specified URL."
- msgstr "निर्दिष्ट URL अपलोड नहीं कर सकता है."
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
--msgstr "आपके रिपोर्ट को Red Hat समर्थन में अपलोड करने में समस्या थी."
-+msgid "There was a problem uploading your report to CentOS support."
-+msgstr "आपके रिपोर्ट को CentOS समर्थन में अपलोड करने में समस्या थी."
- 
- #: ../sos/policyredhat.py:401
- #, python-format
-diff -uNrp sos-3.0.orig/po/hr.po sos-3.0/po/hr.po
---- sos-3.0.orig/po/hr.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/hr.po	2014-06-21 11:15:36.451724446 -0500
-@@ -170,7 +170,7 @@ msgid "Cannot upload to specified URL."
- msgstr ""
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
-+msgid "There was a problem uploading your report to CentOS support."
- msgstr ""
- 
- #: ../sos/policyredhat.py:401
-diff -uNrp sos-3.0.orig/po/hu.po sos-3.0/po/hu.po
---- sos-3.0.orig/po/hu.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/hu.po	2014-06-21 11:15:36.452724438 -0500
-@@ -180,8 +180,8 @@ msgid "Cannot upload to specified URL."
- msgstr "Nem lehet az URL-re feltölteni."
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
--msgstr "A jelentést a Red Hat támogatáshoz feltöltvén baj történt."
-+msgid "There was a problem uploading your report to CentOS support."
-+msgstr "A jelentést a CentOS támogatáshoz feltöltvén baj történt."
- 
- #: ../sos/policyredhat.py:401
- #, python-format
-diff -uNrp sos-3.0.orig/po/hy.po sos-3.0/po/hy.po
---- sos-3.0.orig/po/hy.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/hy.po	2014-06-21 11:15:36.452724438 -0500
-@@ -168,7 +168,7 @@ msgid "Cannot upload to specified URL."
- msgstr ""
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
-+msgid "There was a problem uploading your report to CentOS support."
- msgstr ""
- 
- #: ../sos/policyredhat.py:401
-diff -uNrp sos-3.0.orig/po/id.po sos-3.0/po/id.po
---- sos-3.0.orig/po/id.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/id.po	2014-06-21 11:15:36.453724430 -0500
-@@ -171,7 +171,7 @@ msgid "Cannot upload to specified URL."
- msgstr ""
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
-+msgid "There was a problem uploading your report to CentOS support."
- msgstr ""
- 
- #: ../sos/policyredhat.py:401
-diff -uNrp sos-3.0.orig/po/ilo.po sos-3.0/po/ilo.po
---- sos-3.0.orig/po/ilo.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/ilo.po	2014-06-21 11:15:36.453724430 -0500
-@@ -168,7 +168,7 @@ msgid "Cannot upload to specified URL."
- msgstr ""
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
-+msgid "There was a problem uploading your report to CentOS support."
- msgstr ""
- 
- #: ../sos/policyredhat.py:401
-diff -uNrp sos-3.0.orig/po/is.po sos-3.0/po/is.po
---- sos-3.0.orig/po/is.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/is.po	2014-06-21 11:15:36.453724430 -0500
-@@ -168,7 +168,7 @@ msgid "Cannot upload to specified URL."
- msgstr ""
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
-+msgid "There was a problem uploading your report to CentOS support."
- msgstr ""
- 
- #: ../sos/policyredhat.py:401
-diff -uNrp sos-3.0.orig/po/it.po sos-3.0/po/it.po
---- sos-3.0.orig/po/it.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/it.po	2014-06-21 11:15:36.454724423 -0500
-@@ -181,7 +181,7 @@ msgid "Cannot upload to specified URL."
- msgstr "Impossibile inviare all'URL specificato."
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
-+msgid "There was a problem uploading your report to CentOS support."
- msgstr ""
- "Si è verificato un problema nell'inviare il report al supporto tecnico Red "
- "Hat."
-diff -uNrp sos-3.0.orig/po/ja.po sos-3.0/po/ja.po
---- sos-3.0.orig/po/ja.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/ja.po	2014-06-21 11:15:36.454724423 -0500
-@@ -185,8 +185,8 @@ msgid "Cannot upload to specified URL."
- msgstr "指定された URL にアップロードできません。"
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
--msgstr "レポートを Red Hat サポートにアップロードするのに問題がありました。"
-+msgid "There was a problem uploading your report to CentOS support."
-+msgstr "レポートを CentOS サポートにアップロードするのに問題がありました。"
- 
- #: ../sos/policyredhat.py:401
- #, python-format
-diff -uNrp sos-3.0.orig/po/ka.po sos-3.0/po/ka.po
---- sos-3.0.orig/po/ka.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/ka.po	2014-06-21 11:15:36.455724415 -0500
-@@ -168,7 +168,7 @@ msgid "Cannot upload to specified URL."
- msgstr ""
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
-+msgid "There was a problem uploading your report to CentOS support."
- msgstr ""
- 
- #: ../sos/policyredhat.py:401
-diff -uNrp sos-3.0.orig/po/kn.po sos-3.0/po/kn.po
---- sos-3.0.orig/po/kn.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/kn.po	2014-06-21 11:15:36.455724415 -0500
-@@ -185,9 +185,9 @@ msgid "Cannot upload to specified URL."
- msgstr "ಸೂಚಿಸಲಾದ URL ಅನ್ನು ಅಪ್‌ಲೋಡ್ ಮಾಡಲು ಸಾಧ್ಯವಾಗಿಲ್ಲ."
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
-+msgid "There was a problem uploading your report to CentOS support."
- msgstr ""
--"ನಿಮ್ಮ ವರದಿಯನ್ನು Red Hat ಬೆಂಬಲದ ಸ್ಥಳಕ್ಕೆ ಅಪ್‌ಲೋಡ್ ಮಾಡುವಲ್ಲಿ ಒಂದು ತೊಂದರೆ ಉಂಟಾಗಿದೆ."
-+"ನಿಮ್ಮ ವರದಿಯನ್ನು CentOS ಬೆಂಬಲದ ಸ್ಥಳಕ್ಕೆ ಅಪ್‌ಲೋಡ್ ಮಾಡುವಲ್ಲಿ ಒಂದು ತೊಂದರೆ ಉಂಟಾಗಿದೆ."
- 
- #: ../sos/policyredhat.py:401
- #, python-format
-diff -uNrp sos-3.0.orig/po/ko.po sos-3.0/po/ko.po
---- sos-3.0.orig/po/ko.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/ko.po	2014-06-21 11:17:58.331621414 -0500
-@@ -84,10 +84,10 @@ msgid ""
- "No changes will be made to your system.\n"
- "\n"
- msgstr ""
--"이 유틸리티는 Red Hat Enterprise Linux 시스템의 하드웨어와 \n"
-+"이 유틸리티는 CentOS Enterprise Linux 시스템의 하드웨어와 \n"
- "시스템 설정 사항에 대한 상세 정보를 수집하게 됩니다. 수집된 \n"
- "정보는 지원 담당자에게 보낼 수 있도록 /tmp 디렉토리 안에 \n"
--"아카이브로 저장됩니다. Red Hat은 이 정보를 문제 해결 목적으로만 사용하며 기"
-+"아카이브로 저장됩니다. CentOS은 이 정보를 문제 해결 목적으로만 사용하며 기"
- "밀 정보로 \n"
- "취급할 것입니다. \n"
- "\n"
-@@ -183,8 +183,8 @@ msgid "Cannot upload to specified URL."
- msgstr "지정된 URL에서 업로드할 수 없습니다."
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
--msgstr "리포트를 Red Hat 지원 센터로 업로드하는 데 문제가 발생했습니다."
-+msgid "There was a problem uploading your report to CentOS support."
-+msgstr "리포트를 CentOS 지원 센터로 업로드하는 데 문제가 발생했습니다."
- 
- #: ../sos/policyredhat.py:401
- #, python-format
-diff -uNrp sos-3.0.orig/po/ku.po sos-3.0/po/ku.po
---- sos-3.0.orig/po/ku.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/ku.po	2014-06-21 11:15:36.456724407 -0500
-@@ -168,7 +168,7 @@ msgid "Cannot upload to specified URL."
- msgstr ""
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
-+msgid "There was a problem uploading your report to CentOS support."
- msgstr ""
- 
- #: ../sos/policyredhat.py:401
-diff -uNrp sos-3.0.orig/po/lo.po sos-3.0/po/lo.po
---- sos-3.0.orig/po/lo.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/lo.po	2014-06-21 11:15:36.457724399 -0500
-@@ -168,7 +168,7 @@ msgid "Cannot upload to specified URL."
- msgstr ""
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
-+msgid "There was a problem uploading your report to CentOS support."
- msgstr ""
- 
- #: ../sos/policyredhat.py:401
-diff -uNrp sos-3.0.orig/po/lt.po sos-3.0/po/lt.po
---- sos-3.0.orig/po/lt.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/lt.po	2014-06-21 11:15:36.457724399 -0500
-@@ -170,7 +170,7 @@ msgid "Cannot upload to specified URL."
- msgstr ""
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
-+msgid "There was a problem uploading your report to CentOS support."
- msgstr ""
- 
- #: ../sos/policyredhat.py:401
-diff -uNrp sos-3.0.orig/po/lv.po sos-3.0/po/lv.po
---- sos-3.0.orig/po/lv.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/lv.po	2014-06-21 11:15:36.458724392 -0500
-@@ -170,7 +170,7 @@ msgid "Cannot upload to specified URL."
- msgstr ""
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
-+msgid "There was a problem uploading your report to CentOS support."
- msgstr ""
- 
- #: ../sos/policyredhat.py:401
-diff -uNrp sos-3.0.orig/po/mk.po sos-3.0/po/mk.po
---- sos-3.0.orig/po/mk.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/mk.po	2014-06-21 11:15:36.459724384 -0500
-@@ -168,7 +168,7 @@ msgid "Cannot upload to specified URL."
- msgstr ""
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
-+msgid "There was a problem uploading your report to CentOS support."
- msgstr ""
- 
- #: ../sos/policyredhat.py:401
-diff -uNrp sos-3.0.orig/po/ml.po sos-3.0/po/ml.po
---- sos-3.0.orig/po/ml.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/ml.po	2014-06-21 11:15:36.459724384 -0500
-@@ -183,8 +183,8 @@ msgid "Cannot upload to specified URL."
- msgstr "നല്‍കിയിരിക്കുന്ന URL-ലേക്ക് ഫയല്‍ അപ്ലോഡ് ചെയ്യുവാന്‍ സാധ്യമായില്ല "
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
--msgstr "Red Hat-ലേക്ക് നിങ്ങളുടെ റിപ്പോറ്‍ട്ട് അയയ്ക്കുന്നതില്‍ ഏതോ പ്റശ്നം ഉണ്ടായിരിക്കുന്നു."
-+msgid "There was a problem uploading your report to CentOS support."
-+msgstr "CentOS-ലേക്ക് നിങ്ങളുടെ റിപ്പോറ്‍ട്ട് അയയ്ക്കുന്നതില്‍ ഏതോ പ്റശ്നം ഉണ്ടായിരിക്കുന്നു."
- 
- #: ../sos/policyredhat.py:401
- #, python-format
-diff -uNrp sos-3.0.orig/po/mr.po sos-3.0/po/mr.po
---- sos-3.0.orig/po/mr.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/mr.po	2014-06-21 11:15:36.460724376 -0500
-@@ -184,8 +184,8 @@ msgid "Cannot upload to specified URL."
- msgstr "निर्देशीत URL अपलोड करण्यास अशक्य."
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
--msgstr "तुमचा अहवाल Red Hat सपोर्टकडे पाठवतेवेळी अडचण आढळली."
-+msgid "There was a problem uploading your report to CentOS support."
-+msgstr "तुमचा अहवाल CentOS सपोर्टकडे पाठवतेवेळी अडचण आढळली."
- 
- #: ../sos/policyredhat.py:401
- #, python-format
-diff -uNrp sos-3.0.orig/po/ms.po sos-3.0/po/ms.po
---- sos-3.0.orig/po/ms.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/ms.po	2014-06-21 11:15:36.461724368 -0500
-@@ -168,7 +168,7 @@ msgid "Cannot upload to specified URL."
- msgstr ""
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
-+msgid "There was a problem uploading your report to CentOS support."
- msgstr ""
- 
- #: ../sos/policyredhat.py:401
-diff -uNrp sos-3.0.orig/po/my.po sos-3.0/po/my.po
---- sos-3.0.orig/po/my.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/my.po	2014-06-21 11:15:36.461724368 -0500
-@@ -168,7 +168,7 @@ msgid "Cannot upload to specified URL."
- msgstr ""
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
-+msgid "There was a problem uploading your report to CentOS support."
- msgstr ""
- 
- #: ../sos/policyredhat.py:401
-diff -uNrp sos-3.0.orig/po/nb.po sos-3.0/po/nb.po
---- sos-3.0.orig/po/nb.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/nb.po	2014-06-21 11:15:36.462724360 -0500
-@@ -170,7 +170,7 @@ msgid "Cannot upload to specified URL."
- msgstr "Kan ikke laste opp til oppgitt URL."
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
-+msgid "There was a problem uploading your report to CentOS support."
- msgstr ""
- 
- #: ../sos/policyredhat.py:401
-diff -uNrp sos-3.0.orig/po/nds.po sos-3.0/po/nds.po
---- sos-3.0.orig/po/nds.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/nds.po	2014-06-21 11:15:36.462724360 -0500
-@@ -165,7 +165,7 @@ msgid "Cannot upload to specified URL."
- msgstr ""
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
-+msgid "There was a problem uploading your report to CentOS support."
- msgstr ""
- 
- #: ../sos/policyredhat.py:401
-diff -uNrp sos-3.0.orig/po/nl.po sos-3.0/po/nl.po
---- sos-3.0.orig/po/nl.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/nl.po	2014-06-21 11:15:36.462724360 -0500
-@@ -183,9 +183,9 @@ msgid "Cannot upload to specified URL."
- msgstr "Kan niet naar de opgegeven URL uploaden."
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
-+msgid "There was a problem uploading your report to CentOS support."
- msgstr ""
--"Er trad een probleem op bij het uploaden van jouw rapport naar Red Hat "
-+"Er trad een probleem op bij het uploaden van jouw rapport naar CentOS "
- "support."
- 
- #: ../sos/policyredhat.py:401
-diff -uNrp sos-3.0.orig/po/nn.po sos-3.0/po/nn.po
---- sos-3.0.orig/po/nn.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/nn.po	2014-06-21 11:15:36.462724360 -0500
-@@ -169,7 +169,7 @@ msgid "Cannot upload to specified URL."
- msgstr ""
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
-+msgid "There was a problem uploading your report to CentOS support."
- msgstr ""
- 
- #: ../sos/policyredhat.py:401
-diff -uNrp sos-3.0.orig/po/nso.po sos-3.0/po/nso.po
---- sos-3.0.orig/po/nso.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/nso.po	2014-06-21 11:15:36.463724353 -0500
-@@ -168,7 +168,7 @@ msgid "Cannot upload to specified URL."
- msgstr ""
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
-+msgid "There was a problem uploading your report to CentOS support."
- msgstr ""
- 
- #: ../sos/policyredhat.py:401
-diff -uNrp sos-3.0.orig/po/or.po sos-3.0/po/or.po
---- sos-3.0.orig/po/or.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/or.po	2014-06-21 11:15:36.463724353 -0500
-@@ -188,8 +188,8 @@ msgid "Cannot upload to specified URL."
- msgstr "ଉଲ୍ଲିଖିତ URL କୁ ଧାରଣ କରିପାରିବେ ନାହିଁ।"
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
--msgstr "Red Hat ସହାୟତାରେ ଆପଣଙ୍କର ବିବରଣୀକୁ ଧାରଣ କରିବାରେ ସମସ୍ୟା ଦୋଇଥିଲା।"
-+msgid "There was a problem uploading your report to CentOS support."
-+msgstr "CentOS ସହାୟତାରେ ଆପଣଙ୍କର ବିବରଣୀକୁ ଧାରଣ କରିବାରେ ସମସ୍ୟା ଦୋଇଥିଲା।"
- 
- #: ../sos/policyredhat.py:401
- #, python-format
-diff -uNrp sos-3.0.orig/po/pa.po sos-3.0/po/pa.po
---- sos-3.0.orig/po/pa.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/pa.po	2014-06-21 11:15:36.463724353 -0500
-@@ -184,8 +184,8 @@ msgid "Cannot upload to specified URL."
- msgstr "ਦਿੱਤੇ URL ਤੇ ਅੱਪਲੋਡ ਨਹੀਂ ਕਰ ਸਕਦਾ।"
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
--msgstr "ਤੁਹਾਡੀ ਰਿਪੋਰਟ ਨੂੰ Red Hat ਸਹਿਯੋਗ ਤੇ ਅੱਪਲੋਡ ਕਰਨ ਵੇਲੇ ਗਲਤੀ ਆਈ ਹੈ।"
-+msgid "There was a problem uploading your report to CentOS support."
-+msgstr "ਤੁਹਾਡੀ ਰਿਪੋਰਟ ਨੂੰ CentOS ਸਹਿਯੋਗ ਤੇ ਅੱਪਲੋਡ ਕਰਨ ਵੇਲੇ ਗਲਤੀ ਆਈ ਹੈ।"
- 
- #: ../sos/policyredhat.py:401
- #, python-format
-diff -uNrp sos-3.0.orig/po/pl.po sos-3.0/po/pl.po
---- sos-3.0.orig/po/pl.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/pl.po	2014-06-21 11:15:36.463724353 -0500
-@@ -179,10 +179,10 @@ msgid "Cannot upload to specified URL."
- msgstr "Nie można wysłać na podany adres URL."
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
-+msgid "There was a problem uploading your report to CentOS support."
- msgstr ""
- "Wystąpił problem podczas wysyłania raportu do wsparcia technicznego firmy "
--"Red Hat."
-+"CentOS."
- 
- #: ../sos/policyredhat.py:401
- #, python-format
-diff -uNrp sos-3.0.orig/po/pt_BR.po sos-3.0/po/pt_BR.po
---- sos-3.0.orig/po/pt_BR.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/pt_BR.po	2014-06-21 11:15:36.463724353 -0500
-@@ -182,8 +182,8 @@ msgid "Cannot upload to specified URL."
- msgstr "Não foi possível enviar para a URL especificada."
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
--msgstr "Houve um problema ao enviar o seu relatório para o suporte da Red Hat."
-+msgid "There was a problem uploading your report to CentOS support."
-+msgstr "Houve um problema ao enviar o seu relatório para o suporte da CentOS."
- 
- #: ../sos/policyredhat.py:401
- #, python-format
-diff -uNrp sos-3.0.orig/po/pt.po sos-3.0/po/pt.po
---- sos-3.0.orig/po/pt.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/pt.po	2014-06-21 11:15:36.463724353 -0500
-@@ -185,8 +185,8 @@ msgid "Cannot upload to specified URL."
- msgstr "Não foi possível submeter para o URL especificado."
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
--msgstr "Ocorreu um erro ao submeter o seu relatório para o suporte Red Hat."
-+msgid "There was a problem uploading your report to CentOS support."
-+msgstr "Ocorreu um erro ao submeter o seu relatório para o suporte CentOS."
- 
- #: ../sos/policyredhat.py:401
- #, python-format
-diff -uNrp sos-3.0.orig/po/ro.po sos-3.0/po/ro.po
---- sos-3.0.orig/po/ro.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/ro.po	2014-06-21 11:15:36.464724345 -0500
-@@ -168,7 +168,7 @@ msgid "Cannot upload to specified URL."
- msgstr ""
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
-+msgid "There was a problem uploading your report to CentOS support."
- msgstr ""
- 
- #: ../sos/policyredhat.py:401
-diff -uNrp sos-3.0.orig/po/ru.po sos-3.0/po/ru.po
---- sos-3.0.orig/po/ru.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/ru.po	2014-06-21 11:15:36.464724345 -0500
-@@ -186,9 +186,9 @@ msgid "Cannot upload to specified URL."
- msgstr "Не удалось отправить файл."
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
-+msgid "There was a problem uploading your report to CentOS support."
- msgstr ""
--"Произошла ошибка при попытке отправить отчёт в службу поддержки Red Hat."
-+"Произошла ошибка при попытке отправить отчёт в службу поддержки CentOS."
- 
- #: ../sos/policyredhat.py:401
- #, python-format
-diff -uNrp sos-3.0.orig/po/si.po sos-3.0/po/si.po
---- sos-3.0.orig/po/si.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/si.po	2014-06-21 11:15:36.464724345 -0500
-@@ -85,11 +85,11 @@ msgid ""
- "No changes will be made to your system.\n"
- "\n"
- msgstr ""
--"මෙම උපයෝගි තාවය දෘඩාංග පිළිබදව සවිස්තරාත්මක තොරතුරු රැස්කරණ අතර ඔබගේ  Red Hat "
-+"මෙම උපයෝගි තාවය දෘඩාංග පිළිබදව සවිස්තරාත්මක තොරතුරු රැස්කරණ අතර ඔබගේ  CentOS "
- "Enterprise Linux  පද්ධතිය පිහිටවනු ලැබේ.\n"
- "රැස් කළ තොරතුරු සහ සංරක්‍ෂකය /tmp යටතේ ඇසුරුම් ගත කර ඇති අතර ඔබට එය සහායක නියෝජිත වෙත "
- "යැවිය හැක.\n"
--"Red Hat මෙම තොරතුරු  භාවිතා කරන්නේ දෝෂ විනිශ්චය පමණක් වන අතර එම තොරතුරු රහසිගත තොරතුරු "
-+"CentOS මෙම තොරතුරු  භාවිතා කරන්නේ දෝෂ විනිශ්චය පමණක් වන අතර එම තොරතුරු රහසිගත තොරතුරු "
- "ලෙස සළකණු ලබයි.\n"
- "\n"
- "මෙම ක්‍රියාව නිම වීමට වේලාවක් ගතවනු ඇත.\n"
-@@ -184,13 +184,13 @@ msgid "Cannot upload to specified URL."
- msgstr "දක්වන ලඳ URL වෙත ලබා දිය නොහැක."
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
--msgstr "ඔබගේ වාර්තාව Red Hat සහය වෙතට ලබා දිමේදි දෝෂයල් ඇති විය."
-+msgid "There was a problem uploading your report to CentOS support."
-+msgstr "ඔබගේ වාර්තාව CentOS සහය වෙතට ලබා දිමේදි දෝෂයල් ඇති විය."
- 
- #: ../sos/policyredhat.py:401
- #, fuzzy, python-format
- msgid "Your report was successfully uploaded to %s with name:"
--msgstr "ඔබගේ වාර්තාව සාර්තකව Red Hat's ftp සේවාදායකයට ලබාදුන් අතර නම වූයේ:"
-+msgstr "ඔබගේ වාර්තාව සාර්තකව CentOS's ftp සේවාදායකයට ලබාදුන් අතර නම වූයේ:"
- 
- #: ../sos/policyredhat.py:404
- msgid "Please communicate this name to your support representative."
-diff -uNrp sos-3.0.orig/po/sk.po sos-3.0/po/sk.po
---- sos-3.0.orig/po/sk.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/sk.po	2014-06-21 11:15:36.464724345 -0500
-@@ -182,8 +182,8 @@ msgid "Cannot upload to specified URL."
- msgstr "Nie je možné odoslať na zadanú adresu URL."
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
--msgstr "Nastal problém pri odosielaní vašej správy na podporu Red Hat."
-+msgid "There was a problem uploading your report to CentOS support."
-+msgstr "Nastal problém pri odosielaní vašej správy na podporu CentOS."
- 
- #: ../sos/policyredhat.py:401
- #, python-format
-diff -uNrp sos-3.0.orig/po/sl.po sos-3.0/po/sl.po
---- sos-3.0.orig/po/sl.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/sl.po	2014-06-21 11:15:36.464724345 -0500
-@@ -170,7 +170,7 @@ msgid "Cannot upload to specified URL."
- msgstr ""
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
-+msgid "There was a problem uploading your report to CentOS support."
- msgstr ""
- 
- #: ../sos/policyredhat.py:401
-diff -uNrp sos-3.0.orig/po/sos.pot sos-3.0/po/sos.pot
---- sos-3.0.orig/po/sos.pot	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/sos.pot	2014-06-21 11:15:36.464724345 -0500
-@@ -169,7 +169,7 @@ msgid "Cannot upload to specified URL."
- msgstr ""
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
-+msgid "There was a problem uploading your report to CentOS support."
- msgstr ""
- 
- #: ../sos/policyredhat.py:401
-diff -uNrp sos-3.0.orig/po/sq.po sos-3.0/po/sq.po
---- sos-3.0.orig/po/sq.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/sq.po	2014-06-21 11:15:36.464724345 -0500
-@@ -168,7 +168,7 @@ msgid "Cannot upload to specified URL."
- msgstr ""
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
-+msgid "There was a problem uploading your report to CentOS support."
- msgstr ""
- 
- #: ../sos/policyredhat.py:401
-diff -uNrp sos-3.0.orig/po/sr@latin.po sos-3.0/po/sr@latin.po
---- sos-3.0.orig/po/sr@latin.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/sr@latin.po	2014-06-21 11:15:36.465724337 -0500
-@@ -182,8 +182,8 @@ msgid "Cannot upload to specified URL."
- msgstr "Ne mogu da pošaljem na navedeni URL."
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
--msgstr "Pojavio se problem pri slanju vašeg izveštaja Red Hat podršci."
-+msgid "There was a problem uploading your report to CentOS support."
-+msgstr "Pojavio se problem pri slanju vašeg izveštaja CentOS podršci."
- 
- #: ../sos/policyredhat.py:401
- #, python-format
-diff -uNrp sos-3.0.orig/po/sr.po sos-3.0/po/sr.po
---- sos-3.0.orig/po/sr.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/sr.po	2014-06-21 11:15:36.465724337 -0500
-@@ -182,8 +182,8 @@ msgid "Cannot upload to specified URL."
- msgstr "Не могу да пошаљем на наведени УРЛ."
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
--msgstr "Појавио се проблем при слању вашег извештаја Red Hat подршци."
-+msgid "There was a problem uploading your report to CentOS support."
-+msgstr "Појавио се проблем при слању вашег извештаја CentOS подршци."
- 
- #: ../sos/policyredhat.py:401
- #, python-format
-diff -uNrp sos-3.0.orig/po/sv.po sos-3.0/po/sv.po
---- sos-3.0.orig/po/sv.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/sv.po	2014-06-21 11:15:36.465724337 -0500
-@@ -185,8 +185,8 @@ msgid "Cannot upload to specified URL."
- msgstr "Kan inte skicka till angiven URL."
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
--msgstr "Ett problem uppstod när din rapport skickades till Red Hat support."
-+msgid "There was a problem uploading your report to CentOS support."
-+msgstr "Ett problem uppstod när din rapport skickades till CentOS support."
- 
- #: ../sos/policyredhat.py:401
- #, python-format
-diff -uNrp sos-3.0.orig/po/ta.po sos-3.0/po/ta.po
---- sos-3.0.orig/po/ta.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/ta.po	2014-06-21 11:15:36.465724337 -0500
-@@ -188,8 +188,8 @@ msgid "Cannot upload to specified URL."
- msgstr "குறிப்பிட்ட இணைய முகவரியில் ஏற்ற முடியவில்லை."
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
--msgstr "உங்கள் அறிக்கையை Red Hat சேவைக்கு அனுப்புவதில் சிக்கல்."
-+msgid "There was a problem uploading your report to CentOS support."
-+msgstr "உங்கள் அறிக்கையை CentOS சேவைக்கு அனுப்புவதில் சிக்கல்."
- 
- #: ../sos/policyredhat.py:401
- #, python-format
-diff -uNrp sos-3.0.orig/po/te.po sos-3.0/po/te.po
---- sos-3.0.orig/po/te.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/te.po	2014-06-21 11:15:36.465724337 -0500
-@@ -185,8 +185,8 @@ msgid "Cannot upload to specified URL."
- msgstr "తెలుపబడిన URLకు అప్‌లోడ్ చేయలేదు."
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
--msgstr "మీ సమస్యను Red Hat మద్దతునకు అప్‌లోడు చేయుటలో వొక సమస్యవుంది."
-+msgid "There was a problem uploading your report to CentOS support."
-+msgstr "మీ సమస్యను CentOS మద్దతునకు అప్‌లోడు చేయుటలో వొక సమస్యవుంది."
- 
- #: ../sos/policyredhat.py:401
- #, python-format
-diff -uNrp sos-3.0.orig/po/th.po sos-3.0/po/th.po
---- sos-3.0.orig/po/th.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/th.po	2014-06-21 11:18:12.876508348 -0500
-@@ -84,9 +84,9 @@ msgid ""
- "\n"
- msgstr ""
- "เครื่องมือนี้จะเก็บข้อมูลโดยละเอียดเกี่ยวกับฮาร์ดแวร์และการตั้งค่า\n"
--"ระบบ Red Hat Enterprise Linux ของคุณ ข้อมูลจะถูกเก็บและ\n"
-+"ระบบ CentOS Enterprise Linux ของคุณ ข้อมูลจะถูกเก็บและ\n"
- "สร้างเป็นไฟล์ที่ /tmp ซึ่งคุณสามารถส่งไปยังผู้สนับสนุนได้\n"
--"Red Hat จะใช้ข้อมูลนี้ในการแก้ไขปัญหาเท่านั้น และจะถือว่าเป็น\n"
-+"CentOS จะใช้ข้อมูลนี้ในการแก้ไขปัญหาเท่านั้น และจะถือว่าเป็น\n"
- "ความลับ\n"
- "\n"
- "กระบวนการนี้อาจจะใช้เวลาสักครู่ในการทำงาน จะไม่มีการแก้ไข\n"
-@@ -180,13 +180,13 @@ msgid "Cannot upload to specified URL."
- msgstr "ไม่สามารถอัพโหลดไปยัง URL ที่ระบุ"
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
--msgstr "มีปัญหาในการอัพโหลดรายงานของคุณไปยังฝ่ายสนับสนุน Red Hat"
-+msgid "There was a problem uploading your report to CentOS support."
-+msgstr "มีปัญหาในการอัพโหลดรายงานของคุณไปยังฝ่ายสนับสนุน CentOS"
- 
- #: ../sos/policyredhat.py:401
- #, fuzzy, python-format
- msgid "Your report was successfully uploaded to %s with name:"
--msgstr "รายงานของคุณได้ถูกส่งไปยังเซิร์ฟเวอร์ ftp ของ Red Hat ในชื่อ:"
-+msgstr "รายงานของคุณได้ถูกส่งไปยังเซิร์ฟเวอร์ ftp ของ CentOS ในชื่อ:"
- 
- #: ../sos/policyredhat.py:404
- msgid "Please communicate this name to your support representative."
-diff -uNrp sos-3.0.orig/po/tr.po sos-3.0/po/tr.po
---- sos-3.0.orig/po/tr.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/tr.po	2014-06-21 11:15:36.466724329 -0500
-@@ -185,8 +185,8 @@ msgid "Cannot upload to specified URL."
- msgstr "Belirtilen URL 'ye yükleme yapılamadı."
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
--msgstr "Raporunuz Red Hat desteğe yüklenirken bir sorunla karşılaşıldı."
-+msgid "There was a problem uploading your report to CentOS support."
-+msgstr "Raporunuz CentOS desteğe yüklenirken bir sorunla karşılaşıldı."
- 
- #: ../sos/policyredhat.py:401
- #, python-format
-diff -uNrp sos-3.0.orig/po/uk.po sos-3.0/po/uk.po
---- sos-3.0.orig/po/uk.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/uk.po	2014-06-21 11:15:36.466724329 -0500
-@@ -183,8 +183,8 @@ msgid "Cannot upload to specified URL."
- msgstr "Не вдається надіслати файл."
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
--msgstr "Виникла помилка при спробі надіслати звіт до служби підтримки Red Hat."
-+msgid "There was a problem uploading your report to CentOS support."
-+msgstr "Виникла помилка при спробі надіслати звіт до служби підтримки CentOS."
- 
- #: ../sos/policyredhat.py:401
- #, python-format
-diff -uNrp sos-3.0.orig/po/ur.po sos-3.0/po/ur.po
---- sos-3.0.orig/po/ur.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/ur.po	2014-06-21 11:15:36.466724329 -0500
-@@ -168,7 +168,7 @@ msgid "Cannot upload to specified URL."
- msgstr ""
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
-+msgid "There was a problem uploading your report to CentOS support."
- msgstr ""
- 
- #: ../sos/policyredhat.py:401
-diff -uNrp sos-3.0.orig/po/vi.po sos-3.0/po/vi.po
---- sos-3.0.orig/po/vi.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/vi.po	2014-06-21 11:15:36.466724329 -0500
-@@ -169,7 +169,7 @@ msgid "Cannot upload to specified URL."
- msgstr ""
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
-+msgid "There was a problem uploading your report to CentOS support."
- msgstr ""
- 
- #: ../sos/policyredhat.py:401
-diff -uNrp sos-3.0.orig/po/zh_CN.po sos-3.0/po/zh_CN.po
---- sos-3.0.orig/po/zh_CN.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/zh_CN.po	2014-06-21 11:15:36.466724329 -0500
-@@ -184,7 +184,7 @@ msgid "Cannot upload to specified URL."
- msgstr "无法上传到指定的网址。"
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
-+msgid "There was a problem uploading your report to CentOS support."
- msgstr "在将您的报告上传到红帽支持时出错。"
- 
- #: ../sos/policyredhat.py:401
-diff -uNrp sos-3.0.orig/po/zh_TW.po sos-3.0/po/zh_TW.po
---- sos-3.0.orig/po/zh_TW.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/zh_TW.po	2014-06-21 11:15:36.466724329 -0500
-@@ -180,8 +180,8 @@ msgid "Cannot upload to specified URL."
- msgstr "無法上傳指定的網址。"
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
--msgstr "將報告上傳至 Red Hat 技術支援時,出現問題。"
-+msgid "There was a problem uploading your report to CentOS support."
-+msgstr "將報告上傳至 CentOS 技術支援時,出現問題。"
- 
- #: ../sos/policyredhat.py:401
- #, python-format
-diff -uNrp sos-3.0.orig/po/zu.po sos-3.0/po/zu.po
---- sos-3.0.orig/po/zu.po	2013-06-10 12:35:56.000000000 -0500
-+++ sos-3.0/po/zu.po	2014-06-21 11:15:36.467724321 -0500
-@@ -168,7 +168,7 @@ msgid "Cannot upload to specified URL."
- msgstr ""
- 
- #: ../sos/policyredhat.py:399
--msgid "There was a problem uploading your report to Red Hat support."
-+msgid "There was a problem uploading your report to CentOS support."
- msgstr ""
- 
- #: ../sos/policyredhat.py:401
-diff -uNrp sos-3.2.orig/sos/plugins/cluster.py sos-3.2/sos/plugins/cluster.py
---- sos-3.2.orig/sos/plugins/cluster.py	2014-09-30 12:38:28.000000000 -0500
-+++ sos-3.2/sos/plugins/cluster.py	2015-03-09 14:58:02.982869116 -0500
-@@ -19,7 +19,7 @@ from datetime import datetime, timedelta
- 
- 
- class Cluster(Plugin, RedHatPlugin):
--    """Red Hat Cluster Suite and GFS
-+    """Cluster Suite and GFS
-     """
- 
-     plugin_name = 'cluster'
-diff -uNrp sos-3.2.orig/sos/plugins/cs.py sos-3.2/sos/plugins/cs.py
---- sos-3.2.orig/sos/plugins/cs.py	2014-09-30 12:38:28.000000000 -0500
-+++ sos-3.2/sos/plugins/cs.py	2015-03-09 14:58:20.085778645 -0500
-@@ -54,7 +54,7 @@ class CertificateSystem(Plugin, RedHatPl
-     def setup(self):
-         csversion = self.checkversion()
-         if not csversion:
--            self.add_alert("Red Hat Certificate System not found.")
-+            self.add_alert("Certificate System not found.")
-             return
-         if csversion == 71:
-             self.add_copy_spec([
-diff -uNrp sos-3.2.orig/sos/plugins/hts.py sos-3.2/sos/plugins/hts.py
---- sos-3.2.orig/sos/plugins/hts.py	2014-09-30 12:38:28.000000000 -0500
-+++ sos-3.2/sos/plugins/hts.py	2015-03-09 14:58:36.973689309 -0500
-@@ -16,7 +16,7 @@ from sos.plugins import Plugin, RedHatPl
- 
- 
- class HardwareTestSuite(Plugin, RedHatPlugin):
--    """Red Hat Hardware Test Suite
-+    """Hardware Test Suite
-     """
- 
-     plugin_name = 'hardwaretestsuite'
-diff -uNrp sos-3.2.orig/sos/plugins/__init__.py sos-3.2/sos/plugins/__init__.py
---- sos-3.2.orig/sos/plugins/__init__.py	2015-03-09 14:50:34.162237962 -0500
-+++ sos-3.2/sos/plugins/__init__.py	2015-03-09 14:58:56.861584108 -0500
-@@ -732,7 +732,7 @@ class Plugin(object):
- 
- 
- class RedHatPlugin(object):
--    """Tagging class to indicate that this plugin works with Red Hat Linux"""
-+    """Tagging class to indicate that this plugin works with CentOS Linux"""
-     pass
- 
- 
-diff -uNrp sos-3.2.orig/sos/plugins/rhui.py sos-3.2/sos/plugins/rhui.py
---- sos-3.2.orig/sos/plugins/rhui.py	2014-09-30 12:38:28.000000000 -0500
-+++ sos-3.2/sos/plugins/rhui.py	2015-03-09 14:59:16.909478057 -0500
-@@ -16,7 +16,7 @@ from sos.plugins import Plugin, RedHatPl
- 
- 
- class Rhui(Plugin, RedHatPlugin):
--    """Red Hat Update Infrastructure
-+    """Update Infrastructure
-     """
- 
-     plugin_name = 'rhui'
-diff -uNrp sos-3.2.orig/sos/policies/redhat.py sos-3.2/sos/policies/redhat.py
---- sos-3.2.orig/sos/policies/redhat.py	2014-09-30 12:38:28.000000000 -0500
-+++ sos-3.2/sos/policies/redhat.py	2015-03-09 14:56:04.383496495 -0500
-@@ -33,9 +33,9 @@ except:
- 
- 
- class RedHatPolicy(LinuxPolicy):
--    distro = "Red Hat"
--    vendor = "Red Hat"
--    vendor_url = "http://www.redhat.com/"
-+    distro = "CentOS"
-+    vendor = "CentOS"
-+    vendor_url = "http://www.centos.org/"
-     _tmp_dir = "/var/tmp"
- 
-     def __init__(self):
-@@ -57,9 +57,9 @@ class RedHatPolicy(LinuxPolicy):
- 
-     @classmethod
-     def check(self):
--        """This method checks to see if we are running on Red Hat. It must be
-+        """This method checks to see if we are running on CentOS. It must be
-         overriden by concrete subclasses to return True when running on a
--        Fedora, RHEL or other Red Hat distribution or False otherwise."""
-+        CentOS or False otherwise."""
-         return False
- 
-     def runlevel_by_service(self, name):
-@@ -94,9 +94,9 @@ class RedHatPolicy(LinuxPolicy):
- 
- 
- class RHELPolicy(RedHatPolicy):
--    distro = "Red Hat Enterprise Linux"
--    vendor = "Red Hat"
--    vendor_url = "https://access.redhat.com/support/"
-+    distro = "CentOS Linux"
-+    vendor = "CentOS"
-+    vendor_url = "https://www.centos.org/"
-     msg = _("""\
- This command will collect diagnostic and configuration \
- information from this %(distro)s system and installed \
diff --git a/SPECS/sos.spec b/SPECS/sos.spec
index 1613b1e..aea87b6 100644
--- a/SPECS/sos.spec
+++ b/SPECS/sos.spec
@@ -1,7 +1,7 @@
 %{!?python_sitelib: %define python_sitelib %(%{__python} -c "from distutils.sysconfig import get_python_lib; print get_python_lib()")}
 Summary: A set of tools to gather troubleshooting information from a system
 Name: sos
-Version: 3.5
+Version: 3.6
 Release: 9%{?dist}
 Group: Applications/System
 Source0: https://github.com/sosreport/sos/archive/%{version}.tar.gz
@@ -15,28 +15,21 @@ Requires: libxml2-python
 Requires: python-six
 Requires: bzip2
 Requires: xz
+Requires: python2-futures
 Obsoletes: sos-plugins-openstack
 Patch0: skip-generating-doc.patch
-Patch1: sos-bz1509079-vdo.patch
-Patch2: sos-bz1506908-openstack-containerized.patch
-Patch3: sos-bz1483414-opendaylight-plugin.patch
-Patch4: sos-bz1491042-keystone-domains.patch
-Patch5: sos-bz1519267-haproxy-etcd-tracebacks.patch
-Patch6: sos-bz1463509-oc-adm-diagnostics.patch
-Patch7: sos-bz1494420-postgresql-scl-path.patch
-Patch8: sos-bz1353873-pcp-logsize.patch
-Patch9: sos-bz1517767-osp-ironic.patch
-Patch10: sos-bz1539038-etcd-private-keys.patch
-Patch11: sos-bz1535390-ipa-logs.patch
-Patch12: sos-bz1525620-rabbitmq-osp12-containerized.patch
-Patch13: sos-bz1568960-ovirt-provider-ovn.patch
-Patch14: sos-bz1568884-kernel-dont-collect-timer.patch
-Patch15: sos-bz1568882-openstack-octavia-plugin.patch
-Patch16: sos-bz1580526-docker-backport.patch
-Patch17: sos-bz1580525-ovn-plugins.patch
-Patch18: sos-bz1584548-traceback-memory.patch
-Patch19: sos-centos-branding.patch
-
+Patch1: sos-bz1474976-regexp-sub.patch
+Patch2: sos-bz1594327-archive-encryption.patch
+Patch3: sos-bz1597532-stat-isblk.patch
+Patch4: sos-bz1596494-cds-on-rhui3.patch
+Patch5: sos-bz1609135-ceph-dont-collect-tmp-mnt.patch
+Patch6: sos-bz1608384-archive-name-sanitize.patch
+Patch7: sos-bz1613806-rhosp-lsof-optional.patch
+Patch8: sos-bz1600158-rhv-log-collector-analyzer.patch
+Patch9: sos-bz1616030-etcd-kube-osp-3-10.patch
+Patch10: sos-bz1624043-symlinks-not-copied.patch
+Patch11: sos-bz1626159-atomic-attribute-error.patch
+Patch12: sos-bz1623070-pipe-returncode.patch
 
 %description
 Sos is a set of tools that gathers information about system
@@ -59,13 +52,6 @@ support technicians and developers.
 %patch10 -p1
 %patch11 -p1
 %patch12 -p1
-%patch13 -p1
-%patch14 -p1
-%patch15 -p1
-%patch16 -p1
-%patch17 -p1
-%patch18 -p1
-%patch19 -p1
 
 %build
 make
@@ -89,26 +75,74 @@ rm -rf ${RPM_BUILD_ROOT}
 %config(noreplace) %{_sysconfdir}/sos.conf
 
 %changelog
-* Tue Jun 26 2018 CentOS Sources <bugs@centos.org> - 3.5-9.el7.centos
-- Roll in CentOS Branding
+* Fri Sep 14 2018 Pavel Moravec <pmoravec@redhat.com> = 3.6-9
+- [archive] recursive symlink fix and simplify directory destination
+  Resolves: bz1624043
+
+* Thu Sep 13 2018 Pavel Moravec <pmoravec@redhat.com> = 3.6-8
+- [plugin,archive] fix remaining add_link issues
+  Resolves: bz1624043
+
+* Tue Sep 11 2018 Pavel Moravec <pmoravec@redhat.com> = 3.6-7
+- [archive] fix copy&paste error in link_path
+  Resolves: bz1624043
+
+* Mon Sep 10 2018 Pavel Moravec <pmoravec@redhat.com> = 3.6-6
+- [archive] fix leading path creation
+  Resolves: bz1624043
+- [atomic] Define valid preset for RHEL Atomic
+  Resolves: bz1626159
+- [utilities] wait till AsyncReader p.poll() returns None
+  Resolves: bz1623070
+
+* Tue Aug 21 2018 Pavel Moravec <pmoravec@redhat.com> = 3.6-5
+- [rhv-log-collector-analyzer] Add new plugin for RHV
+  Resolves: bz1600158
+- [kubernetes|etcd] Support OpenShift 3.10 deployments
+  Resolves: bz1616030
+
+* Fri Aug 10 2018 Pavel Moravec <pmoravec@redhat.com> = 3.6-4
+- [apparmor,ceph] fix typo in add_forbidden_path
+  Resolves: bz1609135
+- [policies] sanitize report label
+  Resolves: bz1608384
+- [policies,process] make lsof execution optional, don't call on RHOSP
+  Resolves: bz1613806
+
+* Thu Jul 12 2018 Pavel Moravec <pmoravec@redhat.com> = 3.6-3
+- [sosreport] Add mechanism to encrypt final archive
+  Resolves: bz1594327
+- [archive] fix stat typo
+  Resolves: bz1597532
+- [rhui] Fix detection of CDS for RHUI3
+  Resolves: bz1596494
+
+* Mon Jul 02 2018 Pavel Moravec <pmoravec@redhat.com> = 3.6-2
+- [archive] fix add_string()/do_*_sub() regression
+  Resolves: bz1474976
+- [kernel] handle case when bpftool installed but not implemented
+  Resolves: bz1559756
+
+* Fri Jun 22 2018 Pavel Moravec <pmoravec@redhat.com> = 3.6-1
+- New upstream release sos-3.6
 
 * Thu May 31 2018 Pavel Moravec <pmoravec@redhat.com> = 3.5-9
 - [logs] collect journalctl verbosed logs with --all-logs only
-  Resolves: bz1584548
+  Resolves: bz1183244
 
 * Mon May 21 2018 Pavel Moravec <pmoravec@redhat.com> = 3.5-8
 - [docker] backport three container related patches
-  Resolves: bz1580526
+  Resolves: bz1573907
 - [ovn] add two OpenVSwitch plugins
-  Resoles: bz1580525
+  Resolves: bz1560845
 
 * Wed Apr 18 2018 Pavel Moravec <pmoravec@redhat.com> = 3.5-7
 - [kernel] Disable gathering /proc/timer* statistics
-  Resolves: bz1568884
+  Resolves: bz1566933
 - [openstack_octavia] Add new plugin
-  Resolves: bz1568882
+  Resolves: bz1541100
 - [ovirt-provider-ovn] A new plugin
-  Resolves: bz1568960
+  Resolves: bz1547544
 
 * Tue Feb 13 2018 Pavel Moravec <pmoravec@redhat.com> = 3.5-6
 - [ipa] set ipa_version variable before referencing it