Blob Blame History Raw
From e63c17d902f99d96cbd4cb2a06d9cbbf8a4d4c18 Mon Sep 17 00:00:00 2001
From: Martin Schuppert <mschuppert@redhat.com>
Date: Tue, 7 Nov 2017 18:07:47 +0100
Subject: [PATCH] [openstack_nova] added missing nova container config

Tripleo Pike opinionated config+log paths to be collected
for services, when running in containers.

The nova configuration for the nova and placement containers
was included, but the nova configuration for the libvirt
container was missing. The httpd configs for the nova container
were also added.

This is a change to #1130

Signed-off-by: Martin Schuppert <mschuppert@redhat.com>
---
 sos/plugins/openstack_nova.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/sos/plugins/openstack_nova.py b/sos/plugins/openstack_nova.py
index 1fbfa76a..cdd29760 100644
--- a/sos/plugins/openstack_nova.py
+++ b/sos/plugins/openstack_nova.py
@@ -87,7 +87,10 @@ class OpenStackNova(Plugin):
             "/etc/nova/",
             self.var_puppet_gen + "/etc/nova/",
             self.var_puppet_gen + "/etc/my.cnf.d/tripleo.cnf",
-            self.var_puppet_gen + "_placement/var/spool/cron/nova",
+            self.var_puppet_gen + "/var/spool/cron/nova",
+            self.var_puppet_gen + "/etc/httpd/conf/",
+            self.var_puppet_gen + "/etc/httpd/conf.d/",
+            self.var_puppet_gen + "/etc/httpd/conf.modules.d/*.conf",
             self.var_puppet_gen + "_placement/etc/nova/",
             self.var_puppet_gen + "_placement/etc/httpd/conf/",
             self.var_puppet_gen + "_placement/etc/httpd/conf.d/",
@@ -96,6 +99,7 @@ class OpenStackNova(Plugin):
             self.var_puppet_gen + "/../memcached/etc/sysconfig/memcached",
             self.var_puppet_gen + "_libvirt/etc/libvirt/",
             self.var_puppet_gen + "_libvirt/etc/my.cnf.d/tripleo.cnf",
+            self.var_puppet_gen + "_libvirt/etc/nova/",
             self.var_puppet_gen + "_libvirt/etc/nova/migration/"
             "authorized_keys",
             self.var_puppet_gen + "_libvirt/var/lib/nova/.ssh/config",
-- 
2.13.6

From 410733862a1f5ea1f9666d1fa41a7b5d3390e3c6 Mon Sep 17 00:00:00 2001
From: Martin Schuppert <mschuppert@redhat.com>
Date: Wed, 8 Nov 2017 17:57:54 +0100
Subject: [PATCH] [openstack_[glance|heat|cinder|nova]] limit command run

Collect "glance-manage db_version" and similar commands from the
four plugins only if the relevant services or containers are
running. Otherwise the commands get stuck and time out.

This is an enhancement to #1124 to check for containers + do the
same for nova + cinder.

Signed-off-by: Martin Schuppert <mschuppert@redhat.com>

Edited to remove use of shell syntax.

Fixes: #1139

Signed-off-by: Bryn M. Reeves <bmr@redhat.com>
---
 sos/plugins/openstack_cinder.py | 30 ++++++++++---
 sos/plugins/openstack_glance.py | 33 +++++++++++----
 sos/plugins/openstack_heat.py   | 23 ++++++++--
 sos/plugins/openstack_nova.py   | 94 +++++++++++++++++++++++++++--------------
 4 files changed, 129 insertions(+), 51 deletions(-)

diff --git a/sos/plugins/openstack_cinder.py b/sos/plugins/openstack_cinder.py
index abfd267b..a023105c 100644
--- a/sos/plugins/openstack_cinder.py
+++ b/sos/plugins/openstack_cinder.py
@@ -27,16 +27,34 @@ class OpenStackCinder(Plugin):
     plugin_name = "openstack_cinder"
     profiles = ('openstack', 'openstack_controller')
 
-    option_list = [("db", "gathers openstack cinder db version", "slow",
-                    False)]
-
     var_puppet_gen = "/var/lib/config-data/puppet-generated/cinder"
 
     def setup(self):
-        if self.get_option("db"):
+
+        # collect commands output only if the openstack-cinder-api service
+        # is running
+        service_status = self.get_command_output(
+            "systemctl status openstack-cinder-api.service"
+        )
+
+        container_status = self.get_command_output("docker ps")
+        in_container = False
+        if container_status['status'] == 0:
+            for line in container_status['output'].splitlines():
+                if line.endswith("cinder_api"):
+                    in_container = True
+
+        if (service_status['status'] == 0) or in_container:
+            cinder_config = ""
+            # if containerized we need to pass the config to the cont.
+            if in_container:
+                cinder_config = "--config-dir " + self.var_puppet_gen + \
+                                "/etc/cinder/"
+
             self.add_cmd_output(
-                "cinder-manage db version",
-                suggest_filename="cinder_db_version")
+                "cinder-manage " + cinder_config + " db version",
+                suggest_filename="cinder_db_version"
+            )
 
         self.add_copy_spec([
             "/etc/cinder/",
diff --git a/sos/plugins/openstack_glance.py b/sos/plugins/openstack_glance.py
index fdd789a8..4cdc6dc6 100644
--- a/sos/plugins/openstack_glance.py
+++ b/sos/plugins/openstack_glance.py
@@ -54,22 +54,37 @@ class OpenStackGlance(Plugin):
         if self.get_option("verify"):
             self.add_cmd_output("rpm -V %s" % ' '.join(self.packages))
 
-        vars_all = [p in os.environ for p in [
-                    'OS_USERNAME', 'OS_PASSWORD']]
-
-        vars_any = [p in os.environ for p in [
-                    'OS_TENANT_NAME', 'OS_PROJECT_NAME']]
-
         # collect commands output only if the openstack-glance-api service
         # is running
         service_status = self.get_command_output(
-                "systemctl status openstack-glance-api.service"
+            "systemctl status openstack-glance-api.service"
         )
-        if service_status['status'] == 0:
+
+        container_status = self.get_command_output("docker ps")
+        in_container = False
+        if container_status['status'] == 0:
+            for line in container_status['output'].splitlines():
+                if line.endswith("cinder_api"):
+                    in_container = True
+
+        if (service_status['status'] == 0) or in_container:
+            glance_config = ""
+            # if containerized we need to pass the config to the cont.
+            if in_container:
+                glance_config = "--config-dir " + self.var_puppet_gen + \
+                                "/etc/glance/"
+
             self.add_cmd_output(
-                "glance-manage db_version",
+                "glance-manage " + glance_config + " db_version",
                 suggest_filename="glance_db_version"
             )
+
+            vars_all = [p in os.environ for p in [
+                        'OS_USERNAME', 'OS_PASSWORD']]
+
+            vars_any = [p in os.environ for p in [
+                        'OS_TENANT_NAME', 'OS_PROJECT_NAME']]
+
             if not (all(vars_all) and any(vars_any)):
                 self.soslog.warning("Not all environment variables set. "
                                     "Source the environment file for the user "
diff --git a/sos/plugins/openstack_heat.py b/sos/plugins/openstack_heat.py
index de34ed15..e3395fab 100644
--- a/sos/plugins/openstack_heat.py
+++ b/sos/plugins/openstack_heat.py
@@ -32,11 +32,26 @@ class OpenStackHeat(Plugin):
 
         # collect commands output only if the openstack-heat-api service
         # is running
-        service_status = self.get_command_output("systemctl status "
-                                                 "openstack-heat-api.service")
-        if service_status['status'] == 0:
+        service_status = self.get_command_output(
+            "systemctl status openstack-heat-api.service"
+        )
+
+        container_status = self.get_command_output("docker ps")
+        in_container = False
+        if container_status['status'] == 0:
+            for line in container_status['output'].splitlines():
+                if line.endswith("cinder_api"):
+                    in_container = True
+
+        if (service_status['status'] == 0) or in_container:
+            heat_config = ""
+            # if containerized we need to pass the config to the cont.
+            if in_container:
+                heat_config = "--config-dir " + self.var_puppet_gen + \
+                                "_api/etc/heat/"
+
             self.add_cmd_output(
-                "heat-manage db_version",
+                "heat-manage " + heat_config + " db_version",
                 suggest_filename="heat_db_version"
             )
 
diff --git a/sos/plugins/openstack_nova.py b/sos/plugins/openstack_nova.py
index cdd29760..e8839a2a 100644
--- a/sos/plugins/openstack_nova.py
+++ b/sos/plugins/openstack_nova.py
@@ -32,40 +32,70 @@ class OpenStackNova(Plugin):
     var_puppet_gen = "/var/lib/config-data/puppet-generated/nova"
 
     def setup(self):
-        # commands we do not need to source the environment file
-        self.add_cmd_output("nova-manage db version")
-        self.add_cmd_output("nova-manage fixed list")
-        self.add_cmd_output("nova-manage floating list")
 
-        vars_all = [p in os.environ for p in [
-                    'OS_USERNAME', 'OS_PASSWORD']]
-
-        vars_any = [p in os.environ for p in [
-                    'OS_TENANT_NAME', 'OS_PROJECT_NAME']]
+        # collect commands output only if the openstack-nova-api service
+        # is running
+        service_status = self.get_command_output(
+            "systemctl status openstack-nova-api.service"
+        )
 
-        if not (all(vars_all) and any(vars_any)):
-            self.soslog.warning("Not all environment variables set. Source "
-                                "the environment file for the user intended "
-                                "to connect to the OpenStack environment.")
-        else:
-            self.add_cmd_output("nova service-list")
-            self.add_cmd_output("openstack flavor list --long")
-            self.add_cmd_output("nova network-list")
-            self.add_cmd_output("nova list")
-            self.add_cmd_output("nova agent-list")
-            self.add_cmd_output("nova version-list")
-            self.add_cmd_output("nova host-list")
-            self.add_cmd_output("openstack quota show")
-            self.add_cmd_output("openstack hypervisor stats show")
-            # get details for each nova instance
-            cmd = "openstack server list -f value"
-            nova_instances = self.call_ext_prog(cmd)['output']
-            for instance in nova_instances.splitlines():
-                instance = instance.split()[0]
-                cmd = "openstack server show %s" % (instance)
-                self.add_cmd_output(
-                    cmd,
-                    suggest_filename="instance-" + instance + ".log")
+        container_status = self.get_command_output("docker ps")
+        in_container = False
+        if container_status['status'] == 0:
+            for line in container_status['output'].splitlines():
+                if line.endswith("cinder_api"):
+                    in_container = True
+
+        if (service_status['status'] == 0) or in_container:
+            nova_config = ""
+            # if containerized we need to pass the config to the cont.
+            if in_container:
+                nova_config = "--config-dir " + self.var_puppet_gen + \
+                                "/etc/nova/"
+
+            self.add_cmd_output(
+                "nova-manage " + nova_config + " db version",
+                suggest_filename="nova-manage_db_version"
+            )
+            self.add_cmd_output(
+                "nova-manage " + nova_config + " fixed list",
+                suggest_filename="nova-manage_fixed_list"
+            )
+            self.add_cmd_output(
+                "nova-manage " + nova_config + " floating list",
+                suggest_filename="nova-manage_floating_list"
+            )
+
+            vars_all = [p in os.environ for p in [
+                        'OS_USERNAME', 'OS_PASSWORD']]
+
+            vars_any = [p in os.environ for p in [
+                        'OS_TENANT_NAME', 'OS_PROJECT_NAME']]
+
+            if not (all(vars_all) and any(vars_any)):
+                self.soslog.warning("Not all environment variables set. "
+                                    "Source the environment file for the user "
+                                    "intended to connect to the OpenStack "
+                                    "environment.")
+            else:
+                self.add_cmd_output("nova service-list")
+                self.add_cmd_output("openstack flavor list --long")
+                self.add_cmd_output("nova network-list")
+                self.add_cmd_output("nova list")
+                self.add_cmd_output("nova agent-list")
+                self.add_cmd_output("nova version-list")
+                self.add_cmd_output("nova hypervisor-list")
+                self.add_cmd_output("openstack quota show")
+                self.add_cmd_output("openstack hypervisor stats show")
+                # get details for each nova instance
+                cmd = "openstack server list -f value"
+                nova_instances = self.call_ext_prog(cmd)['output']
+                for instance in nova_instances.splitlines():
+                    instance = instance.split()[0]
+                    cmd = "openstack server show %s" % (instance)
+                    self.add_cmd_output(
+                        cmd,
+                        suggest_filename="instance-" + instance + ".log")
 
         self.limit = self.get_option("log_size")
         if self.get_option("all_logs"):
-- 
2.13.6

From 404951b99d5e2e46fe0757d27b984eb5ff94cf76 Mon Sep 17 00:00:00 2001
From: Pavel Moravec <pmoravec@redhat.com>
Date: Wed, 8 Nov 2017 19:30:57 +0100
Subject: [PATCH] [etcd] fix typo in etcdctl subcmds

subcmds variable set while "subcmd" referred instead.

Resolves: #1141

Signed-off-by: Pavel Moravec <pmoravec@redhat.com>
---
 sos/plugins/etcd.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/sos/plugins/etcd.py b/sos/plugins/etcd.py
index 4f072a4c..bd5d10d8 100644
--- a/sos/plugins/etcd.py
+++ b/sos/plugins/etcd.py
@@ -40,7 +40,7 @@ class etcd(Plugin, RedHatPlugin):
            'ls --recursive',
         ]
 
-        self.add_cmd_output(['%s %s' % (self.cmd, sub) for sub in subcmd])
+        self.add_cmd_output(['%s %s' % (self.cmd, sub) for sub in subcmds])
 
         urls = [
             '/v2/stats/leader',
-- 
2.13.6

From 2140b1611565078c4a6536782c013a525722e0da Mon Sep 17 00:00:00 2001
From: Martin Schuppert <mschuppert@redhat.com>
Date: Thu, 21 Dec 2017 08:00:41 +0100
Subject: [PATCH] [openstack_glance|heat|nova] fix api container names

The container names of the glance, heat and nova api were not
correct when verifying whether the container is running.

Signed-off-by: Martin Schuppert <mschuppert@redhat.com>
---
 sos/plugins/openstack_glance.py | 2 +-
 sos/plugins/openstack_heat.py   | 2 +-
 sos/plugins/openstack_nova.py   | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/sos/plugins/openstack_glance.py b/sos/plugins/openstack_glance.py
index 4cdc6dc62..d7588abe0 100644
--- a/sos/plugins/openstack_glance.py
+++ b/sos/plugins/openstack_glance.py
@@ -64,7 +64,7 @@ def setup(self):
         in_container = False
         if container_status['status'] == 0:
             for line in container_status['output'].splitlines():
-                if line.endswith("cinder_api"):
+                if line.endswith("glance_api"):
                     in_container = True
 
         if (service_status['status'] == 0) or in_container:
diff --git a/sos/plugins/openstack_heat.py b/sos/plugins/openstack_heat.py
index e3395fabd..0cf7c8595 100644
--- a/sos/plugins/openstack_heat.py
+++ b/sos/plugins/openstack_heat.py
@@ -40,7 +40,7 @@ def setup(self):
         in_container = False
         if container_status['status'] == 0:
             for line in container_status['output'].splitlines():
-                if line.endswith("cinder_api"):
+                if line.endswith("heat_api"):
                     in_container = True
 
         if (service_status['status'] == 0) or in_container:
diff --git a/sos/plugins/openstack_nova.py b/sos/plugins/openstack_nova.py
index e8839a2a6..951e69cba 100644
--- a/sos/plugins/openstack_nova.py
+++ b/sos/plugins/openstack_nova.py
@@ -43,7 +43,7 @@ def setup(self):
         in_container = False
         if container_status['status'] == 0:
             for line in container_status['output'].splitlines():
-                if line.endswith("cinder_api"):
+                if line.endswith("nova_api"):
                     in_container = True
 
         if (service_status['status'] == 0) or in_container:
From 6f5295056cbea8220407fe42159b15ea1a135e46 Mon Sep 17 00:00:00 2001
From: Martin Schuppert <mschuppert@redhat.com>
Date: Wed, 10 Jan 2018 20:50:46 +0100
Subject: [PATCH] [plugins] add method to check process list for a named
 process

In openstack plugins we collect data depending on whether
processes with certain names are present. This introduces a
check_process_by_name Plugin method to provide a consistent way
to do this from any plugin where needed.

Signed-off-by: Martin Schuppert <mschuppert@redhat.com>
---
 sos/plugins/__init__.py | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/sos/plugins/__init__.py b/sos/plugins/__init__.py
index 2a8bc516e..3f3a19558 100644
--- a/sos/plugins/__init__.py
+++ b/sos/plugins/__init__.py
@@ -1026,6 +1026,22 @@ def report(self):
         else:
             return html
 
+    def check_process_by_name(self, process):
+        """Checks if a named process is found in /proc/[0-9]*/cmdline.
+        Returns either True or False."""
+        status = False
+        cmd_line_glob = "/proc/[0-9]*/cmdline"
+        try:
+            cmd_line_paths = glob.glob(cmd_line_glob)
+            for path in cmd_line_paths:
+                f = open(path, 'r')
+                cmd_line = f.read().strip()
+                if process in cmd_line:
+                    status = True
+        except IOError as e:
+            return False
+        return status
+
 
 class RedHatPlugin(object):
     """Tagging class for Red Hat's Linux distributions"""
From 12f1c4f851c771a0173f6e00657e1a983af8451c Mon Sep 17 00:00:00 2001
From: Martin Schuppert <mschuppert@redhat.com>
Date: Fri, 29 Dec 2017 09:20:33 +0100
Subject: [PATCH] [openstack_cinder] check for api service running via
 cinder_wsgi

With OSP11 the cinder api changed to run as an httpd wsgi
process. Before running the cinder-manage command we also need to
take this situation into account, so the change additionally
checks for the cinder_wsgi process.

Signed-off-by: Martin Schuppert <mschuppert@redhat.com>
---
 sos/plugins/__init__.py         | 11 +++++++++++
 sos/plugins/openstack_cinder.py | 27 +++++++++++++++------------
 2 files changed, 26 insertions(+), 12 deletions(-)

diff --git a/sos/plugins/openstack_cinder.py b/sos/plugins/openstack_cinder.py
index a023105c8..cc9181efa 100644
--- a/sos/plugins/openstack_cinder.py
+++ b/sos/plugins/openstack_cinder.py
@@ -31,26 +31,29 @@ class OpenStackCinder(Plugin):
 
     def setup(self):
 
-        # collect commands output only if the openstack-cinder-api service
-        # is running
-        service_status = self.get_command_output(
-            "systemctl status openstack-cinder-api.service"
-        )
+        # check if either standalone (cinder-api) or httpd wsgi (cinder_wsgi)
+        # is up and running
+        cinder_process = ["cinder_wsgi", "cinder-api"]
+        in_ps = False
+        for process in cinder_process:
+            in_ps = self.check_process_by_name(process)
+            if in_ps:
+                break
 
         container_status = self.get_command_output("docker ps")
         in_container = False
+        cinder_config = ""
         if container_status['status'] == 0:
             for line in container_status['output'].splitlines():
                 if line.endswith("cinder_api"):
                     in_container = True
+                    # if containerized we need to pass the config to the cont.
+                    cinder_config = "--config-dir " + self.var_puppet_gen + \
+                                    "/etc/cinder/"
+                    break
 
-        if (service_status['status'] == 0) or in_container:
-            cinder_config = ""
-            # if containerized we need to pass the config to the cont.
-            if in_container:
-                cinder_config = "--config-dir " + self.var_puppet_gen + \
-                                "/etc/cinder/"
-
+        # collect commands output if the standalone, wsgi or container is up
+        if in_ps or in_container:
             self.add_cmd_output(
                 "cinder-manage " + cinder_config + " db version",
                 suggest_filename="cinder_db_version"