SOURCES/lvm2-2_02_182-dmeventd-lvm2-plugin-uses-envvar-registry.patch

From 41d8039e12ebad0727f8c7455ad9392bc61e6414 Mon Sep 17 00:00:00 2001
From: Zdenek Kabelac <zkabelac@redhat.com>
Date: Mon, 27 Aug 2018 10:18:26 +0200
Subject: [PATCH 1/2] dmeventd: lvm2 plugin uses envvar registry

The thin plugin started to use a configurable setting to allow the use
of external scripts. However, to read this value it needed to execute
an internal lvm2 command, since dmeventd itself has no access to
lvm.conf and the API for dmeventd plugins has been kept stable.

Running the command was normally not a big issue, until users started
to monitor a larger number of LVs and the executed command got stuck,
because another monitored resource had already started to execute some
other lvm2 command and became blocked waiting on the VG lock.

This scenario revealed the necessity to avoid calling lvm2 commands
during resource registration entirely, but that requires bigger
changes. Meanwhile, this patch minimizes the window for this race by
obtaining any configurable setting just once and caching it. Such a
patch is small and covers the majority of the problem, yet a better
solution still needs to be introduced, likely with a bigger rework of
dmeventd.

TODO: Avoid blocking the registration of a resource on the execution
of lvm2 commands, since those can get stuck waiting on mutexes.

(cherry picked from commit a8d59404f7713ae4f9a3b172dd560ed1364d8bee)

Conflicts:
	WHATS_NEW_DM
---
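A minimal standalone sketch of the idea above, for illustration only:
the first request for a setting runs a command whose result comes back
through an environment variable, and the result is cached in a registry
so later registrations never execute a command. Plain libc replaces
libdevmapper's dm_list/dm_pool here, run_internal_cmd() and
resolve_setting() are invented stand-ins for dmeventd_lvm2_run() and
the logic added by this patch, and the value string is merely
illustrative.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct env_data {
	struct env_data *next;
	const char *cmd;   /* envvar name the internal command exports */
	const char *data;  /* resolved setting value */
};

static struct env_data *_env_registry;

/* Stand-in for dmeventd_lvm2_run(): the real code executes an internal
 * lvm2 command that passes its result back via an environment variable
 * named after the command. */
static int run_internal_cmd(const char *cmd)
{
	return setenv(cmd, "lvm lvextend --use-policies", 0) == 0;
}

static const char *resolve_setting(const char *cmd)
{
	struct env_data *ed;
	const char *env;

	/* Serve repeated requests from the registry: no command runs. */
	for (ed = _env_registry; ed; ed = ed->next)
		if (!strcmp(cmd, ed->cmd))
			return ed->data;

	/* First request: run the command once and read the envvar. */
	if (!run_internal_cmd(cmd) || !(env = getenv(cmd)))
		return NULL;

	/* Cache a private copy of the name/value pair. */
	if (!(ed = calloc(1, sizeof(*ed))) ||
	    !(ed->cmd = strdup(cmd)) ||
	    !(ed->data = strdup(env)))
		return NULL; /* leaks ed on partial failure; sketch only */

	ed->next = _env_registry;
	_env_registry = ed;
	return ed->data;
}

int main(void)
{
	const char *cmd = "_dmeventd_thin_command";
	const char *v1 = resolve_setting(cmd);
	/* The second call is served from the registry; no command runs. */
	const char *v2 = resolve_setting(cmd);

	printf("first:  %s\n", v1 ? v1 : "(failed)");
	printf("second: %s\n", v2 ? v2 : "(failed)");
	return 0;
}
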
 WHATS_NEW_DM                                 |  4 +++
 daemons/dmeventd/plugins/lvm2/dmeventd_lvm.c | 49 +++++++++++++++++++++-------
 2 files changed, 42 insertions(+), 11 deletions(-)

diff --git a/WHATS_NEW_DM b/WHATS_NEW_DM
index c42ee17..42cf2a8 100644
--- a/WHATS_NEW_DM
+++ b/WHATS_NEW_DM
@@ -1,3 +1,7 @@
+Version 1.02.151 - 
+==============================
+  Add hot fix to avoiding locking collision when monitoring thin-pools.
+
 Version 1.02.150 - 
 =================================
   Add vdo plugin for monitoring VDO devices.
diff --git a/daemons/dmeventd/plugins/lvm2/dmeventd_lvm.c b/daemons/dmeventd/plugins/lvm2/dmeventd_lvm.c
index 930f9fc..5be11f1 100644
--- a/daemons/dmeventd/plugins/lvm2/dmeventd_lvm.c
+++ b/daemons/dmeventd/plugins/lvm2/dmeventd_lvm.c
@@ -31,6 +31,13 @@ static pthread_mutex_t _register_mutex = PTHREAD_MUTEX_INITIALIZER;
 static int _register_count = 0;
 static struct dm_pool *_mem_pool = NULL;
 static void *_lvm_handle = NULL;
+static DM_LIST_INIT(_env_registry);
+
+struct env_data {
+	struct dm_list list;
+	const char *cmd;
+	const char *data;
+};
 
 DM_EVENT_LOG_FN("#lvm")
 
@@ -100,6 +107,7 @@ void dmeventd_lvm2_exit(void)
 		lvm2_run(_lvm_handle, "_memlock_dec");
 		dm_pool_destroy(_mem_pool);
 		_mem_pool = NULL;
+		dm_list_init(&_env_registry);
 		lvm2_exit(_lvm_handle);
 		_lvm_handle = NULL;
 		log_debug("lvm plugin exited.");
@@ -124,6 +132,8 @@ int dmeventd_lvm2_command(struct dm_pool *mem, char *buffer, size_t size,
 	static char _internal_prefix[] =  "_dmeventd_";
 	char *vg = NULL, *lv = NULL, *layer;
 	int r;
+	struct env_data *env_data;
+	const char *env = NULL;
 
 	if (!dm_split_lvm_name(mem, device, &vg, &lv, &layer)) {
 		log_error("Unable to determine VG name from %s.",
@@ -137,18 +147,35 @@ int dmeventd_lvm2_command(struct dm_pool *mem, char *buffer, size_t size,
 		*layer = '\0';
 
 	if (!strncmp(cmd, _internal_prefix, sizeof(_internal_prefix) - 1)) {
-		dmeventd_lvm2_lock();
-		/* output of internal command passed via env var */
-		if (!dmeventd_lvm2_run(cmd))
-			cmd = NULL;
-		else if ((cmd = getenv(cmd)))
-			cmd = dm_pool_strdup(mem, cmd); /* copy with lock */
-		dmeventd_lvm2_unlock();
-
-		if (!cmd) {
-			log_error("Unable to find configured command.");
-			return 0;
+		/* check if ENVVAR wasn't already resolved */
+		dm_list_iterate_items(env_data, &_env_registry)
+			if (!strcmp(cmd, env_data->cmd)) {
+				env = env_data->data;
+				break;
+			}
+
+		if (!env) {
+			/* run lvm2 command to find out setting value */
+			dmeventd_lvm2_lock();
+			if (!dmeventd_lvm2_run(cmd) ||
+			    !(env = getenv(cmd))) {
+				log_error("Unable to find configured command.");
+				return 0;
+			}
+			/* output of internal command passed via env var */
+			env = dm_pool_strdup(_mem_pool, env); /* copy with lock */
+			dmeventd_lvm2_unlock();
+			if (!env ||
+			    !(env_data = dm_pool_zalloc(_mem_pool, sizeof(*env_data))) ||
+			    !(env_data->cmd = dm_pool_strdup(_mem_pool, cmd))) {
+				log_error("Unable to allocate env memory.");
+				return 0;
+			}
+			env_data->data = env;
+			/* add to ENVVAR registry */
+			dm_list_add(&_env_registry, &env_data->list);
 		}
+		cmd = env;
 	}
 
 	r = dm_snprintf(buffer, size, "%s %s/%s", cmd, vg, lv);
-- 
1.8.3.1
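For context, a monitoring plugin would consume the patched helper
roughly as follows. This is a hedged sketch, not code from the tree:
monitor_example(), the buffer size and the device name are invented,
the command name "_dmeventd_thin_command" is quoted from the thin
plugin of the same lvm2 tree from memory, and only the
dmeventd_lvm2_command() prototype matches the hunk above.

#include <stddef.h>

struct dm_pool;

/* Prototype as in the hunk above (declared in dmeventd_lvm.h). */
int dmeventd_lvm2_command(struct dm_pool *mem, char *buffer, size_t size,
			  const char *cmd, const char *device);

static void monitor_example(struct dm_pool *mem)
{
	char cmd_str[1024];

	/* The first registration resolves "_dmeventd_thin_command" by
	 * running an internal lvm2 command under the lock; every later
	 * registration is served from _env_registry without running any
	 * command, which is what avoids the VG-lock collision. */
	if (!dmeventd_lvm2_command(mem, cmd_str, sizeof(cmd_str),
				   "_dmeventd_thin_command",
				   "vg-pool00-tpool"))
		return; /* error was already logged by the helper */

	/* cmd_str now holds something like
	 * "lvm lvextend --use-policies vg/pool00". */
}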