From 4ee9a7026d7ed15b0b5cd26f06a21d04fc05d14e Mon Sep 17 00:00:00 2001
From: Roger Zhou <zzhou@suse.com>
Date: Mon, 1 Apr 2019 22:57:26 +0800
Subject: [PATCH 1/2] LVM-activate: return OCF_NOT_RUNNING on initial probe

In the use case of LVM on top of cluster md/raid, when a fenced node
rejoins the cluster, Pacemaker runs the monitor action as a probe
operation. At that point the LVM PV and VG do not exist yet, because
the cluster md/raid has not been assembled, so the probe should return
$OCF_NOT_RUNNING instead of $OCF_ERR_CONFIGURED.
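
For reference, a minimal sketch of the intended probe semantics
(illustrative only, mirroring the hunk below; the vgs-based existence
check shown here is an assumption about how the agent detects the VG):

    # On an initial probe, a missing VG is not a configuration error:
    # the cluster md/raid may simply not be assembled yet, so report
    # "not running" instead of failing hard.
    if ! vgs --foreign ${VG} >/dev/null 2>&1 ; then
        if ocf_is_probe; then
            ocf_log info "initial probe: VG [${VG}] is not found on any block device yet."
            exit $OCF_NOT_RUNNING
        fi
        ocf_exit_reason "Volume group[${VG}] doesn't exist, or not visible on this node!"
        exit $OCF_ERR_CONFIGURED
    fi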

Signed-off-by: Roger Zhou <zzhou@suse.com>
---
 heartbeat/LVM-activate | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/heartbeat/LVM-activate b/heartbeat/LVM-activate
index 3c462c75c..91ac05c34 100755
--- a/heartbeat/LVM-activate
+++ b/heartbeat/LVM-activate
@@ -329,6 +329,7 @@ lvmlockd_check()
 	# Good: lvmlockd is running, and clvmd is not running
 	if ! pgrep lvmlockd >/dev/null 2>&1 ; then
 		if ocf_is_probe; then
+			ocf_log info "initial probe: lvmlockd is not running yet."
 			exit $OCF_NOT_RUNNING
 		fi
 
@@ -481,6 +482,11 @@ lvm_validate() {
 			exit $OCF_SUCCESS
 		fi
 
+		if ocf_is_probe; then
+			ocf_log info "initial probe: VG [${VG}] is not found on any block device yet."
+			exit $OCF_NOT_RUNNING
+		fi
+
 		ocf_exit_reason "Volume group[${VG}] doesn't exist, or not visible on this node!"
 		exit $OCF_ERR_CONFIGURED
 	fi

From df2f58c400b1f6f239f9e1c1fdf6ce0875639b43 Mon Sep 17 00:00:00 2001
From: Roger Zhou <zzhou@suse.com>
Date: Mon, 1 Apr 2019 23:02:54 +0800
Subject: [PATCH 2/2] LVM-activate: align dmsetup report command to standard

Namely, change 'vgname/lvname' to 'vg_name/lv_name'. The dmsetup
report command follows the lvm2 selection criteria field name standard:
- dmsetup v1.02.86 (lvm2 v2_02_107) - 23rd June 2014
  "Add dmsetup -S/--select to define selection criteria"
- dmsetup info -c -S help
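
For example (a hedged sketch; vg0/lv0 are placeholder names and the
exact field list depends on the installed lvm2/dmsetup version):

    # List the selection criteria field names this dmsetup supports;
    # vg_name and lv_name should appear among them.
    dmsetup info -c -S help

    # Select DM devices belonging to a given VG/LV using the standard
    # lvm2 field names.
    dmsetup info --noheadings -c -S "vg_name=vg0 && lv_name=lv0"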

Signed-off-by: Roger Zhou <zzhou@suse.com>
---
 heartbeat/LVM-activate | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/heartbeat/LVM-activate b/heartbeat/LVM-activate
index 91ac05c34..730d9a09d 100755
--- a/heartbeat/LVM-activate
+++ b/heartbeat/LVM-activate
@@ -707,7 +707,7 @@ tagging_deactivate() {
 # method:
 #
 # lv_count=$(vgs --foreign -o lv_count --noheadings ${VG} 2>/dev/null | tr -d '[:blank:]')
-# dm_count=$(dmsetup --noheadings info -c -S "vgname=${VG}" 2>/dev/null | grep -c "${VG}-")
+# dm_count=$(dmsetup --noheadings info -c -S "vg_name=${VG}" 2>/dev/null | grep -c "${VG}-")
 # test $lv_count -eq $dm_count
 #
 # It works, but we cannot afford to use LVM command in lvm_status. LVM command is expensive
@@ -730,9 +730,9 @@ lvm_status() {
 	if [ -n "${LV}" ]; then
 		# dmsetup ls? It cannot accept device name. It's
 		# too heavy to list all DM devices.
-		dm_count=$(dmsetup info --noheadings --noflush -c -S "vgname=${VG} && lvname=${LV}" | grep -c -v '^No devices found')
+		dm_count=$(dmsetup info --noheadings --noflush -c -S "vg_name=${VG} && lv_name=${LV}" | grep -c -v '^No devices found')
 	else
-		dm_count=$(dmsetup info --noheadings --noflush -c -S "vgname=${VG}" | grep -c -v '^No devices found')
+		dm_count=$(dmsetup info --noheadings --noflush -c -S "vg_name=${VG}" | grep -c -v '^No devices found')
 	fi
 
 	if [ $dm_count -eq 0 ]; then