Blame SOURCES/0017-lvmdbusd-Don-t-require-lvm-prompt-for-shell.patch

From a3c2dcc3726261d6463ea35102d86863d698021b Mon Sep 17 00:00:00 2001
From: Tony Asleson <tasleson@redhat.com>
Date: Mon, 6 Jun 2022 09:56:32 -0500
Subject: [PATCH 9/9] lvmdbusd: Don't require "lvm> " prompt for shell

Depending on how lvm is compiled, it may not present the "lvm> " prompt
when using the lvm shell.  Don't require it to be present.

Addresses: https://bugzilla.redhat.com/show_bug.cgi?id=2090391
(cherry picked from commit 691494268502ddb20da2a14568984c0fa4f29f50)
---
 daemons/lvmdbusd/lvm_shell_proxy.py.in | 83 +++++++++++++-------------
 1 file changed, 43 insertions(+), 40 deletions(-)

diff --git a/daemons/lvmdbusd/lvm_shell_proxy.py.in b/daemons/lvmdbusd/lvm_shell_proxy.py.in
index 1a5051a92..e106ca36f 100644
--- a/daemons/lvmdbusd/lvm_shell_proxy.py.in
+++ b/daemons/lvmdbusd/lvm_shell_proxy.py.in
@@ -19,7 +19,6 @@ import sys
 import tempfile
 import time
 import select
-import copy
 
 try:
 	import simplejson as json
@@ -31,8 +30,6 @@ from lvmdbusd.cfg import LVM_CMD
 from lvmdbusd.utils import log_debug, log_error, add_no_notify, make_non_block,\
 							read_decoded
 
-SHELL_PROMPT = "lvm> "
-
 
 def _quote_arg(arg):
 	if len(shlex.split(arg)) > 1:
@@ -43,10 +40,11 @@ def _quote_arg(arg):
 
 class LVMShellProxy(object):
 
-	# Read until we get prompt back and a result
-	# @param: no_output	Caller expects no output to report FD
-	# Returns stdout, report, stderr (report is JSON!)
-	def _read_until_prompt(self, no_output=False):
+	# Read REPORT FD until we have a complete and valid JSON record or give
+	# up trying to get one.
+	#
+	# Returns stdout, report (JSON), stderr
+	def _read_response(self):
 		stdout = ""
 		report = ""
 		stderr = ""
@@ -58,6 +56,7 @@ class LVMShellProxy(object):
 		# Try reading from all FDs to prevent one from filling up and causing
 		# a hang.  Keep reading until we get the prompt back and the report
 		# FD does not contain valid JSON
+
 		while keep_reading:
 			try:
 				rd_fd = [
@@ -78,32 +77,33 @@ class LVMShellProxy(object):
 				if self.lvm_shell.poll() is not None:
 					raise Exception(self.lvm_shell.returncode, "%s" % stderr)
 
-				if stdout.endswith(SHELL_PROMPT):
-					if no_output:
-						keep_reading = False
-					else:
-						cur_report_len = len(report)
-						if cur_report_len != 0:
-							# Only bother to parse if we have more data
-							if prev_report_len != cur_report_len:
-								prev_report_len = cur_report_len
-								# Parse the JSON if it's good we are done,
-								# if not we will try to read some more.
-								try:
-									report_json = json.loads(report)
-									keep_reading = False
-								except ValueError:
-									pass
-
-						if keep_reading:
-							extra_passes -= 1
-							if extra_passes <= 0:
-								if len(report):
-									raise ValueError("Invalid json: %s" %
-														report)
-								else:
-									raise ValueError(
-										"lvm returned no JSON output!")
+				cur_report_len = len(report)
+				if cur_report_len != 0:
+					# Only bother to parse if we have more data and the last 2 characters match expected
+					# complete JSON, prevents excessive JSON parsing attempts
+					if prev_report_len != cur_report_len and report[-2:] == "}\n":
+						prev_report_len = cur_report_len
+
+						# Parse the JSON if it's good we are done,
+						# if not we will try to read some more.
+						try:
+							report_json = json.loads(report)
+							keep_reading = False
+						except ValueError:
+							pass
+
+				# As long as lvm is spewing something on one of the FDs we will
+				# keep trying.  If we get a few timeouts with no activity, and
+				# we don't have valid JSON, we will raise an error.
+				if len(ready) == 0 and keep_reading:
+					extra_passes -= 1
+					if extra_passes <= 0:
+						if len(report):
+							raise ValueError("Invalid json: %s" %
+												report)
+						else:
+							raise ValueError(
+								"lvm returned no JSON output!")
 
 			except IOError as ioe:
 				log_debug(str(ioe))
@@ -118,7 +118,6 @@ class LVMShellProxy(object):
 		self.lvm_shell.stdin.flush()
 
 	def __init__(self):
-
 		# Create a temp directory
 		tmp_dir = tempfile.mkdtemp(prefix="lvmdbus_")
 		tmp_file = "%s/lvmdbus_report" % (tmp_dir)
@@ -139,6 +138,11 @@ class LVMShellProxy(object):
 		local_env = {"LC_ALL": "C", "LVM_REPORT_FD": "%s" % lvm_fd, "LVM_COMMAND_PROFILE": "lvmdbusd",
 					 "LVM_LOG_FILE_MAX_LINES": "0"}
 
+		# If any env variables contain LVM we will propagate them too
+		for k, v in os.environ.items():
+			if "LVM" in k:
+				local_env[k] = v
+
 		# run the lvm shell
 		self.lvm_shell = subprocess.Popen(
 			[LVM_CMD],
@@ -152,10 +156,9 @@ class LVMShellProxy(object):
 			# Close our copy of the lvm_fd, child process is open in its process space
 			os.close(lvm_fd)
 
-			# wait for the first prompt
-			errors = self._read_until_prompt(no_output=True)[2]
-			if errors and len(errors):
-				raise RuntimeError(errors)
+			# Assume we are ready as we may not get the lvm prompt message depending on
+			# if we are using readline or editline.
+
 		except:
 			raise
 		finally:
@@ -169,7 +172,7 @@ class LVMShellProxy(object):
 		self._write_cmd('lastlog\n')
 
 		# read everything from the STDOUT to the next prompt
-		stdout, report_json, stderr = self._read_until_prompt()
+		stdout, report_json, stderr = self._read_response()
 		if 'log' in report_json:
 			error_msg = ""
 			# Walk the entire log array and build an error string
@@ -203,7 +206,7 @@ class LVMShellProxy(object):
 		self._write_cmd(cmd)
 
 		# read everything from the STDOUT to the next prompt
-		stdout, report_json, stderr = self._read_until_prompt()
+		stdout, report_json, stderr = self._read_response()
 
 		# Parse the report to see what happened
 		if 'log' in report_json:
-- 
2.37.1
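
The heart of the change is the new _read_response() loop: instead of scanning stdout for the "lvm> " prompt, lvmdbusd keeps draining the LVM_REPORT_FD until the buffered text parses as a complete JSON document, and only gives up after a few idle select() timeouts with no activity on any descriptor. A minimal standalone sketch of that idea (not part of the patch; read_json_report and report_fd are hypothetical names) might look like this:

# Illustrative sketch, not part of the patch: wait for a complete JSON
# report on a pipe without relying on the "lvm> " prompt.  The names
# read_json_report/report_fd are hypothetical.
import json
import os
import select


def read_json_report(report_fd, timeout=2, max_idle_passes=3):
	report = ""
	idle_passes = 0
	while True:
		ready, _, _ = select.select([report_fd], [], [], timeout)
		if ready:
			chunk = os.read(report_fd, 65536)
			if chunk:
				report += chunk.decode("utf-8")
				idle_passes = 0
				# Cheap completeness hint before attempting a full parse,
				# mirroring the report[-2:] == "}\n" test in the patch.
				if report.endswith("}\n"):
					try:
						return json.loads(report)
					except ValueError:
						pass	# still a partial record, keep reading
				continue
		# No new data this pass; give up after a few idle timeouts.
		idle_passes += 1
		if idle_passes >= max_idle_passes:
			if report:
				raise ValueError("Invalid json: %s" % report)
			raise ValueError("lvm returned no JSON output!")

The report[-2:] == "}\n" test in the patch plays the same role as the endswith("}\n") guard above: it is a cheap hint that the record may be complete, so the loop does not re-run json.loads() on a large, still-growing buffer after every read.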