diff --git a/.gitignore b/.gitignore
index 4d3a5d4..d3a23d4 100644
--- a/.gitignore
+++ b/.gitignore
@@ -9,14 +9,15 @@ SOURCES/ffi-1.13.1.gem
 SOURCES/json-2.3.0.gem
 SOURCES/mustermann-1.1.1.gem
 SOURCES/open4-1.3.4-1.gem
-SOURCES/pcs-0.10.8.tar.gz
-SOURCES/pcs-web-ui-0.1.5.tar.gz
-SOURCES/pcs-web-ui-node-modules-0.1.5.tar.xz
+SOURCES/pcs-0.10.10.tar.gz
+SOURCES/pcs-web-ui-0.1.7.tar.gz
+SOURCES/pcs-web-ui-node-modules-0.1.7.tar.xz
 SOURCES/pyagentx-0.4.pcs.2.tar.gz
 SOURCES/python-dateutil-2.8.1.tar.gz
 SOURCES/rack-2.2.3.gem
 SOURCES/rack-protection-2.0.8.1.gem
 SOURCES/rack-test-1.1.0.gem
+SOURCES/rexml-3.2.5.gem
 SOURCES/ruby2_keywords-0.0.2.gem
 SOURCES/sinatra-2.0.8.1.gem
 SOURCES/thin-1.7.2.gem
diff --git a/.pcs.metadata b/.pcs.metadata
index 1b31355..6167c6c 100644
--- a/.pcs.metadata
+++ b/.pcs.metadata
@@ -9,14 +9,15 @@ cfa25e7a3760c3ec16723cb8263d9b7a52d0eadf SOURCES/ffi-1.13.1.gem
 0230e8c5a37f1543982e5b04be503dd5f9004b47 SOURCES/json-2.3.0.gem
 50a4e37904485810cb05e27d75c9783e5a8f3402 SOURCES/mustermann-1.1.1.gem
 41a7fe9f8e3e02da5ae76c821b89c5b376a97746 SOURCES/open4-1.3.4-1.gem
-0e6b705715023ec5224ca05e977b8888f2a1b1e6 SOURCES/pcs-0.10.8.tar.gz
-f23b14786b1911d498612bf0e90f344bcc4915c3 SOURCES/pcs-web-ui-0.1.5.tar.gz
-57beab1c4bed96d7f9fc35261e96f78babb06980 SOURCES/pcs-web-ui-node-modules-0.1.5.tar.xz
+a1c0585455b7e050c33598598a045ccd2776cb28 SOURCES/pcs-0.10.10.tar.gz
+b9ed12ca957c2f204ec37ea2836b924c36fab379 SOURCES/pcs-web-ui-0.1.7.tar.gz
+8824285e6f1c2807d9222d573c6e6df1e50d8410 SOURCES/pcs-web-ui-node-modules-0.1.7.tar.xz
 3176b2f2b332c2b6bf79fe882e83feecf3d3f011 SOURCES/pyagentx-0.4.pcs.2.tar.gz
 bd26127e57f83a10f656b62c46524c15aeb844dd SOURCES/python-dateutil-2.8.1.tar.gz
 345b7169d4d2d62176a225510399963bad62b68f SOURCES/rack-2.2.3.gem
 1f046e23baca8beece3b38c60382f44aa2b2cb41 SOURCES/rack-protection-2.0.8.1.gem
 b80bc5ca38a885e747271675ba91dd3d02136bf1 SOURCES/rack-test-1.1.0.gem
+e7f48fa5fb2d92e6cb21d6b1638fe41a5a7c4287 SOURCES/rexml-3.2.5.gem
 0be571aacb5d6a212a30af3f322a7000d8af1ef9 SOURCES/ruby2_keywords-0.0.2.gem
 04cca7a5d9d641fe076e4e24dc5b6ff31922f4c3 SOURCES/sinatra-2.0.8.1.gem
 41395e86322ffd31f3a7aef1f697bda3e1e2d6b9 SOURCES/thin-1.7.2.gem
diff --git a/SOURCES/add-missing-file-test_stonith_update_scsi_devices.py.patch b/SOURCES/add-missing-file-test_stonith_update_scsi_devices.py.patch
new file mode 100644
index 0000000..796544d
--- /dev/null
+++ b/SOURCES/add-missing-file-test_stonith_update_scsi_devices.py.patch
@@ -0,0 +1,1172 @@
+From e3f9823283517bafa8d309fb6148539e0e8ecdb2 Mon Sep 17 00:00:00 2001
+From: Miroslav Lisik
+Date: Fri, 10 Sep 2021 11:40:03 +0200
+Subject: [PATCH] add missing file test_stonith_update_scsi_devices.py
+
+---
+ .../test_stonith_update_scsi_devices.py | 1153 +++++++++++++++++
+ 1 file changed, 1153 insertions(+)
+ create mode 100644 pcs_test/tier0/lib/commands/test_stonith_update_scsi_devices.py
+
+diff --git a/pcs_test/tier0/lib/commands/test_stonith_update_scsi_devices.py b/pcs_test/tier0/lib/commands/test_stonith_update_scsi_devices.py
+new file mode 100644
+index 0000000..3bc5132
+--- /dev/null
++++ b/pcs_test/tier0/lib/commands/test_stonith_update_scsi_devices.py
+@@ -0,0 +1,1153 @@
++import json
++from unittest import mock, TestCase
++
++
++from pcs_test.tools import fixture
++from pcs_test.tools.command_env import get_env_tools
++from pcs_test.tools.misc import get_test_resource as rc
++
++from pcs import settings
++from pcs.lib.commands import stonith
++from pcs.common import (
++    communication,
++    reports,
++)
++from pcs.common.interface 
import dto ++from pcs.common.tools import timeout_to_seconds ++ ++from .cluster.common import ( ++ corosync_conf_fixture, ++ get_two_node, ++ node_fixture, ++) ++ ++SCSI_STONITH_ID = "scsi-fence-device" ++SCSI_NODE = "node1" ++_DIGEST = "0" * 31 ++DEFAULT_DIGEST = _DIGEST + "0" ++ALL_DIGEST = _DIGEST + "1" ++NONPRIVATE_DIGEST = _DIGEST + "2" ++NONRELOADABLE_DIGEST = _DIGEST + "3" ++DEVICES_1 = ("/dev/sda",) ++DEVICES_2 = ("/dev/sda", "/dev/sdb") ++DEVICES_3 = ("/dev/sda", "/dev/sdb", "/dev/sdc") ++ ++DEFAULT_MONITOR = ("monitor", "60s", None, None) ++DEFAULT_OPS = (DEFAULT_MONITOR,) ++DEFAULT_LRM_START_OPS = (("0", DEFAULT_DIGEST, None, None),) ++DEFAULT_LRM_MONITOR_OPS = (("60000", DEFAULT_DIGEST, None, None),) ++DEFAULT_LRM_START_OPS_UPDATED = (("0", ALL_DIGEST, None, None),) ++DEFAULT_LRM_MONITOR_OPS_UPDATED = (("60000", ALL_DIGEST, None, None),) ++ ++ ++def _fixture_ops(resource_id, ops): ++ return "\n".join( ++ [ ++ ( ++ '' ++ ).format( ++ resource_id=resource_id, ++ name=name, ++ _interval=_interval if _interval else interval, ++ interval=interval, ++ timeout=f'timeout="{timeout}"' if timeout else "", ++ ) ++ for name, interval, timeout, _interval in ops ++ ] ++ ) ++ ++ ++def _fixture_devices_nvpair(resource_id, devices): ++ if devices is None: ++ return "" ++ return ( ++ '' ++ ).format(resource_id=resource_id, devices=",".join(sorted(devices))) ++ ++ ++def fixture_scsi( ++ stonith_id=SCSI_STONITH_ID, devices=DEVICES_1, resource_ops=DEFAULT_OPS ++): ++ return """ ++ ++ ++ ++ {devices} ++ ++ ++ ++ ++ ++ ++ ++ ++ {operations} ++ ++ ++ ++ ++ """.format( ++ stonith_id=stonith_id, ++ devices=_fixture_devices_nvpair(stonith_id, devices), ++ operations=_fixture_ops(stonith_id, resource_ops), ++ ) ++ ++ ++def _fixture_lrm_rsc_ops(op_type, resource_id, lrm_ops): ++ return [ ++ ( ++ '' ++ ).format( ++ op_type_id="last" if op_type == "start" else op_type, ++ op_type=op_type, ++ resource_id=resource_id, ++ ms=ms, ++ _all=f'op-digest="{_all}"' if _all else "", ++ secure=f'op-secure-digest="{secure}"' if secure else "", ++ restart=f'op-restart-digest="{restart}"' if restart else "", ++ ) ++ for ms, _all, secure, restart in lrm_ops ++ ] ++ ++ ++def _fixture_lrm_rsc_monitor_ops(resource_id, lrm_monitor_ops): ++ return _fixture_lrm_rsc_ops("monitor", resource_id, lrm_monitor_ops) ++ ++ ++def _fixture_lrm_rsc_start_ops(resource_id, lrm_start_ops): ++ return _fixture_lrm_rsc_ops("start", resource_id, lrm_start_ops) ++ ++ ++def _fixture_status_lrm_ops_base( ++ resource_id, ++ lrm_ops, ++): ++ return f""" ++ ++ ++ ++ ++ ++ {lrm_ops} ++ ++ ++ ++ ++ ++ """ ++ ++ ++def _fixture_status_lrm_ops( ++ resource_id, ++ lrm_start_ops=DEFAULT_LRM_START_OPS, ++ lrm_monitor_ops=DEFAULT_LRM_MONITOR_OPS, ++): ++ return _fixture_status_lrm_ops_base( ++ resource_id, ++ "\n".join( ++ _fixture_lrm_rsc_start_ops(resource_id, lrm_start_ops) ++ + _fixture_lrm_rsc_monitor_ops(resource_id, lrm_monitor_ops) ++ ), ++ ) ++ ++ ++def fixture_digests_xml(resource_id, node_name, devices=""): ++ return f""" ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ """ ++ ++ ++FIXTURE_CRM_MON_RES_RUNNING_1 = f""" ++ ++ ++ ++""" ++ ++FIXTURE_CRM_MON_RES_RUNNING_2 = f""" ++ ++ ++ ++ ++ ++ ++""" ++FIXTURE_CRM_MON_NODES = """ ++ ++ ++ ++ ++ ++""" ++ ++FIXTURE_CRM_MON_RES_STOPPED = f""" ++ ++""" ++ ++ ++@mock.patch.object( ++ settings, ++ "pacemaker_api_result_schema", ++ rc("pcmk_api_rng/api-result.rng"), ++) ++class UpdateScsiDevices(TestCase): ++ def setUp(self): ++ self.env_assist, self.config = get_env_tools(self) ++ ++ self.existing_nodes = ["node1", 
"node2", "node3"] ++ self.existing_corosync_nodes = [ ++ node_fixture(node, node_id) ++ for node_id, node in enumerate(self.existing_nodes, 1) ++ ] ++ self.config.env.set_known_nodes(self.existing_nodes) ++ ++ def assert_command_success( ++ self, ++ devices_before=DEVICES_1, ++ devices_updated=DEVICES_2, ++ resource_ops=DEFAULT_OPS, ++ lrm_monitor_ops=DEFAULT_LRM_MONITOR_OPS, ++ lrm_start_ops=DEFAULT_LRM_START_OPS, ++ lrm_monitor_ops_updated=DEFAULT_LRM_MONITOR_OPS_UPDATED, ++ lrm_start_ops_updated=DEFAULT_LRM_START_OPS_UPDATED, ++ ): ++ # pylint: disable=too-many-locals ++ self.config.runner.pcmk.is_resource_digests_supported() ++ self.config.runner.cib.load( ++ resources=fixture_scsi( ++ devices=devices_before, resource_ops=resource_ops ++ ), ++ status=_fixture_status_lrm_ops( ++ SCSI_STONITH_ID, ++ lrm_start_ops=lrm_start_ops, ++ lrm_monitor_ops=lrm_monitor_ops, ++ ), ++ ) ++ self.config.runner.pcmk.load_state( ++ resources=FIXTURE_CRM_MON_RES_RUNNING_1, nodes=FIXTURE_CRM_MON_NODES ++ ) ++ devices_opt = "devices={}".format(",".join(devices_updated)) ++ self.config.runner.pcmk.resource_digests( ++ SCSI_STONITH_ID, ++ SCSI_NODE, ++ name="start.op.digests", ++ stdout=fixture_digests_xml( ++ SCSI_STONITH_ID, SCSI_NODE, devices=",".join(devices_updated) ++ ), ++ args=[devices_opt], ++ ) ++ ++ for num, op in enumerate(resource_ops, 1): ++ name, interval, timeout, _ = op ++ if name != "monitor": ++ continue ++ args = [devices_opt] ++ args.append( ++ "CRM_meta_interval={}".format( ++ 1000 * timeout_to_seconds(interval) ++ ) ++ ) ++ if timeout: ++ args.append( ++ "CRM_meta_timeout={}".format( ++ 1000 * timeout_to_seconds(timeout) ++ ) ++ ) ++ self.config.runner.pcmk.resource_digests( ++ SCSI_STONITH_ID, ++ SCSI_NODE, ++ name=f"{name}-{num}.op.digests", ++ stdout=fixture_digests_xml( ++ SCSI_STONITH_ID, ++ SCSI_NODE, ++ devices=",".join(devices_updated), ++ ), ++ args=args, ++ ) ++ self.config.corosync_conf.load_content( ++ corosync_conf_fixture( ++ self.existing_corosync_nodes, ++ get_two_node(len(self.existing_corosync_nodes)), ++ ) ++ ) ++ self.config.http.corosync.get_corosync_online_targets( ++ node_labels=self.existing_nodes ++ ) ++ self.config.http.scsi.unfence_node( ++ devices_updated, node_labels=self.existing_nodes ++ ) ++ self.config.env.push_cib( ++ resources=fixture_scsi( ++ devices=devices_updated, resource_ops=resource_ops ++ ), ++ status=_fixture_status_lrm_ops( ++ SCSI_STONITH_ID, ++ lrm_start_ops=lrm_start_ops_updated, ++ lrm_monitor_ops=lrm_monitor_ops_updated, ++ ), ++ ) ++ stonith.update_scsi_devices( ++ self.env_assist.get_env(), SCSI_STONITH_ID, devices_updated ++ ) ++ self.env_assist.assert_reports([]) ++ ++ def test_update_1_to_1_devices(self): ++ self.assert_command_success( ++ devices_before=DEVICES_1, devices_updated=DEVICES_1 ++ ) ++ ++ def test_update_2_to_2_devices(self): ++ self.assert_command_success( ++ devices_before=DEVICES_1, devices_updated=DEVICES_1 ++ ) ++ ++ def test_update_1_to_2_devices(self): ++ self.assert_command_success() ++ ++ def test_update_1_to_3_devices(self): ++ self.assert_command_success( ++ devices_before=DEVICES_1, devices_updated=DEVICES_3 ++ ) ++ ++ def test_update_3_to_1_devices(self): ++ self.assert_command_success( ++ devices_before=DEVICES_3, devices_updated=DEVICES_1 ++ ) ++ ++ def test_update_3_to_2_devices(self): ++ self.assert_command_success( ++ devices_before=DEVICES_3, devices_updated=DEVICES_2 ++ ) ++ ++ def test_default_monitor(self): ++ self.assert_command_success() ++ ++ def test_no_monitor_ops(self): ++ 
self.assert_command_success( ++ resource_ops=(), lrm_monitor_ops=(), lrm_monitor_ops_updated=() ++ ) ++ ++ def test_1_monitor_with_timeout(self): ++ self.assert_command_success( ++ resource_ops=(("monitor", "30s", "10s", None),), ++ lrm_monitor_ops=(("30000", DEFAULT_DIGEST, None, None),), ++ lrm_monitor_ops_updated=(("30000", ALL_DIGEST, None, None),), ++ ) ++ ++ def test_2_monitor_ops_with_timeouts(self): ++ self.assert_command_success( ++ resource_ops=( ++ ("monitor", "30s", "10s", None), ++ ("monitor", "40s", "20s", None), ++ ), ++ lrm_monitor_ops=( ++ ("30000", DEFAULT_DIGEST, None, None), ++ ("40000", DEFAULT_DIGEST, None, None), ++ ), ++ lrm_monitor_ops_updated=( ++ ("30000", ALL_DIGEST, None, None), ++ ("40000", ALL_DIGEST, None, None), ++ ), ++ ) ++ ++ def test_2_monitor_ops_with_one_timeout(self): ++ self.assert_command_success( ++ resource_ops=( ++ ("monitor", "30s", "10s", None), ++ ("monitor", "60s", None, None), ++ ), ++ lrm_monitor_ops=( ++ ("30000", DEFAULT_DIGEST, None, None), ++ ("60000", DEFAULT_DIGEST, None, None), ++ ), ++ lrm_monitor_ops_updated=( ++ ("30000", ALL_DIGEST, None, None), ++ ("60000", ALL_DIGEST, None, None), ++ ), ++ ) ++ ++ def test_various_start_ops_one_lrm_start_op(self): ++ self.assert_command_success( ++ resource_ops=( ++ ("monitor", "60s", None, None), ++ ("start", "0s", "40s", None), ++ ("start", "0s", "30s", "1"), ++ ("start", "10s", "5s", None), ++ ("start", "20s", None, None), ++ ), ++ ) ++ ++ def test_1_nonrecurring_start_op_with_timeout(self): ++ self.assert_command_success( ++ resource_ops=( ++ ("monitor", "60s", None, None), ++ ("start", "0s", "40s", None), ++ ), ++ ) ++ ++ ++@mock.patch.object( ++ settings, ++ "pacemaker_api_result_schema", ++ rc("pcmk_api_rng/api-result.rng"), ++) ++class TestUpdateScsiDevicesFailures(TestCase): ++ # pylint: disable=too-many-public-methods ++ def setUp(self): ++ self.env_assist, self.config = get_env_tools(self) ++ ++ self.existing_nodes = ["node1", "node2", "node3"] ++ self.existing_corosync_nodes = [ ++ node_fixture(node, node_id) ++ for node_id, node in enumerate(self.existing_nodes, 1) ++ ] ++ self.config.env.set_known_nodes(self.existing_nodes) ++ ++ def test_pcmk_doesnt_support_digests(self): ++ self.config.runner.pcmk.is_resource_digests_supported( ++ is_supported=False ++ ) ++ self.env_assist.assert_raise_library_error( ++ lambda: stonith.update_scsi_devices( ++ self.env_assist.get_env(), SCSI_STONITH_ID, () ++ ), ++ [ ++ fixture.error( ++ reports.codes.STONITH_RESTARTLESS_UPDATE_OF_SCSI_DEVICES_NOT_SUPPORTED, ++ ) ++ ], ++ expected_in_processor=False, ++ ) ++ ++ def test_devices_cannot_be_empty(self): ++ self.config.runner.pcmk.is_resource_digests_supported() ++ self.config.runner.cib.load(resources=fixture_scsi()) ++ self.env_assist.assert_raise_library_error( ++ lambda: stonith.update_scsi_devices( ++ self.env_assist.get_env(), SCSI_STONITH_ID, () ++ ) ++ ) ++ self.env_assist.assert_reports( ++ [ ++ fixture.error( ++ reports.codes.INVALID_OPTION_VALUE, ++ option_name="devices", ++ option_value="", ++ allowed_values=None, ++ cannot_be_empty=True, ++ forbidden_characters=None, ++ ) ++ ] ++ ) ++ ++ def test_nonexistant_id(self): ++ self.config.runner.pcmk.is_resource_digests_supported() ++ self.config.runner.cib.load(resources=fixture_scsi()) ++ self.env_assist.assert_raise_library_error( ++ lambda: stonith.update_scsi_devices( ++ self.env_assist.get_env(), "non-existent-id", DEVICES_2 ++ ) ++ ) ++ self.env_assist.assert_reports( ++ [ ++ fixture.error( ++ reports.codes.ID_NOT_FOUND, ++ 
id="non-existent-id", ++ expected_types=["primitive"], ++ context_type="cib", ++ context_id="", ++ ) ++ ] ++ ) ++ ++ def test_not_a_resource_id(self): ++ self.config.runner.pcmk.is_resource_digests_supported() ++ self.config.runner.cib.load(resources=fixture_scsi()) ++ self.env_assist.assert_raise_library_error( ++ lambda: stonith.update_scsi_devices( ++ self.env_assist.get_env(), ++ f"{SCSI_STONITH_ID}-instance_attributes-devices", ++ DEVICES_2, ++ ) ++ ) ++ self.env_assist.assert_reports( ++ [ ++ fixture.error( ++ reports.codes.ID_BELONGS_TO_UNEXPECTED_TYPE, ++ id=f"{SCSI_STONITH_ID}-instance_attributes-devices", ++ expected_types=["primitive"], ++ current_type="nvpair", ++ ) ++ ] ++ ) ++ ++ def test_not_supported_resource_type(self): ++ self.config.runner.pcmk.is_resource_digests_supported() ++ self.config.runner.cib.load(resources=fixture_scsi()) ++ self.env_assist.assert_raise_library_error( ++ lambda: stonith.update_scsi_devices( ++ self.env_assist.get_env(), "dummy", DEVICES_2 ++ ) ++ ) ++ self.env_assist.assert_reports( ++ [ ++ fixture.error( ++ reports.codes.STONITH_RESTARTLESS_UPDATE_UNSUPPORTED_AGENT, ++ resource_id="dummy", ++ resource_type="Dummy", ++ supported_stonith_types=["fence_scsi"], ++ ) ++ ] ++ ) ++ ++ def test_devices_option_missing(self): ++ self.config.runner.pcmk.is_resource_digests_supported() ++ self.config.runner.cib.load(resources=fixture_scsi(devices=None)) ++ self.env_assist.assert_raise_library_error( ++ lambda: stonith.update_scsi_devices( ++ self.env_assist.get_env(), SCSI_STONITH_ID, DEVICES_2 ++ ) ++ ) ++ self.env_assist.assert_reports( ++ [ ++ fixture.error( ++ reports.codes.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM, ++ reason=( ++ "no devices option configured for stonith device " ++ f"'{SCSI_STONITH_ID}'" ++ ), ++ reason_type=reports.const.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM_REASON_OTHER, ++ ) ++ ] ++ ) ++ ++ def test_devices_option_empty(self): ++ self.config.runner.pcmk.is_resource_digests_supported() ++ self.config.runner.cib.load(resources=fixture_scsi(devices="")) ++ self.env_assist.assert_raise_library_error( ++ lambda: stonith.update_scsi_devices( ++ self.env_assist.get_env(), SCSI_STONITH_ID, DEVICES_2 ++ ) ++ ) ++ self.env_assist.assert_reports( ++ [ ++ fixture.error( ++ reports.codes.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM, ++ reason=( ++ "no devices option configured for stonith device " ++ f"'{SCSI_STONITH_ID}'" ++ ), ++ reason_type=reports.const.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM_REASON_OTHER, ++ ) ++ ] ++ ) ++ ++ def test_stonith_resource_is_not_running(self): ++ self.config.runner.pcmk.is_resource_digests_supported() ++ self.config.runner.cib.load(resources=fixture_scsi()) ++ self.config.runner.pcmk.load_state( ++ resources=FIXTURE_CRM_MON_RES_STOPPED, nodes=FIXTURE_CRM_MON_NODES ++ ) ++ self.env_assist.assert_raise_library_error( ++ lambda: stonith.update_scsi_devices( ++ self.env_assist.get_env(), SCSI_STONITH_ID, DEVICES_2 ++ ), ++ [ ++ fixture.error( ++ reports.codes.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM, ++ reason=f"resource '{SCSI_STONITH_ID}' is not running on any node", ++ reason_type=reports.const.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM_REASON_NOT_RUNNING, ++ ) ++ ], ++ expected_in_processor=False, ++ ) ++ ++ def test_stonith_resource_is_running_on_more_than_one_node(self): ++ self.config.runner.pcmk.is_resource_digests_supported() ++ self.config.runner.cib.load(resources=fixture_scsi()) ++ self.config.runner.pcmk.load_state( ++ resources=FIXTURE_CRM_MON_RES_RUNNING_2, 
nodes=FIXTURE_CRM_MON_NODES ++ ) ++ self.env_assist.assert_raise_library_error( ++ lambda: stonith.update_scsi_devices( ++ self.env_assist.get_env(), SCSI_STONITH_ID, DEVICES_2 ++ ), ++ [ ++ fixture.error( ++ reports.codes.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM, ++ reason=( ++ f"resource '{SCSI_STONITH_ID}' is running on more than " ++ "1 node" ++ ), ++ reason_type=reports.const.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM_REASON_OTHER, ++ ) ++ ], ++ expected_in_processor=False, ++ ) ++ ++ def test_lrm_op_missing_digest_attributes(self): ++ devices = ",".join(DEVICES_2) ++ self.config.runner.pcmk.is_resource_digests_supported() ++ self.config.runner.cib.load( ++ resources=fixture_scsi(), ++ status=_fixture_status_lrm_ops_base( ++ SCSI_STONITH_ID, ++ f'', ++ ), ++ ) ++ self.config.runner.pcmk.load_state( ++ resources=FIXTURE_CRM_MON_RES_RUNNING_1, nodes=FIXTURE_CRM_MON_NODES ++ ) ++ self.config.runner.pcmk.resource_digests( ++ SCSI_STONITH_ID, ++ SCSI_NODE, ++ name="start.op.digests", ++ stdout=fixture_digests_xml( ++ SCSI_STONITH_ID, ++ SCSI_NODE, ++ devices=devices, ++ ), ++ args=[f"devices={devices}"], ++ ) ++ self.env_assist.assert_raise_library_error( ++ lambda: stonith.update_scsi_devices( ++ self.env_assist.get_env(), SCSI_STONITH_ID, DEVICES_2 ++ ), ++ [ ++ fixture.error( ++ reports.codes.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM, ++ reason="no digests attributes in lrm_rsc_op element", ++ reason_type=reports.const.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM_REASON_OTHER, ++ ) ++ ], ++ expected_in_processor=False, ++ ) ++ ++ def test_crm_resource_digests_missing(self): ++ devices = ",".join(DEVICES_2) ++ self.config.runner.pcmk.is_resource_digests_supported() ++ self.config.runner.cib.load( ++ resources=fixture_scsi(), ++ status=_fixture_status_lrm_ops_base( ++ SCSI_STONITH_ID, ++ ( ++ f'' ++ ), ++ ), ++ ) ++ self.config.runner.pcmk.load_state( ++ resources=FIXTURE_CRM_MON_RES_RUNNING_1, nodes=FIXTURE_CRM_MON_NODES ++ ) ++ self.config.runner.pcmk.resource_digests( ++ SCSI_STONITH_ID, ++ SCSI_NODE, ++ name="start.op.digests", ++ stdout=fixture_digests_xml( ++ SCSI_STONITH_ID, ++ SCSI_NODE, ++ devices=devices, ++ ), ++ args=[f"devices={devices}"], ++ ) ++ self.env_assist.assert_raise_library_error( ++ lambda: stonith.update_scsi_devices( ++ self.env_assist.get_env(), SCSI_STONITH_ID, DEVICES_2 ++ ), ++ [ ++ fixture.error( ++ reports.codes.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM, ++ reason=( ++ "necessary digest for 'op-restart-digest' attribute is " ++ "missing" ++ ), ++ reason_type=reports.const.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM_REASON_OTHER, ++ ) ++ ], ++ expected_in_processor=False, ++ ) ++ ++ def test_no_lrm_start_op(self): ++ self.config.runner.pcmk.is_resource_digests_supported() ++ self.config.runner.cib.load( ++ resources=fixture_scsi(), ++ status=_fixture_status_lrm_ops(SCSI_STONITH_ID, lrm_start_ops=()), ++ ) ++ self.config.runner.pcmk.load_state( ++ resources=FIXTURE_CRM_MON_RES_RUNNING_1, nodes=FIXTURE_CRM_MON_NODES ++ ) ++ self.env_assist.assert_raise_library_error( ++ lambda: stonith.update_scsi_devices( ++ self.env_assist.get_env(), SCSI_STONITH_ID, DEVICES_2 ++ ), ++ [ ++ fixture.error( ++ reports.codes.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM, ++ reason=( ++ "lrm_rsc_op element for start operation was not found" ++ ), ++ reason_type=reports.const.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM_REASON_OTHER, ++ ) ++ ], ++ expected_in_processor=False, ++ ) ++ ++ def test_monitor_ops_and_lrm_monitor_ops_do_not_match(self): ++ 
self.config.runner.pcmk.is_resource_digests_supported() ++ self.config.runner.cib.load( ++ resources=fixture_scsi( ++ resource_ops=( ++ ("monitor", "30s", "10s", None), ++ ("monitor", "30s", "20s", "31"), ++ ("monitor", "60s", None, None), ++ ) ++ ), ++ status=_fixture_status_lrm_ops(SCSI_STONITH_ID), ++ ) ++ self.config.runner.pcmk.load_state( ++ resources=FIXTURE_CRM_MON_RES_RUNNING_1, nodes=FIXTURE_CRM_MON_NODES ++ ) ++ self.config.runner.pcmk.resource_digests( ++ SCSI_STONITH_ID, ++ SCSI_NODE, ++ name="start.op.digests", ++ stdout=fixture_digests_xml( ++ SCSI_STONITH_ID, SCSI_NODE, devices=",".join(DEVICES_2) ++ ), ++ args=["devices={}".format(",".join(DEVICES_2))], ++ ) ++ self.env_assist.assert_raise_library_error( ++ lambda: stonith.update_scsi_devices( ++ self.env_assist.get_env(), SCSI_STONITH_ID, DEVICES_2 ++ ), ++ [ ++ fixture.error( ++ reports.codes.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM, ++ reason=( ++ "number of lrm_rsc_op and op elements for monitor " ++ "operation differs" ++ ), ++ reason_type=reports.const.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM_REASON_OTHER, ++ ) ++ ], ++ expected_in_processor=False, ++ ) ++ ++ def test_lrm_monitor_ops_not_found(self): ++ self.config.runner.pcmk.is_resource_digests_supported() ++ self.config.runner.cib.load( ++ resources=fixture_scsi( ++ resource_ops=(("monitor", "30s", None, None),) ++ ), ++ status=_fixture_status_lrm_ops(SCSI_STONITH_ID), ++ ) ++ self.config.runner.pcmk.load_state( ++ resources=FIXTURE_CRM_MON_RES_RUNNING_1, nodes=FIXTURE_CRM_MON_NODES ++ ) ++ self.config.runner.pcmk.resource_digests( ++ SCSI_STONITH_ID, ++ SCSI_NODE, ++ name="start.op.digests", ++ stdout=fixture_digests_xml( ++ SCSI_STONITH_ID, SCSI_NODE, devices=",".join(DEVICES_2) ++ ), ++ args=["devices={}".format(",".join(DEVICES_2))], ++ ) ++ self.env_assist.assert_raise_library_error( ++ lambda: stonith.update_scsi_devices( ++ self.env_assist.get_env(), SCSI_STONITH_ID, DEVICES_2 ++ ), ++ [ ++ fixture.error( ++ reports.codes.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM, ++ reason=( ++ "monitor lrm_rsc_op element for resource " ++ f"'{SCSI_STONITH_ID}', node '{SCSI_NODE}' and interval " ++ "'30000' not found" ++ ), ++ reason_type=reports.const.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM_REASON_OTHER, ++ ) ++ ], ++ expected_in_processor=False, ++ ) ++ ++ def test_node_missing_name_and_missing_auth_token(self): ++ self.config.runner.pcmk.is_resource_digests_supported() ++ self.config.runner.cib.load( ++ resources=fixture_scsi(), ++ status=_fixture_status_lrm_ops(SCSI_STONITH_ID), ++ ) ++ self.config.runner.pcmk.load_state( ++ resources=FIXTURE_CRM_MON_RES_RUNNING_1, nodes=FIXTURE_CRM_MON_NODES ++ ) ++ self.config.runner.pcmk.resource_digests( ++ SCSI_STONITH_ID, ++ SCSI_NODE, ++ name="start.op.digests", ++ stdout=fixture_digests_xml( ++ SCSI_STONITH_ID, SCSI_NODE, devices=",".join(DEVICES_2) ++ ), ++ args=["devices={}".format(",".join(DEVICES_2))], ++ ) ++ self.config.runner.pcmk.resource_digests( ++ SCSI_STONITH_ID, ++ SCSI_NODE, ++ name="monitor.op.digests", ++ stdout=fixture_digests_xml( ++ SCSI_STONITH_ID, SCSI_NODE, devices=",".join(DEVICES_2) ++ ), ++ args=[ ++ "devices={}".format(",".join(DEVICES_2)), ++ "CRM_meta_interval=60000", ++ ], ++ ) ++ self.config.corosync_conf.load_content( ++ corosync_conf_fixture( ++ self.existing_corosync_nodes ++ + [[("ring0_addr", "custom_node"), ("nodeid", "5")]], ++ ) ++ ) ++ self.config.env.set_known_nodes(self.existing_nodes[:-1]) ++ self.env_assist.assert_raise_library_error( ++ lambda: 
stonith.update_scsi_devices( ++ self.env_assist.get_env(), SCSI_STONITH_ID, DEVICES_2 ++ ), ++ ) ++ self.env_assist.assert_reports( ++ [ ++ fixture.error( ++ reports.codes.COROSYNC_CONFIG_MISSING_NAMES_OF_NODES, ++ fatal=True, ++ ), ++ fixture.error( ++ reports.codes.HOST_NOT_FOUND, ++ host_list=[self.existing_nodes[-1]], ++ ), ++ ] ++ ) ++ ++ def _unfence_failure_common_calls(self): ++ devices = ",".join(DEVICES_2) ++ self.config.runner.pcmk.is_resource_digests_supported() ++ self.config.runner.cib.load( ++ resources=fixture_scsi(), ++ status=_fixture_status_lrm_ops(SCSI_STONITH_ID), ++ ) ++ self.config.runner.pcmk.load_state( ++ resources=FIXTURE_CRM_MON_RES_RUNNING_1, nodes=FIXTURE_CRM_MON_NODES ++ ) ++ self.config.runner.pcmk.resource_digests( ++ SCSI_STONITH_ID, ++ SCSI_NODE, ++ name="start.op.digests", ++ stdout=fixture_digests_xml( ++ SCSI_STONITH_ID, ++ SCSI_NODE, ++ devices=devices, ++ ), ++ args=[f"devices={devices}"], ++ ) ++ self.config.runner.pcmk.resource_digests( ++ SCSI_STONITH_ID, ++ SCSI_NODE, ++ name="monitor.op.digests", ++ stdout=fixture_digests_xml( ++ SCSI_STONITH_ID, ++ SCSI_NODE, ++ devices=devices, ++ ), ++ args=[ ++ f"devices={devices}", ++ "CRM_meta_interval=60000", ++ ], ++ ) ++ self.config.corosync_conf.load_content( ++ corosync_conf_fixture(self.existing_corosync_nodes) ++ ) ++ ++ def test_unfence_failure_unable_to_connect(self): ++ self._unfence_failure_common_calls() ++ self.config.http.corosync.get_corosync_online_targets( ++ node_labels=self.existing_nodes ++ ) ++ self.config.http.scsi.unfence_node( ++ DEVICES_2, ++ communication_list=[ ++ dict( ++ label=self.existing_nodes[0], ++ raw_data=json.dumps( ++ dict(devices=DEVICES_2, node=self.existing_nodes[0]) ++ ), ++ was_connected=False, ++ error_msg="errA", ++ ), ++ dict( ++ label=self.existing_nodes[1], ++ raw_data=json.dumps( ++ dict(devices=DEVICES_2, node=self.existing_nodes[1]) ++ ), ++ output=json.dumps( ++ dto.to_dict( ++ communication.dto.InternalCommunicationResultDto( ++ status=communication.const.COM_STATUS_ERROR, ++ status_msg="error", ++ report_list=[ ++ reports.ReportItem.error( ++ reports.messages.StonithUnfencingFailed( ++ "errB" ++ ) ++ ).to_dto() ++ ], ++ data=None, ++ ) ++ ) ++ ), ++ ), ++ dict( ++ label=self.existing_nodes[2], ++ raw_data=json.dumps( ++ dict(devices=DEVICES_2, node=self.existing_nodes[2]) ++ ), ++ ), ++ ], ++ ) ++ self.env_assist.assert_raise_library_error( ++ lambda: stonith.update_scsi_devices( ++ self.env_assist.get_env(), SCSI_STONITH_ID, DEVICES_2 ++ ), ++ ) ++ self.env_assist.assert_reports( ++ [ ++ fixture.error( ++ reports.codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT, ++ node=self.existing_nodes[0], ++ command="api/v1/scsi-unfence-node/v1", ++ reason="errA", ++ ), ++ fixture.error( ++ reports.codes.STONITH_UNFENCING_FAILED, ++ reason="errB", ++ context=reports.dto.ReportItemContextDto( ++ node=self.existing_nodes[1], ++ ), ++ ), ++ ] ++ ) ++ ++ def test_unfence_failure_agent_script_failed(self): ++ self._unfence_failure_common_calls() ++ self.config.http.corosync.get_corosync_online_targets( ++ node_labels=self.existing_nodes ++ ) ++ self.config.http.scsi.unfence_node( ++ DEVICES_2, ++ communication_list=[ ++ dict( ++ label=self.existing_nodes[0], ++ raw_data=json.dumps( ++ dict(devices=DEVICES_2, node=self.existing_nodes[0]) ++ ), ++ ), ++ dict( ++ label=self.existing_nodes[1], ++ raw_data=json.dumps( ++ dict(devices=DEVICES_2, node=self.existing_nodes[1]) ++ ), ++ output=json.dumps( ++ dto.to_dict( ++ communication.dto.InternalCommunicationResultDto( ++ 
status=communication.const.COM_STATUS_ERROR, ++ status_msg="error", ++ report_list=[ ++ reports.ReportItem.error( ++ reports.messages.StonithUnfencingFailed( ++ "errB" ++ ) ++ ).to_dto() ++ ], ++ data=None, ++ ) ++ ) ++ ), ++ ), ++ dict( ++ label=self.existing_nodes[2], ++ raw_data=json.dumps( ++ dict(devices=DEVICES_2, node=self.existing_nodes[2]) ++ ), ++ ), ++ ], ++ ) ++ self.env_assist.assert_raise_library_error( ++ lambda: stonith.update_scsi_devices( ++ self.env_assist.get_env(), SCSI_STONITH_ID, DEVICES_2 ++ ), ++ ) ++ self.env_assist.assert_reports( ++ [ ++ fixture.error( ++ reports.codes.STONITH_UNFENCING_FAILED, ++ reason="errB", ++ context=reports.dto.ReportItemContextDto( ++ node=self.existing_nodes[1], ++ ), ++ ), ++ ] ++ ) ++ ++ def test_corosync_targets_unable_to_connect(self): ++ self._unfence_failure_common_calls() ++ self.config.http.corosync.get_corosync_online_targets( ++ communication_list=[ ++ dict( ++ label=self.existing_nodes[0], ++ output='{"corosync":true}', ++ ), ++ ] ++ + [ ++ dict( ++ label=node, ++ was_connected=False, ++ errno=7, ++ error_msg="an error", ++ ) ++ for node in self.existing_nodes[1:] ++ ] ++ ) ++ self.env_assist.assert_raise_library_error( ++ lambda: stonith.update_scsi_devices( ++ self.env_assist.get_env(), SCSI_STONITH_ID, DEVICES_2 ++ ), ++ ) ++ self.env_assist.assert_reports( ++ [ ++ fixture.error( ++ reports.codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT, ++ force_code=reports.codes.SKIP_OFFLINE_NODES, ++ node=node, ++ command="remote/status", ++ reason="an error", ++ ) ++ for node in self.existing_nodes[1:] ++ ] ++ ) ++ ++ def test_corosync_targets_skip_offline_unfence_node_running_corosync( ++ self, ++ ): ++ self._unfence_failure_common_calls() ++ self.config.http.corosync.get_corosync_online_targets( ++ communication_list=[ ++ dict( ++ label=self.existing_nodes[0], ++ output='{"corosync":true}', ++ ), ++ dict( ++ label=self.existing_nodes[1], ++ output='{"corosync":false}', ++ ), ++ dict( ++ label=self.existing_nodes[2], ++ was_connected=False, ++ errno=7, ++ error_msg="an error", ++ ), ++ ] ++ ) ++ self.config.http.scsi.unfence_node( ++ DEVICES_2, ++ communication_list=[ ++ dict( ++ label=self.existing_nodes[0], ++ raw_data=json.dumps( ++ dict(devices=DEVICES_2, node=self.existing_nodes[0]) ++ ), ++ ), ++ ], ++ ) ++ self.config.env.push_cib( ++ resources=fixture_scsi(devices=DEVICES_2), ++ status=_fixture_status_lrm_ops( ++ SCSI_STONITH_ID, ++ lrm_start_ops=DEFAULT_LRM_START_OPS_UPDATED, ++ lrm_monitor_ops=DEFAULT_LRM_MONITOR_OPS_UPDATED, ++ ), ++ ) ++ stonith.update_scsi_devices( ++ self.env_assist.get_env(), ++ SCSI_STONITH_ID, ++ DEVICES_2, ++ force_flags=[reports.codes.SKIP_OFFLINE_NODES], ++ ) ++ self.env_assist.assert_reports( ++ [ ++ fixture.warn( ++ reports.codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT, ++ node=self.existing_nodes[2], ++ command="remote/status", ++ reason="an error", ++ ), ++ ] ++ ) ++ ++ def test_corosync_targets_unable_to_perform_unfencing_operation( ++ self, ++ ): ++ self._unfence_failure_common_calls() ++ self.config.http.corosync.get_corosync_online_targets( ++ communication_list=[ ++ dict( ++ label=self.existing_nodes[0], ++ was_connected=False, ++ errno=7, ++ error_msg="an error", ++ ), ++ dict( ++ label=self.existing_nodes[1], ++ was_connected=False, ++ errno=7, ++ error_msg="an error", ++ ), ++ dict( ++ label=self.existing_nodes[2], ++ output='{"corosync":false}', ++ ), ++ ] ++ ) ++ self.config.http.scsi.unfence_node(DEVICES_2, communication_list=[]) ++ self.env_assist.assert_raise_library_error( 
++ lambda: stonith.update_scsi_devices( ++ self.env_assist.get_env(), ++ SCSI_STONITH_ID, ++ DEVICES_2, ++ force_flags=[reports.codes.SKIP_OFFLINE_NODES], ++ ), ++ ) ++ self.env_assist.assert_reports( ++ [ ++ fixture.warn( ++ reports.codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT, ++ node=node, ++ command="remote/status", ++ reason="an error", ++ ) ++ for node in self.existing_nodes[0:2] ++ ] ++ + [ ++ fixture.error( ++ reports.codes.UNABLE_TO_PERFORM_OPERATION_ON_ANY_NODE, ++ ), ++ ] ++ ) +-- +2.31.1 + diff --git a/SOURCES/bz1991654-01-fix-unfencing-in-pcs-stonith-update-scsi-devices.patch b/SOURCES/bz1991654-01-fix-unfencing-in-pcs-stonith-update-scsi-devices.patch new file mode 100644 index 0000000..60b7502 --- /dev/null +++ b/SOURCES/bz1991654-01-fix-unfencing-in-pcs-stonith-update-scsi-devices.patch @@ -0,0 +1,787 @@ +From cf68ded959ad03244c94de308b79fc1af806a474 Mon Sep 17 00:00:00 2001 +From: Ondrej Mular +Date: Wed, 15 Sep 2021 07:55:50 +0200 +Subject: [PATCH 1/2] fix unfencing in `pcs stonith update-scsi-devices` + +* do not unfence newly added devices on fenced cluster nodes +--- + pcs/common/reports/codes.py | 6 ++ + pcs/common/reports/messages.py | 41 +++++++ + pcs/lib/commands/scsi.py | 55 +++++++++- + pcs/lib/commands/stonith.py | 26 +++-- + pcs/lib/communication/scsi.py | 40 ++++--- + .../tier0/common/reports/test_messages.py | 24 +++++ + pcs_test/tier0/lib/commands/test_scsi.py | 101 ++++++++++++++++-- + .../test_stonith_update_scsi_devices.py | 87 ++++++++++++--- + .../tools/command_env/config_http_scsi.py | 16 ++- + .../tools/command_env/config_runner_scsi.py | 36 ++++++- + pcsd/api_v1.rb | 2 +- + pcsd/capabilities.xml | 8 +- + 12 files changed, 387 insertions(+), 55 deletions(-) + +diff --git a/pcs/common/reports/codes.py b/pcs/common/reports/codes.py +index bbd61500..4bee0bac 100644 +--- a/pcs/common/reports/codes.py ++++ b/pcs/common/reports/codes.py +@@ -468,6 +468,12 @@ STONITH_RESTARTLESS_UPDATE_UNSUPPORTED_AGENT = M( + "STONITH_RESTARTLESS_UPDATE_UNSUPPORTED_AGENT" + ) + STONITH_UNFENCING_FAILED = M("STONITH_UNFENCING_FAILED") ++STONITH_UNFENCING_DEVICE_STATUS_FAILED = M( ++ "STONITH_UNFENCING_DEVICE_STATUS_FAILED" ++) ++STONITH_UNFENCING_SKIPPED_DEVICES_FENCED = M( ++ "STONITH_UNFENCING_SKIPPED_DEVICES_FENCED" ++) + STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM = M( + "STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM" + ) +diff --git a/pcs/common/reports/messages.py b/pcs/common/reports/messages.py +index f9688437..be8dd154 100644 +--- a/pcs/common/reports/messages.py ++++ b/pcs/common/reports/messages.py +@@ -2782,6 +2782,47 @@ class StonithUnfencingFailed(ReportItemMessage): + return f"Unfencing failed:\n{self.reason}" + + ++@dataclass(frozen=True) ++class StonithUnfencingDeviceStatusFailed(ReportItemMessage): ++ """ ++ Unfencing failed on a cluster node. ++ """ ++ ++ device: str ++ reason: str ++ ++ _code = codes.STONITH_UNFENCING_DEVICE_STATUS_FAILED ++ ++ @property ++ def message(self) -> str: ++ return ( ++ "Unfencing failed, unable to check status of device " ++ f"'{self.device}': {self.reason}" ++ ) ++ ++ ++@dataclass(frozen=True) ++class StonithUnfencingSkippedDevicesFenced(ReportItemMessage): ++ """ ++ Unfencing skipped on a cluster node, because fenced devices were found on ++ the node. 
++ """ ++ ++ devices: List[str] ++ ++ _code = codes.STONITH_UNFENCING_SKIPPED_DEVICES_FENCED ++ ++ @property ++ def message(self) -> str: ++ return ( ++ "Unfencing skipped, {device_pl} {devices} {is_pl} fenced" ++ ).format( ++ device_pl=format_plural(self.devices, "device"), ++ devices=format_list(self.devices), ++ is_pl=format_plural(self.devices, "is", "are"), ++ ) ++ ++ + @dataclass(frozen=True) + class StonithRestartlessUpdateUnableToPerform(ReportItemMessage): + """ +diff --git a/pcs/lib/commands/scsi.py b/pcs/lib/commands/scsi.py +index 31a3ef2d..ff20a563 100644 +--- a/pcs/lib/commands/scsi.py ++++ b/pcs/lib/commands/scsi.py +@@ -8,20 +8,65 @@ from pcs.lib.env import LibraryEnvironment + from pcs.lib.errors import LibraryError + + +-def unfence_node(env: LibraryEnvironment, node: str, devices: Iterable[str]): ++def unfence_node( ++ env: LibraryEnvironment, ++ node: str, ++ original_devices: Iterable[str], ++ updated_devices: Iterable[str], ++) -> None: + """ +- Unfence scsi devices on a node by calling fence_scsi agent script. ++ Unfence scsi devices on a node by calling fence_scsi agent script. Only ++ newly added devices will be unfenced (set(updated_devices) - ++ set(original_devices)). Before unfencing, original devices are be checked ++ if any of them are not fenced. If there is a fenced device, unfencing will ++ be skipped. + + env -- provides communication with externals + node -- node name on wich is unfencing performed +- devices -- scsi devices to be unfenced ++ original_devices -- list of devices defined before update ++ updated_devices -- list of devices defined after update + """ ++ devices_to_unfence = set(updated_devices) - set(original_devices) ++ if not devices_to_unfence: ++ return ++ fence_scsi_bin = os.path.join(settings.fence_agent_binaries, "fence_scsi") ++ fenced_devices = [] ++ for device in original_devices: ++ stdout, stderr, return_code = env.cmd_runner().run( ++ [ ++ fence_scsi_bin, ++ "--action=status", ++ f"--devices={device}", ++ f"--plug={node}", ++ ] ++ ) ++ if return_code == 2: ++ fenced_devices.append(device) ++ elif return_code != 0: ++ raise LibraryError( ++ reports.ReportItem.error( ++ reports.messages.StonithUnfencingDeviceStatusFailed( ++ device, join_multilines([stderr, stdout]) ++ ) ++ ) ++ ) ++ if fenced_devices: ++ # At least one of existing devices is off, which means the node has ++ # been fenced and new devices should not be unfenced. ++ env.report_processor.report( ++ reports.ReportItem.info( ++ reports.messages.StonithUnfencingSkippedDevicesFenced( ++ fenced_devices ++ ) ++ ) ++ ) ++ return + stdout, stderr, return_code = env.cmd_runner().run( + [ +- os.path.join(settings.fence_agent_binaries, "fence_scsi"), ++ fence_scsi_bin, + "--action=on", + "--devices", +- ",".join(sorted(devices)), ++ ",".join(sorted(devices_to_unfence)), + f"--plug={node}", + ], + ) +diff --git a/pcs/lib/commands/stonith.py b/pcs/lib/commands/stonith.py +index 6f26e7d3..0dcf44f2 100644 +--- a/pcs/lib/commands/stonith.py ++++ b/pcs/lib/commands/stonith.py +@@ -453,7 +453,8 @@ def _update_scsi_devices_get_element_and_devices( + + def _unfencing_scsi_devices( + env: LibraryEnvironment, +- device_list: Iterable[str], ++ original_devices: Iterable[str], ++ updated_devices: Iterable[str], + force_flags: Container[reports.types.ForceCode] = (), + ) -> None: + """ +@@ -461,9 +462,13 @@ def _unfencing_scsi_devices( + to pcsd and corosync is running. 
+ + env -- provides all for communication with externals +- device_list -- devices to be unfenced ++ original_devices -- devices before update ++ updated_devices -- devices after update + force_flags -- list of flags codes + """ ++ devices_to_unfence = set(updated_devices) - set(original_devices) ++ if not devices_to_unfence: ++ return + cluster_nodes_names, nodes_report_list = get_existing_nodes_names( + env.get_corosync_conf(), + error_on_missing_name=True, +@@ -487,7 +492,11 @@ def _unfencing_scsi_devices( + online_corosync_target_list = run_and_raise( + env.get_node_communicator(), com_cmd + ) +- com_cmd = Unfence(env.report_processor, sorted(device_list)) ++ com_cmd = Unfence( ++ env.report_processor, ++ original_devices=sorted(original_devices), ++ updated_devices=sorted(updated_devices), ++ ) + com_cmd.set_targets(online_corosync_target_list) + run_and_raise(env.get_node_communicator(), com_cmd) + +@@ -531,9 +540,9 @@ def update_scsi_devices( + IdProvider(stonith_el), + set_device_list, + ) +- devices_for_unfencing = set(set_device_list).difference(current_device_list) +- if devices_for_unfencing: +- _unfencing_scsi_devices(env, devices_for_unfencing, force_flags) ++ _unfencing_scsi_devices( ++ env, current_device_list, set_device_list, force_flags ++ ) + env.push_cib() + + +@@ -585,6 +594,7 @@ def update_scsi_devices_add_remove( + IdProvider(stonith_el), + updated_device_set, + ) +- if add_device_list: +- _unfencing_scsi_devices(env, add_device_list, force_flags) ++ _unfencing_scsi_devices( ++ env, current_device_list, updated_device_set, force_flags ++ ) + env.push_cib() +diff --git a/pcs/lib/communication/scsi.py b/pcs/lib/communication/scsi.py +index 7b272017..250d67aa 100644 +--- a/pcs/lib/communication/scsi.py ++++ b/pcs/lib/communication/scsi.py +@@ -1,4 +1,5 @@ + import json ++from typing import Iterable + + from dacite import DaciteError + +@@ -26,9 +27,15 @@ class Unfence( + MarkSuccessfulMixin, + RunRemotelyBase, + ): +- def __init__(self, report_processor, devices): ++ def __init__( ++ self, ++ report_processor: reports.ReportProcessor, ++ original_devices: Iterable[str], ++ updated_devices: Iterable[str], ++ ) -> None: + super().__init__(report_processor) +- self._devices = devices ++ self._original_devices = original_devices ++ self._updated_devices = updated_devices + + def _get_request_data(self): + return None +@@ -38,9 +45,13 @@ class Unfence( + Request( + target, + RequestData( +- "api/v1/scsi-unfence-node/v1", ++ "api/v1/scsi-unfence-node/v2", + data=json.dumps( +- {"devices": self._devices, "node": target.label} ++ dict( ++ node=target.label, ++ original_devices=self._original_devices, ++ updated_devices=self._updated_devices, ++ ) + ), + ), + ) +@@ -48,7 +59,9 @@ class Unfence( + ] + + def _process_response(self, response): +- report_item = response_to_report_item(response) ++ report_item = response_to_report_item( ++ response, report_pcsd_too_old_on_404=True ++ ) + if report_item: + self._report(report_item) + return +@@ -57,15 +70,14 @@ class Unfence( + result = from_dict( + InternalCommunicationResultDto, json.loads(response.data) + ) +- if result.status != const.COM_STATUS_SUCCESS: +- context = reports.ReportItemContext(node_label) +- self._report_list( +- [ +- reports.report_dto_to_item(report, context) +- for report in result.report_list +- ] +- ) +- else: ++ context = reports.ReportItemContext(node_label) ++ self._report_list( ++ [ ++ reports.report_dto_to_item(report, context) ++ for report in result.report_list ++ ] ++ ) ++ if result.status == 
const.COM_STATUS_SUCCESS: + self._on_success() + + except (json.JSONDecodeError, DaciteError): +diff --git a/pcs_test/tier0/common/reports/test_messages.py b/pcs_test/tier0/common/reports/test_messages.py +index b0826cfd..05c3f619 100644 +--- a/pcs_test/tier0/common/reports/test_messages.py ++++ b/pcs_test/tier0/common/reports/test_messages.py +@@ -1904,6 +1904,30 @@ class StonithUnfencingFailed(NameBuildTest): + ) + + ++class StonithUnfencingDeviceStatusFailed(NameBuildTest): ++ def test_build_message(self): ++ self.assert_message_from_report( ++ "Unfencing failed, unable to check status of device 'dev1': reason", ++ reports.StonithUnfencingDeviceStatusFailed("dev1", "reason"), ++ ) ++ ++ ++class StonithUnfencingSkippedDevicesFenced(NameBuildTest): ++ def test_one_device(self): ++ self.assert_message_from_report( ++ "Unfencing skipped, device 'dev1' is fenced", ++ reports.StonithUnfencingSkippedDevicesFenced(["dev1"]), ++ ) ++ ++ def test_multiple_devices(self): ++ self.assert_message_from_report( ++ "Unfencing skipped, devices 'dev1', 'dev2', 'dev3' are fenced", ++ reports.StonithUnfencingSkippedDevicesFenced( ++ ["dev2", "dev1", "dev3"] ++ ), ++ ) ++ ++ + class StonithRestartlessUpdateUnableToPerform(NameBuildTest): + def test_build_message(self): + self.assert_message_from_report( +diff --git a/pcs_test/tier0/lib/commands/test_scsi.py b/pcs_test/tier0/lib/commands/test_scsi.py +index de75743f..8ef9836a 100644 +--- a/pcs_test/tier0/lib/commands/test_scsi.py ++++ b/pcs_test/tier0/lib/commands/test_scsi.py +@@ -10,26 +10,113 @@ from pcs.lib.commands import scsi + class TestUnfenceNode(TestCase): + def setUp(self): + self.env_assist, self.config = get_env_tools(self) ++ self.old_devices = ["device1", "device3"] ++ self.new_devices = ["device3", "device0", "device2"] ++ self.added_devices = set(self.new_devices) - set(self.old_devices) ++ self.node = "node1" + +- def test_success(self): +- self.config.runner.scsi.unfence_node("node1", ["/dev/sda", "/dev/sdb"]) ++ def test_success_devices_to_unfence(self): ++ for old_dev in self.old_devices: ++ self.config.runner.scsi.get_status( ++ self.node, old_dev, name=f"runner.scsi.is_fenced.{old_dev}" ++ ) ++ self.config.runner.scsi.unfence_node(self.node, self.added_devices) + scsi.unfence_node( +- self.env_assist.get_env(), "node1", ["/dev/sdb", "/dev/sda"] ++ self.env_assist.get_env(), ++ self.node, ++ self.old_devices, ++ self.new_devices, + ) + self.env_assist.assert_reports([]) + +- def test_failure(self): ++ def test_success_no_devices_to_unfence(self): ++ scsi.unfence_node( ++ self.env_assist.get_env(), ++ self.node, ++ {"device1", "device2", "device3"}, ++ {"device3"}, ++ ) ++ self.env_assist.assert_reports([]) ++ ++ def test_unfencing_failure(self): ++ err_msg = "stderr" ++ for old_dev in self.old_devices: ++ self.config.runner.scsi.get_status( ++ self.node, old_dev, name=f"runner.scsi.is_fenced.{old_dev}" ++ ) + self.config.runner.scsi.unfence_node( +- "node1", ["/dev/sda", "/dev/sdb"], stderr="stderr", return_code=1 ++ self.node, self.added_devices, stderr=err_msg, return_code=1 + ) + self.env_assist.assert_raise_library_error( + lambda: scsi.unfence_node( +- self.env_assist.get_env(), "node1", ["/dev/sdb", "/dev/sda"] ++ self.env_assist.get_env(), ++ self.node, ++ self.old_devices, ++ self.new_devices, + ), + [ + fixture.error( +- report_codes.STONITH_UNFENCING_FAILED, reason="stderr" ++ report_codes.STONITH_UNFENCING_FAILED, reason=err_msg + ) + ], + expected_in_processor=False, + ) ++ ++ def test_device_status_failed(self): ++ err_msg = 
"stderr" ++ new_devices = ["device1", "device2", "device3", "device4"] ++ old_devices = new_devices[:-1] ++ ok_devices = new_devices[0:2] ++ err_device = new_devices[2] ++ for dev in ok_devices: ++ self.config.runner.scsi.get_status( ++ self.node, dev, name=f"runner.scsi.is_fenced.{dev}" ++ ) ++ self.config.runner.scsi.get_status( ++ self.node, ++ err_device, ++ name=f"runner.scsi.is_fenced.{err_device}", ++ stderr=err_msg, ++ return_code=1, ++ ) ++ self.env_assist.assert_raise_library_error( ++ lambda: scsi.unfence_node( ++ self.env_assist.get_env(), ++ self.node, ++ old_devices, ++ new_devices, ++ ), ++ [ ++ fixture.error( ++ report_codes.STONITH_UNFENCING_DEVICE_STATUS_FAILED, ++ device=err_device, ++ reason=err_msg, ++ ) ++ ], ++ expected_in_processor=False, ++ ) ++ ++ def test_unfencing_skipped_devices_are_fenced(self): ++ stdout_off = "Status: OFF" ++ for old_dev in self.old_devices: ++ self.config.runner.scsi.get_status( ++ self.node, ++ old_dev, ++ name=f"runner.scsi.is_fenced.{old_dev}", ++ stdout=stdout_off, ++ return_code=2, ++ ) ++ scsi.unfence_node( ++ self.env_assist.get_env(), ++ self.node, ++ self.old_devices, ++ self.new_devices, ++ ) ++ self.env_assist.assert_reports( ++ [ ++ fixture.info( ++ report_codes.STONITH_UNFENCING_SKIPPED_DEVICES_FENCED, ++ devices=sorted(self.old_devices), ++ ) ++ ] ++ ) +diff --git a/pcs_test/tier0/lib/commands/test_stonith_update_scsi_devices.py b/pcs_test/tier0/lib/commands/test_stonith_update_scsi_devices.py +index 6ff6b99a..ed8f5d4f 100644 +--- a/pcs_test/tier0/lib/commands/test_stonith_update_scsi_devices.py ++++ b/pcs_test/tier0/lib/commands/test_stonith_update_scsi_devices.py +@@ -1,3 +1,4 @@ ++# pylint: disable=too-many-lines + import json + from unittest import mock, TestCase + +@@ -297,7 +298,9 @@ class UpdateScsiDevicesMixin: + node_labels=self.existing_nodes + ) + self.config.http.scsi.unfence_node( +- unfence, node_labels=self.existing_nodes ++ original_devices=devices_before, ++ updated_devices=devices_updated, ++ node_labels=self.existing_nodes, + ) + self.config.env.push_cib( + resources=fixture_scsi( +@@ -449,14 +452,14 @@ class UpdateScsiDevicesFailuresMixin: + node_labels=self.existing_nodes + ) + self.config.http.scsi.unfence_node( +- DEVICES_2, + communication_list=[ + dict( + label=self.existing_nodes[0], + raw_data=json.dumps( + dict( +- devices=[DEV_2], + node=self.existing_nodes[0], ++ original_devices=DEVICES_1, ++ updated_devices=DEVICES_2, + ) + ), + was_connected=False, +@@ -466,8 +469,9 @@ class UpdateScsiDevicesFailuresMixin: + label=self.existing_nodes[1], + raw_data=json.dumps( + dict( +- devices=[DEV_2], + node=self.existing_nodes[1], ++ original_devices=DEVICES_1, ++ updated_devices=DEVICES_2, + ) + ), + output=json.dumps( +@@ -491,8 +495,9 @@ class UpdateScsiDevicesFailuresMixin: + label=self.existing_nodes[2], + raw_data=json.dumps( + dict( +- devices=[DEV_2], + node=self.existing_nodes[2], ++ original_devices=DEVICES_1, ++ updated_devices=DEVICES_2, + ) + ), + ), +@@ -504,7 +509,7 @@ class UpdateScsiDevicesFailuresMixin: + fixture.error( + reports.codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT, + node=self.existing_nodes[0], +- command="api/v1/scsi-unfence-node/v1", ++ command="api/v1/scsi-unfence-node/v2", + reason="errA", + ), + fixture.error( +@@ -517,20 +522,76 @@ class UpdateScsiDevicesFailuresMixin: + ] + ) + ++ def test_unfence_failure_unknown_command(self): ++ self._unfence_failure_common_calls() ++ self.config.http.corosync.get_corosync_online_targets( ++ node_labels=self.existing_nodes ++ ) ++ 
communication_list = [ ++ dict( ++ label=node, ++ raw_data=json.dumps( ++ dict( ++ node=node, ++ original_devices=DEVICES_1, ++ updated_devices=DEVICES_2, ++ ) ++ ), ++ ) ++ for node in self.existing_nodes[0:2] ++ ] ++ communication_list.append( ++ dict( ++ label=self.existing_nodes[2], ++ response_code=404, ++ raw_data=json.dumps( ++ dict( ++ node=self.existing_nodes[2], ++ original_devices=DEVICES_1, ++ updated_devices=DEVICES_2, ++ ) ++ ), ++ output=json.dumps( ++ dto.to_dict( ++ communication.dto.InternalCommunicationResultDto( ++ status=communication.const.COM_STATUS_UNKNOWN_CMD, ++ status_msg=( ++ "Unknown command '/api/v1/scsi-unfence-node/v2'" ++ ), ++ report_list=[], ++ data=None, ++ ) ++ ) ++ ), ++ ), ++ ) ++ self.config.http.scsi.unfence_node( ++ communication_list=communication_list ++ ) ++ self.env_assist.assert_raise_library_error(self.command()) ++ self.env_assist.assert_reports( ++ [ ++ fixture.error( ++ reports.codes.PCSD_VERSION_TOO_OLD, ++ node=self.existing_nodes[2], ++ ), ++ ] ++ ) ++ + def test_unfence_failure_agent_script_failed(self): + self._unfence_failure_common_calls() + self.config.http.corosync.get_corosync_online_targets( + node_labels=self.existing_nodes + ) + self.config.http.scsi.unfence_node( +- DEVICES_2, + communication_list=[ + dict( + label=self.existing_nodes[0], + raw_data=json.dumps( + dict( +- devices=[DEV_2], + node=self.existing_nodes[0], ++ original_devices=DEVICES_1, ++ updated_devices=DEVICES_2, + ) + ), + ), +@@ -538,8 +599,9 @@ class UpdateScsiDevicesFailuresMixin: + label=self.existing_nodes[1], + raw_data=json.dumps( + dict( +- devices=[DEV_2], + node=self.existing_nodes[1], ++ original_devices=DEVICES_1, ++ updated_devices=DEVICES_2, + ) + ), + output=json.dumps( +@@ -563,8 +625,9 @@ class UpdateScsiDevicesFailuresMixin: + label=self.existing_nodes[2], + raw_data=json.dumps( + dict( +- devices=[DEV_2], + node=self.existing_nodes[2], ++ original_devices=DEVICES_1, ++ updated_devices=DEVICES_2, + ) + ), + ), +@@ -639,14 +702,14 @@ class UpdateScsiDevicesFailuresMixin: + ] + ) + self.config.http.scsi.unfence_node( +- DEVICES_2, + communication_list=[ + dict( + label=self.existing_nodes[0], + raw_data=json.dumps( + dict( +- devices=[DEV_2], + node=self.existing_nodes[0], ++ original_devices=DEVICES_1, ++ updated_devices=DEVICES_2, + ) + ), + ), +diff --git a/pcs_test/tools/command_env/config_http_scsi.py b/pcs_test/tools/command_env/config_http_scsi.py +index 0e9f63af..7150eef9 100644 +--- a/pcs_test/tools/command_env/config_http_scsi.py ++++ b/pcs_test/tools/command_env/config_http_scsi.py +@@ -14,7 +14,8 @@ class ScsiShortcuts: + + def unfence_node( + self, +- devices, ++ original_devices=(), ++ updated_devices=(), + node_labels=None, + communication_list=None, + name="http.scsi.unfence_node", +@@ -22,7 +23,8 @@ class ScsiShortcuts: + """ + Create a calls for node unfencing + +- list devices -- list of scsi devices ++ list original_devices -- list of scsi devices before an update ++ list updated_devices -- list of scsi devices after an update + list node_labels -- create success responses from these nodes + list communication_list -- use these custom responses + string name -- the key of this call +@@ -39,7 +41,13 @@ class ScsiShortcuts: + communication_list = [ + dict( + label=node, +- raw_data=json.dumps(dict(devices=devices, node=node)), ++ raw_data=json.dumps( ++ dict( ++ node=node, ++ original_devices=original_devices, ++ updated_devices=updated_devices, ++ ) ++ ), + ) + for node in node_labels + ] +@@ -47,7 +55,7 @@ class 
ScsiShortcuts: + self.__calls, + name, + communication_list, +- action="api/v1/scsi-unfence-node/v1", ++ action="api/v1/scsi-unfence-node/v2", + output=json.dumps( + to_dict( + communication.dto.InternalCommunicationResultDto( +diff --git a/pcs_test/tools/command_env/config_runner_scsi.py b/pcs_test/tools/command_env/config_runner_scsi.py +index 4b671bb7..3cee13d6 100644 +--- a/pcs_test/tools/command_env/config_runner_scsi.py ++++ b/pcs_test/tools/command_env/config_runner_scsi.py +@@ -35,7 +35,41 @@ class ScsiShortcuts: + os.path.join(settings.fence_agent_binaries, "fence_scsi"), + "--action=on", + "--devices", +- ",".join(devices), ++ ",".join(sorted(devices)), ++ f"--plug={node}", ++ ], ++ stdout=stdout, ++ stderr=stderr, ++ returncode=return_code, ++ ), ++ ) ++ ++ def get_status( ++ self, ++ node, ++ device, ++ stdout="", ++ stderr="", ++ return_code=0, ++ name="runner.scsi.is_fenced", ++ ): ++ """ ++ Create a call for getting scsi status ++ ++ string node -- a node from which is unfencing performed ++ str device -- a device to check ++ string stdout -- stdout from fence_scsi agent script ++ string stderr -- stderr from fence_scsi agent script ++ int return_code -- return code of the fence_scsi agent script ++ string name -- the key of this call ++ """ ++ self.__calls.place( ++ name, ++ RunnerCall( ++ [ ++ os.path.join(settings.fence_agent_binaries, "fence_scsi"), ++ "--action=status", ++ f"--devices={device}", + f"--plug={node}", + ], + stdout=stdout, +diff --git a/pcsd/api_v1.rb b/pcsd/api_v1.rb +index 7edeeabf..e55c2be7 100644 +--- a/pcsd/api_v1.rb ++++ b/pcsd/api_v1.rb +@@ -291,7 +291,7 @@ def route_api_v1(auth_user, params, request) + :only_superuser => false, + :permissions => Permissions::WRITE, + }, +- 'scsi-unfence-node/v1' => { ++ 'scsi-unfence-node/v2' => { + :cmd => 'scsi.unfence_node', + :only_superuser => false, + :permissions => Permissions::WRITE, +diff --git a/pcsd/capabilities.xml b/pcsd/capabilities.xml +index 58ebcf0f..3954aa5d 100644 +--- a/pcsd/capabilities.xml ++++ b/pcsd/capabilities.xml +@@ -1892,11 +1892,13 @@ + pcs commands: stonith update-scsi-devices + + +- ++ + +- Unfence scsi devices on a cluster node. ++ Unfence scsi devices on a cluster node. In comparison with v1, only ++ newly added devices are unfenced. In case any existing device is ++ fenced, unfencing will be skipped. 
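Note on the v2 behaviour described above: the patch's actual implementation is the unfence_node hunk in pcs/lib/commands/scsi.py earlier in this patch, which runs fence_scsi through env.cmd_runner(). The short sketch below only illustrates the same decision flow and is not the patch's code; the exit-code convention (0 = device on, 2 = device fenced) is taken from the status check added in that hunk, while the device names and the check_status callable are made up for the example.

# Sketch only: the v2 unfencing decision described above. check_status(dev)
# stands in for running "fence_scsi --action=status --devices=<dev>
# --plug=<node>" and returning its exit code (0 = on, 2 = fenced/off,
# anything else = error), as in the unfence_node hunk of this patch.
from typing import Callable, Iterable, List, Set


def plan_unfencing(
    original_devices: Iterable[str],
    updated_devices: Iterable[str],
    check_status: Callable[[str], int],
) -> Set[str]:
    original = list(original_devices)
    new_devices = set(updated_devices) - set(original)
    if not new_devices:
        return set()  # no newly added devices, nothing to unfence
    fenced: List[str] = []
    for device in original:
        code = check_status(device)
        if code == 2:
            fenced.append(device)
        elif code != 0:
            raise RuntimeError(f"unable to check status of device '{device}'")
    if fenced:
        return set()  # the node is fenced on an existing device; skip unfencing
    return new_devices


# Example: the node is fenced on /dev/sda, so the new /dev/sdb stays fenced.
print(plan_unfencing(["/dev/sda"], ["/dev/sda", "/dev/sdb"], lambda dev: 2))
# set()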
+ +- daemon urls: /api/v1/scsi-unfence-node/v1 ++ daemon urls: /api/v1/scsi-unfence-node/v2 + + + +-- +2.31.1 + diff --git a/SOURCES/bz1992668-01-add-add-remove-syntax-for-command-pcs-stonith-update.patch b/SOURCES/bz1992668-01-add-add-remove-syntax-for-command-pcs-stonith-update.patch new file mode 100644 index 0000000..cdad5a1 --- /dev/null +++ b/SOURCES/bz1992668-01-add-add-remove-syntax-for-command-pcs-stonith-update.patch @@ -0,0 +1,3629 @@ +From d20c356298eacec1a71a85c29f7d1f8b63fd8cb7 Mon Sep 17 00:00:00 2001 +From: Miroslav Lisik +Date: Fri, 6 Aug 2021 17:35:03 +0200 +Subject: [PATCH 1/2] add add/remove syntax for command `pcs stonith + update-scsi-devices` + +--- + CHANGELOG.md | 9 - + pcs/cli/common/lib_wrapper.py | 1 + + pcs/common/reports/codes.py | 39 + + pcs/common/reports/const.py | 4 + + pcs/common/reports/messages.py | 289 ++++ + pcs/common/reports/types.py | 2 + + pcs/common/str_tools.py | 26 +- + pcs/lib/commands/stonith.py | 307 +++- + pcs/pcs.8.in | 4 +- + pcs/stonith.py | 43 +- + pcs/usage.py | 13 +- + pcs_test/Makefile.am | 1 + + pcs_test/tier0/cli/test_stonith.py | 169 +- + .../tier0/common/reports/test_messages.py | 185 +++ + pcs_test/tier0/common/test_str_tools.py | 63 +- + pcs_test/tier0/lib/cib/test_stonith.py | 135 +- + .../test_stonith_update_scsi_devices.py | 1439 ++++++++++------- + pcsd/capabilities.xml | 8 + + 18 files changed, 2041 insertions(+), 696 deletions(-) + +diff --git a/CHANGELOG.md b/CHANGELOG.md +index c15546ba..f768cc36 100644 +--- a/CHANGELOG.md ++++ b/CHANGELOG.md +@@ -1,14 +1,5 @@ + # Change Log + +-## [Unreleased] +- +-### Fixed +-- Fixed an error when creating a resource which defines 'depth' attribute for +- its operations ([rhbz#1998454]) +- +-[rhbz#1998454]: https://bugzilla.redhat.com/show_bug.cgi?id=1998454 +- +- + ## [0.10.10] - 2021-08-19 + + ### Added +diff --git a/pcs/cli/common/lib_wrapper.py b/pcs/cli/common/lib_wrapper.py +index 06410b41..2bf83485 100644 +--- a/pcs/cli/common/lib_wrapper.py ++++ b/pcs/cli/common/lib_wrapper.py +@@ -436,6 +436,7 @@ def load_module(env, middleware_factory, name): + "history_cleanup": stonith.history_cleanup, + "history_update": stonith.history_update, + "update_scsi_devices": stonith.update_scsi_devices, ++ "update_scsi_devices_add_remove": stonith.update_scsi_devices_add_remove, + }, + ) + +diff --git a/pcs/common/reports/codes.py b/pcs/common/reports/codes.py +index 32898154..bbd61500 100644 +--- a/pcs/common/reports/codes.py ++++ b/pcs/common/reports/codes.py +@@ -12,6 +12,29 @@ SKIP_OFFLINE_NODES = F("SKIP_OFFLINE_NODES") + # messages + + ++ADD_REMOVE_ITEMS_NOT_SPECIFIED = M("ADD_REMOVE_ITEMS_NOT_SPECIFIED") ++ADD_REMOVE_ITEMS_DUPLICATION = M("ADD_REMOVE_ITEMS_DUPLICATION") ++ADD_REMOVE_CANNOT_ADD_ITEMS_ALREADY_IN_THE_CONTAINER = M( ++ "ADD_REMOVE_CANNOT_ADD_ITEMS_ALREADY_IN_THE_CONTAINER" ++) ++ADD_REMOVE_CANNOT_REMOVE_ITEMS_NOT_IN_THE_CONTAINER = M( ++ "ADD_REMOVE_CANNOT_REMOVE_ITEMS_NOT_IN_THE_CONTAINER" ++) ++ADD_REMOVE_CANNOT_ADD_AND_REMOVE_ITEMS_AT_THE_SAME_TIME = M( ++ "ADD_REMOVE_CANNOT_ADD_AND_REMOVE_ITEMS_AT_THE_SAME_TIME" ++) ++ADD_REMOVE_CANNOT_REMOVE_ALL_ITEMS_FROM_THE_CONTAINER = M( ++ "ADD_REMOVE_CANNOT_REMOVE_ALL_ITEMS_FROM_THE_CONTAINER" ++) ++ADD_REMOVE_ADJACENT_ITEM_NOT_IN_THE_CONTAINER = M( ++ "ADD_REMOVE_ADJACENT_ITEM_NOT_IN_THE_CONTAINER" ++) ++ADD_REMOVE_CANNOT_PUT_ITEM_NEXT_TO_ITSELF = M( ++ "ADD_REMOVE_CANNOT_PUT_ITEM_NEXT_TO_ITSELF" ++) ++ADD_REMOVE_CANNOT_SPECIFY_ADJACENT_ITEM_WITHOUT_ITEMS_TO_ADD = M( ++ 
"ADD_REMOVE_CANNOT_SPECIFY_ADJACENT_ITEM_WITHOUT_ITEMS_TO_ADD" ++) + AGENT_NAME_GUESS_FOUND_MORE_THAN_ONE = M("AGENT_NAME_GUESS_FOUND_MORE_THAN_ONE") + AGENT_NAME_GUESS_FOUND_NONE = M("AGENT_NAME_GUESS_FOUND_NONE") + AGENT_NAME_GUESSED = M("AGENT_NAME_GUESSED") +@@ -44,17 +67,23 @@ CANNOT_BAN_RESOURCE_MASTER_RESOURCE_NOT_PROMOTABLE = M( + CANNOT_BAN_RESOURCE_STOPPED_NO_NODE_SPECIFIED = M( + "CANNOT_BAN_RESOURCE_STOPPED_NO_NODE_SPECIFIED" + ) ++# TODO: remove, use ADD_REMOVE reports + CANNOT_GROUP_RESOURCE_ADJACENT_RESOURCE_FOR_NEW_GROUP = M( + "CANNOT_GROUP_RESOURCE_ADJACENT_RESOURCE_FOR_NEW_GROUP" + ) ++# TODO: remove, use ADD_REMOVE reports + CANNOT_GROUP_RESOURCE_ADJACENT_RESOURCE_NOT_IN_GROUP = M( + "CANNOT_GROUP_RESOURCE_ADJACENT_RESOURCE_NOT_IN_GROUP" + ) ++# TODO: remove, use ADD_REMOVE reports + CANNOT_GROUP_RESOURCE_ALREADY_IN_THE_GROUP = M( + "CANNOT_GROUP_RESOURCE_ALREADY_IN_THE_GROUP" + ) ++# TODO: remove, use ADD_REMOVE reports + CANNOT_GROUP_RESOURCE_MORE_THAN_ONCE = M("CANNOT_GROUP_RESOURCE_MORE_THAN_ONCE") ++# TODO: remove, use ADD_REMOVE reports + CANNOT_GROUP_RESOURCE_NEXT_TO_ITSELF = M("CANNOT_GROUP_RESOURCE_NEXT_TO_ITSELF") ++# TODO: remove, use ADD_REMOVE reports + CANNOT_GROUP_RESOURCE_NO_RESOURCES = M("CANNOT_GROUP_RESOURCE_NO_RESOURCES") + CANNOT_GROUP_RESOURCE_WRONG_TYPE = M("CANNOT_GROUP_RESOURCE_WRONG_TYPE") + CANNOT_LEAVE_GROUP_EMPTY_AFTER_MOVE = M("CANNOT_LEAVE_GROUP_EMPTY_AFTER_MOVE") +@@ -448,13 +477,17 @@ SERVICE_COMMAND_ON_NODE_ERROR = M("SERVICE_COMMAND_ON_NODE_ERROR") + SERVICE_COMMAND_ON_NODE_SUCCESS = M("SERVICE_COMMAND_ON_NODE_SUCCESS") + SERVICE_UNABLE_TO_DETECT_INIT_SYSTEM = M("SERVICE_UNABLE_TO_DETECT_INIT_SYSTEM") + SYSTEM_WILL_RESET = M("SYSTEM_WILL_RESET") ++# TODO: remove, use ADD_REMOVE reports + TAG_ADD_REMOVE_IDS_DUPLICATION = M("TAG_ADD_REMOVE_IDS_DUPLICATION") ++# TODO: remove, use ADD_REMOVE reports + TAG_ADJACENT_REFERENCE_ID_NOT_IN_THE_TAG = M( + "TAG_ADJACENT_REFERENCE_ID_NOT_IN_THE_TAG" + ) ++# TODO: remove, use ADD_REMOVE reports + TAG_CANNOT_ADD_AND_REMOVE_IDS_AT_THE_SAME_TIME = M( + "TAG_CANNOT_ADD_AND_REMOVE_IDS_AT_THE_SAME_TIME" + ) ++# TODO: remove, use ADD_REMOVE reports + TAG_CANNOT_ADD_REFERENCE_IDS_ALREADY_IN_THE_TAG = M( + "TAG_CANNOT_ADD_REFERENCE_IDS_ALREADY_IN_THE_TAG" + ) +@@ -462,8 +495,11 @@ TAG_CANNOT_CONTAIN_ITSELF = M("TAG_CANNOT_CONTAIN_ITSELF") + TAG_CANNOT_CREATE_EMPTY_TAG_NO_IDS_SPECIFIED = M( + "TAG_CANNOT_CREATE_EMPTY_TAG_NO_IDS_SPECIFIED" + ) ++# TODO: remove, use ADD_REMOVE reports + TAG_CANNOT_PUT_ID_NEXT_TO_ITSELF = M("TAG_CANNOT_PUT_ID_NEXT_TO_ITSELF") ++# TODO: remove, use ADD_REMOVE reports + TAG_CANNOT_REMOVE_ADJACENT_ID = M("TAG_CANNOT_REMOVE_ADJACENT_ID") ++# TODO: remove, use ADD_REMOVE reports + TAG_CANNOT_REMOVE_REFERENCES_WITHOUT_REMOVING_TAG = M( + "TAG_CANNOT_REMOVE_REFERENCES_WITHOUT_REMOVING_TAG" + ) +@@ -473,12 +509,15 @@ TAG_CANNOT_REMOVE_TAG_REFERENCED_IN_CONSTRAINTS = M( + TAG_CANNOT_REMOVE_TAGS_NO_TAGS_SPECIFIED = M( + "TAG_CANNOT_REMOVE_TAGS_NO_TAGS_SPECIFIED" + ) ++# TODO: remove, use ADD_REMOVE reports + TAG_CANNOT_SPECIFY_ADJACENT_ID_WITHOUT_IDS_TO_ADD = M( + "TAG_CANNOT_SPECIFY_ADJACENT_ID_WITHOUT_IDS_TO_ADD" + ) ++# TODO: remove, use ADD_REMOVE reports + TAG_CANNOT_UPDATE_TAG_NO_IDS_SPECIFIED = M( + "TAG_CANNOT_UPDATE_TAG_NO_IDS_SPECIFIED" + ) ++# TODO: remove, use ADD_REMOVE reports + TAG_IDS_NOT_IN_THE_TAG = M("TAG_IDS_NOT_IN_THE_TAG") + TMP_FILE_WRITE = M("TMP_FILE_WRITE") + UNABLE_TO_CONNECT_TO_ANY_REMAINING_NODE = M( +diff --git a/pcs/common/reports/const.py 
b/pcs/common/reports/const.py +index c551338e..88725eb3 100644 +--- a/pcs/common/reports/const.py ++++ b/pcs/common/reports/const.py +@@ -1,4 +1,6 @@ + from .types import ( ++ AddRemoveContainerType, ++ AddRemoveItemType, + BoothConfigUsedWhere, + DefaultAddressSource, + FenceHistoryCommandType, +@@ -9,6 +11,8 @@ from .types import ( + ) + + ++ADD_REMOVE_CONTAINER_TYPE_STONITH_RESOURCE = AddRemoveContainerType("stonith") ++ADD_REMOVE_ITEM_TYPE_DEVICE = AddRemoveItemType("device") + BOOTH_CONFIG_USED_IN_CLUSTER_RESOURCE = BoothConfigUsedWhere( + "in a cluster resource" + ) +diff --git a/pcs/common/reports/messages.py b/pcs/common/reports/messages.py +index a1c5db11..f9688437 100644 +--- a/pcs/common/reports/messages.py ++++ b/pcs/common/reports/messages.py +@@ -24,6 +24,7 @@ from pcs.common.str_tools import ( + format_list_custom_last_separator, + format_optional, + format_plural, ++ get_plural, + indent, + is_iterable_not_str, + ) +@@ -95,6 +96,14 @@ def _key_numeric(item: str) -> Tuple[int, str]: + return (int(item), item) if item.isdigit() else (-1, item) + + ++_add_remove_container_translation = { ++ const.ADD_REMOVE_CONTAINER_TYPE_STONITH_RESOURCE: "stonith resource", ++} ++ ++_add_remove_item_translation = { ++ const.ADD_REMOVE_ITEM_TYPE_DEVICE: "device", ++} ++ + _file_role_translation = { + file_type_codes.BOOTH_CONFIG: "Booth configuration", + file_type_codes.BOOTH_KEY: "Booth key", +@@ -129,6 +138,16 @@ _type_articles = { + } + + ++def _add_remove_container_str( ++ container: types.AddRemoveContainerType, ++) -> str: ++ return _add_remove_container_translation.get(container, container) ++ ++ ++def _add_remove_item_str(item: types.AddRemoveItemType) -> str: ++ return _add_remove_item_translation.get(item, item) ++ ++ + def _format_file_role(role: file_type_codes.FileTypeCode) -> str: + return _file_role_translation.get(role, role) + +@@ -2528,6 +2547,7 @@ class ResourceBundleAlreadyContainsAResource(ReportItemMessage): + ) + + ++# TODO: remove, use ADD_REMOVE reports + @dataclass(frozen=True) + class CannotGroupResourceAdjacentResourceForNewGroup(ReportItemMessage): + """ +@@ -2551,6 +2571,7 @@ class CannotGroupResourceAdjacentResourceForNewGroup(ReportItemMessage): + ) + + ++# TODO: remove, use ADD_REMOVE reports + @dataclass(frozen=True) + class CannotGroupResourceAdjacentResourceNotInGroup(ReportItemMessage): + """ +@@ -2573,6 +2594,7 @@ class CannotGroupResourceAdjacentResourceNotInGroup(ReportItemMessage): + ) + + ++# TODO: remove, use ADD_REMOVE reports + @dataclass(frozen=True) + class CannotGroupResourceAlreadyInTheGroup(ReportItemMessage): + """ +@@ -2593,6 +2615,7 @@ class CannotGroupResourceAlreadyInTheGroup(ReportItemMessage): + return f"{resources} already {exist} in '{self.group_id}'" + + ++# TODO: remove, use ADD_REMOVE reports + @dataclass(frozen=True) + class CannotGroupResourceMoreThanOnce(ReportItemMessage): + """ +@@ -2610,6 +2633,7 @@ class CannotGroupResourceMoreThanOnce(ReportItemMessage): + return f"Resources specified more than once: {resources}" + + ++# TODO: remove, use ADD_REMOVE reports + @dataclass(frozen=True) + class CannotGroupResourceNoResources(ReportItemMessage): + """ +@@ -2623,6 +2647,7 @@ class CannotGroupResourceNoResources(ReportItemMessage): + return "No resources to add" + + ++# TODO: remove, use ADD_REMOVE reports + @dataclass(frozen=True) + class CannotGroupResourceNextToItself(ReportItemMessage): + """ +@@ -6482,6 +6507,7 @@ class BoothTicketOperationFailed(ReportItemMessage): + ) + + ++# TODO: remove, use ADD_REMOVE reports + 
@dataclass(frozen=True) + class TagAddRemoveIdsDuplication(ReportItemMessage): + """ +@@ -6500,6 +6526,7 @@ class TagAddRemoveIdsDuplication(ReportItemMessage): + return f"Ids to {action} must be unique, duplicate ids: {duplicate_ids}" + + ++# TODO: remove, use ADD_REMOVE reports + @dataclass(frozen=True) + class TagAdjacentReferenceIdNotInTheTag(ReportItemMessage): + """ +@@ -6522,6 +6549,7 @@ class TagAdjacentReferenceIdNotInTheTag(ReportItemMessage): + ) + + ++# TODO: remove, use ADD_REMOVE reports + @dataclass(frozen=True) + class TagCannotAddAndRemoveIdsAtTheSameTime(ReportItemMessage): + """ +@@ -6540,6 +6568,7 @@ class TagCannotAddAndRemoveIdsAtTheSameTime(ReportItemMessage): + return f"Ids cannot be added and removed at the same time: {idref_list}" + + ++# TODO: remove, use ADD_REMOVE reports + @dataclass(frozen=True) + class TagCannotAddReferenceIdsAlreadyInTheTag(ReportItemMessage): + """ +@@ -6591,6 +6620,7 @@ class TagCannotCreateEmptyTagNoIdsSpecified(ReportItemMessage): + return "Cannot create empty tag, no resource ids specified" + + ++# TODO: remove, use ADD_REMOVE reports + @dataclass(frozen=True) + class TagCannotPutIdNextToItself(ReportItemMessage): + """ +@@ -6607,6 +6637,7 @@ class TagCannotPutIdNextToItself(ReportItemMessage): + return f"Cannot put id '{self.adjacent_id}' next to itself." + + ++# TODO: remove, use ADD_REMOVE reports + @dataclass(frozen=True) + class TagCannotRemoveAdjacentId(ReportItemMessage): + """ +@@ -6626,6 +6657,7 @@ class TagCannotRemoveAdjacentId(ReportItemMessage): + ) + + ++# TODO: remove, use ADD_REMOVE reports + @dataclass(frozen=True) + class TagCannotRemoveReferencesWithoutRemovingTag(ReportItemMessage): + """ +@@ -6678,6 +6710,7 @@ class TagCannotRemoveTagsNoTagsSpecified(ReportItemMessage): + return "Cannot remove tags, no tags to remove specified" + + ++# TODO: remove, use ADD_REMOVE reports + @dataclass(frozen=True) + class TagCannotSpecifyAdjacentIdWithoutIdsToAdd(ReportItemMessage): + """ +@@ -6697,6 +6730,7 @@ class TagCannotSpecifyAdjacentIdWithoutIdsToAdd(ReportItemMessage): + ) + + ++# TODO: remove, use ADD_REMOVE reports + @dataclass(frozen=True) + class TagCannotUpdateTagNoIdsSpecified(ReportItemMessage): + """ +@@ -6710,6 +6744,7 @@ class TagCannotUpdateTagNoIdsSpecified(ReportItemMessage): + return "Cannot update tag, no ids to be added or removed specified" + + ++# TODO: remove, use ADD_REMOVE reports + @dataclass(frozen=True) + class TagIdsNotInTheTag(ReportItemMessage): + """ +@@ -6850,3 +6885,257 @@ class CibNvsetAmbiguousProvideNvsetId(ReportItemMessage): + @property + def message(self) -> str: + return "Several options sets exist, please specify an option set ID" ++ ++ ++@dataclass(frozen=True) ++class AddRemoveItemsNotSpecified(ReportItemMessage): ++ """ ++ Cannot modify container, no add or remove items specified. 
++ ++ container_type -- type of item container ++ item_type -- type of item in a container ++ container_id -- id of a container ++ """ ++ ++ container_type: types.AddRemoveContainerType ++ item_type: types.AddRemoveItemType ++ container_id: str ++ _code = codes.ADD_REMOVE_ITEMS_NOT_SPECIFIED ++ ++ @property ++ def message(self) -> str: ++ container = _add_remove_container_str(self.container_type) ++ items = get_plural(_add_remove_item_str(self.item_type)) ++ return ( ++ f"Cannot modify {container} '{self.container_id}', no {items} to " ++ "add or remove specified" ++ ) ++ ++ ++@dataclass(frozen=True) ++class AddRemoveItemsDuplication(ReportItemMessage): ++ """ ++ Duplicate items were found in add/remove item lists. ++ ++ container_type -- type of item container ++ item_type -- type of item in a container ++ container_id -- id of a container ++ duplicate_items_list -- list of duplicate items ++ """ ++ ++ container_type: types.AddRemoveContainerType ++ item_type: types.AddRemoveItemType ++ container_id: str ++ duplicate_items_list: List[str] ++ _code = codes.ADD_REMOVE_ITEMS_DUPLICATION ++ ++ @property ++ def message(self) -> str: ++ items = get_plural(_add_remove_item_str(self.item_type)) ++ duplicate_items = format_list(self.duplicate_items_list) ++ return ( ++ f"{items.capitalize()} to add or remove must be unique, duplicate " ++ f"{items}: {duplicate_items}" ++ ) ++ ++ ++@dataclass(frozen=True) ++class AddRemoveCannotAddItemsAlreadyInTheContainer(ReportItemMessage): ++ """ ++ Cannot add items already existing in the container. ++ ++ container_type -- type of item container ++ item_type -- type of item in a container ++ container_id -- id of a container ++ item_list -- list of items already in the container ++ """ ++ ++ container_type: types.AddRemoveContainerType ++ item_type: types.AddRemoveItemType ++ container_id: str ++ item_list: List[str] ++ _code = codes.ADD_REMOVE_CANNOT_ADD_ITEMS_ALREADY_IN_THE_CONTAINER ++ ++ @property ++ def message(self) -> str: ++ items = format_plural( ++ self.item_list, _add_remove_item_str(self.item_type) ++ ) ++ item_list = format_list(self.item_list) ++ they = format_plural(self.item_list, "it") ++ are = format_plural(self.item_list, "is") ++ container = _add_remove_container_str(self.container_type) ++ return ( ++ f"Cannot add {items} {item_list}, {they} {are} already present in " ++ f"{container} '{self.container_id}'" ++ ) ++ ++ ++@dataclass(frozen=True) ++class AddRemoveCannotRemoveItemsNotInTheContainer(ReportItemMessage): ++ """ ++ Cannot remove items not existing in the container. 
++ ++ container_type -- type of item container ++ item_type -- type of item in a container ++ container_id -- id of a container ++ item_list -- list of items not in the container ++ """ ++ ++ container_type: types.AddRemoveContainerType ++ item_type: types.AddRemoveItemType ++ container_id: str ++ item_list: List[str] ++ _code = codes.ADD_REMOVE_CANNOT_REMOVE_ITEMS_NOT_IN_THE_CONTAINER ++ ++ @property ++ def message(self) -> str: ++ items = format_plural( ++ self.item_list, _add_remove_item_str(self.item_type) ++ ) ++ item_list = format_list(self.item_list) ++ they = format_plural(self.item_list, "it") ++ are = format_plural(self.item_list, "is") ++ container = _add_remove_container_str(self.container_type) ++ items = format_plural( ++ self.item_list, _add_remove_item_str(self.item_type) ++ ) ++ return ( ++ f"Cannot remove {items} {item_list}, {they} {are} not present in " ++ f"{container} '{self.container_id}'" ++ ) ++ ++ ++@dataclass(frozen=True) ++class AddRemoveCannotAddAndRemoveItemsAtTheSameTime(ReportItemMessage): ++ """ ++ Cannot add and remove items at the same time. Avoid operation without an ++ effect. ++ ++ container_type -- type of item container ++ item_type -- type of item in a container ++ container_id -- id of a container ++ item_list -- common items from add and remove item lists ++ """ ++ ++ container_type: types.AddRemoveContainerType ++ item_type: types.AddRemoveItemType ++ container_id: str ++ item_list: List[str] ++ _code = codes.ADD_REMOVE_CANNOT_ADD_AND_REMOVE_ITEMS_AT_THE_SAME_TIME ++ ++ @property ++ def message(self) -> str: ++ items = format_plural( ++ self.item_list, _add_remove_item_str(self.item_type) ++ ) ++ item_list = format_list(self.item_list) ++ return ( ++ f"{items.capitalize()} cannot be added and removed at the same " ++ f"time: {item_list}" ++ ) ++ ++ ++@dataclass(frozen=True) ++class AddRemoveCannotRemoveAllItemsFromTheContainer(ReportItemMessage): ++ """ ++ Cannot remove all items from a container. ++ ++ container_type -- type of item container ++ item_type -- type of item in a container ++ container_id -- id of a container ++ item_list -- common items from add and remove item lists ++ """ ++ ++ container_type: types.AddRemoveContainerType ++ item_type: types.AddRemoveItemType ++ container_id: str ++ item_list: List[str] ++ _code = codes.ADD_REMOVE_CANNOT_REMOVE_ALL_ITEMS_FROM_THE_CONTAINER ++ ++ @property ++ def message(self) -> str: ++ container = _add_remove_container_str(self.container_type) ++ items = get_plural(_add_remove_item_str(self.item_type)) ++ return ( ++ f"Cannot remove all {items} from {container} '{self.container_id}'" ++ ) ++ ++ ++@dataclass(frozen=True) ++class AddRemoveAdjacentItemNotInTheContainer(ReportItemMessage): ++ """ ++ Cannot put items next to an adjacent item in the container, because the ++ adjacent item does not exist in the container. 
++ ++ container_type -- type of item container ++ item_type -- type of item in a container ++ container_id -- id of a container ++ adjacent_item_id -- id of an adjacent item ++ """ ++ ++ container_type: types.AddRemoveContainerType ++ item_type: types.AddRemoveItemType ++ container_id: str ++ adjacent_item_id: str ++ _code = codes.ADD_REMOVE_ADJACENT_ITEM_NOT_IN_THE_CONTAINER ++ ++ @property ++ def message(self) -> str: ++ container = _add_remove_container_str(self.container_type) ++ item = _add_remove_item_str(self.item_type) ++ items = get_plural(item) ++ return ( ++ f"There is no {item} '{self.adjacent_item_id}' in the " ++ f"{container} '{self.container_id}', cannot add {items} next to it" ++ ) ++ ++ ++@dataclass(frozen=True) ++class AddRemoveCannotPutItemNextToItself(ReportItemMessage): ++ """ ++ Cannot put an item into a container next to itself. ++ ++ container_type -- type of item container ++ item_type -- type of item in a container ++ container_id -- id of a container ++ adjacent_item_id -- id of an adjacent item ++ """ ++ ++ container_type: types.AddRemoveContainerType ++ item_type: types.AddRemoveItemType ++ container_id: str ++ adjacent_item_id: str ++ _code = codes.ADD_REMOVE_CANNOT_PUT_ITEM_NEXT_TO_ITSELF ++ ++ @property ++ def message(self) -> str: ++ item = _add_remove_item_str(self.item_type) ++ return f"Cannot put {item} '{self.adjacent_item_id}' next to itself" ++ ++ ++@dataclass(frozen=True) ++class AddRemoveCannotSpecifyAdjacentItemWithoutItemsToAdd(ReportItemMessage): ++ """ ++ Cannot specify adjacent item without items to add. ++ ++ container_type -- type of item container ++ item_type -- type of item in a container ++ container_id -- id of a container ++ adjacent_item_id -- id of an adjacent item ++ """ ++ ++ container_type: types.AddRemoveContainerType ++ item_type: types.AddRemoveItemType ++ container_id: str ++ adjacent_item_id: str ++ _code = codes.ADD_REMOVE_CANNOT_SPECIFY_ADJACENT_ITEM_WITHOUT_ITEMS_TO_ADD ++ ++ @property ++ def message(self) -> str: ++ item = _add_remove_item_str(self.item_type) ++ items = get_plural(item) ++ return ( ++ f"Cannot specify adjacent {item} '{self.adjacent_item_id}' without " ++ f"{items} to add" ++ ) +diff --git a/pcs/common/reports/types.py b/pcs/common/reports/types.py +index fa7fdf4d..610c16f1 100644 +--- a/pcs/common/reports/types.py ++++ b/pcs/common/reports/types.py +@@ -1,5 +1,7 @@ + from typing import NewType + ++AddRemoveContainerType = NewType("AddRemoveContainerType", str) ++AddRemoveItemType = NewType("AddRemoveItemType", str) + BoothConfigUsedWhere = NewType("BoothConfigUsedWhere", str) + DefaultAddressSource = NewType("DefaultAddressSource", str) + FenceHistoryCommandType = NewType("FenceHistoryCommandType", str) +diff --git a/pcs/common/str_tools.py b/pcs/common/str_tools.py +index 98fe5f50..b8dccc0c 100644 +--- a/pcs/common/str_tools.py ++++ b/pcs/common/str_tools.py +@@ -131,6 +131,23 @@ def _add_s(word): + return word + "s" + + ++def get_plural(singular: str) -> str: ++ """ ++ Take singular word form and return plural. 
++ ++ singular -- singular word (like: is, do, node) ++ """ ++ common_plurals = { ++ "is": "are", ++ "has": "have", ++ "does": "do", ++ "it": "they", ++ } ++ if singular in common_plurals: ++ return common_plurals[singular] ++ return _add_s(singular) ++ ++ + def format_plural( + depends_on: Union[int, Iterable[Any]], + singular: str, +@@ -145,18 +162,11 @@ def format_plural( + singular -- singular word (like: is, do, node) + plural -- optional irregular plural form + """ +- common_plurals = { +- "is": "are", +- "has": "have", +- "does": "do", +- } + if not _is_multiple(depends_on): + return singular + if plural: + return plural +- if singular in common_plurals: +- return common_plurals[singular] +- return _add_s(singular) ++ return get_plural(singular) + + + T = TypeVar("T") +diff --git a/pcs/lib/commands/stonith.py b/pcs/lib/commands/stonith.py +index 036e3fa5..6f26e7d3 100644 +--- a/pcs/lib/commands/stonith.py ++++ b/pcs/lib/commands/stonith.py +@@ -1,9 +1,15 @@ +-from typing import Container, Iterable, Optional ++from collections import Counter ++from typing import Container, Iterable, List, Optional, Set, Tuple ++ ++from lxml.etree import _Element + + from pcs.common import reports ++from pcs.common.reports import ReportItemList ++from pcs.common.reports import ReportProcessor + from pcs.common.reports.item import ReportItem + from pcs.lib.cib import resource + from pcs.lib.cib import stonith ++from pcs.lib.cib.nvpair import INSTANCE_ATTRIBUTES_TAG, get_value + from pcs.lib.cib.resource.common import are_meta_disabled + from pcs.lib.cib.tools import IdProvider + from pcs.lib.commands.resource import ( +@@ -20,6 +26,7 @@ from pcs.lib.communication.tools import ( + ) + from pcs.lib.env import LibraryEnvironment + from pcs.lib.errors import LibraryError ++from pcs.lib.external import CommandRunner + from pcs.lib.node import get_existing_nodes_names + from pcs.lib.pacemaker.live import ( + FenceHistoryCommandErrorException, +@@ -268,55 +275,195 @@ def history_update(env: LibraryEnvironment): + ) from e + + +-def update_scsi_devices( +- env: LibraryEnvironment, +- stonith_id: str, +- set_device_list: Iterable[str], +- force_flags: Container[reports.types.ForceCode] = (), +-) -> None: ++def _validate_add_remove_items( ++ add_item_list: Iterable[str], ++ remove_item_list: Iterable[str], ++ current_item_list: Iterable[str], ++ container_type: reports.types.AddRemoveContainerType, ++ item_type: reports.types.AddRemoveItemType, ++ container_id: str, ++ adjacent_item_id: Optional[str] = None, ++ container_can_be_empty: bool = False, ++) -> ReportItemList: + """ +- Update scsi fencing devices without restart and affecting other resources. ++ Validate if items can be added or removed to or from a container. 
+ +- env -- provides all for communication with externals +- stonith_id -- id of stonith resource +- set_device_list -- paths to the scsi devices that would be set for stonith +- resource +- force_flags -- list of flags codes ++ add_item_list -- items to be added ++ remove_item_list -- items to be removed ++ current_item_list -- items currently in the container ++ container_type -- container type ++ item_type -- item type ++ container_id -- id of the container ++ adjacent_item_id -- an adjacent item in the container ++ container_can_be_empty -- flag to decide if container can be left empty + """ +- if not is_getting_resource_digest_supported(env.cmd_runner()): +- raise LibraryError( ++ # pylint: disable=too-many-locals ++ report_list: ReportItemList = [] ++ if not add_item_list and not remove_item_list: ++ report_list.append( + ReportItem.error( +- reports.messages.StonithRestartlessUpdateOfScsiDevicesNotSupported() ++ reports.messages.AddRemoveItemsNotSpecified( ++ container_type, item_type, container_id ++ ) + ) + ) +- cib = env.get_cib() +- if not set_device_list: +- env.report_processor.report( ++ ++ def _get_duplicate_items(item_list: Iterable[str]) -> Set[str]: ++ return {item for item, count in Counter(item_list).items() if count > 1} ++ ++ duplicate_items_list = _get_duplicate_items( ++ add_item_list ++ ) | _get_duplicate_items(remove_item_list) ++ if duplicate_items_list: ++ report_list.append( + ReportItem.error( +- reports.messages.InvalidOptionValue( +- "devices", "", None, cannot_be_empty=True ++ reports.messages.AddRemoveItemsDuplication( ++ container_type, ++ item_type, ++ container_id, ++ sorted(duplicate_items_list), ++ ) ++ ) ++ ) ++ already_present = set(add_item_list).intersection(current_item_list) ++ # report only if an adjacent id is not defined, because we want to allow ++ # to move items when adjacent_item_id is specified ++ if adjacent_item_id is None and already_present: ++ report_list.append( ++ ReportItem.error( ++ reports.messages.AddRemoveCannotAddItemsAlreadyInTheContainer( ++ container_type, ++ item_type, ++ container_id, ++ sorted(already_present), ++ ) ++ ) ++ ) ++ missing_items = set(remove_item_list).difference(current_item_list) ++ if missing_items: ++ report_list.append( ++ ReportItem.error( ++ reports.messages.AddRemoveCannotRemoveItemsNotInTheContainer( ++ container_type, ++ item_type, ++ container_id, ++ sorted(missing_items), + ) + ) + ) ++ common_items = set(add_item_list) & set(remove_item_list) ++ if common_items: ++ report_list.append( ++ ReportItem.error( ++ reports.messages.AddRemoveCannotAddAndRemoveItemsAtTheSameTime( ++ container_type, ++ item_type, ++ container_id, ++ sorted(common_items), ++ ) ++ ) ++ ) ++ if not container_can_be_empty and not add_item_list: ++ remaining_items = set(current_item_list).difference(remove_item_list) ++ if not remaining_items: ++ report_list.append( ++ ReportItem.error( ++ reports.messages.AddRemoveCannotRemoveAllItemsFromTheContainer( ++ container_type, ++ item_type, ++ container_id, ++ list(current_item_list), ++ ) ++ ) ++ ) ++ if adjacent_item_id: ++ if adjacent_item_id not in current_item_list: ++ report_list.append( ++ ReportItem.error( ++ reports.messages.AddRemoveAdjacentItemNotInTheContainer( ++ container_type, ++ item_type, ++ container_id, ++ adjacent_item_id, ++ ) ++ ) ++ ) ++ if adjacent_item_id in add_item_list: ++ report_list.append( ++ ReportItem.error( ++ reports.messages.AddRemoveCannotPutItemNextToItself( ++ container_type, ++ item_type, ++ container_id, ++ adjacent_item_id, ++ ) ++ ) ++ ) 
++ if not add_item_list: ++ report_list.append( ++ ReportItem.error( ++ reports.messages.AddRemoveCannotSpecifyAdjacentItemWithoutItemsToAdd( ++ container_type, ++ item_type, ++ container_id, ++ adjacent_item_id, ++ ) ++ ) ++ ) ++ return report_list ++ ++ ++def _update_scsi_devices_get_element_and_devices( ++ runner: CommandRunner, ++ report_processor: ReportProcessor, ++ cib: _Element, ++ stonith_id: str, ++) -> Tuple[_Element, List[str]]: ++ """ ++ Do checks and return stonith element and list of current scsi devices. ++ Raise LibraryError if checks fail. ++ ++ runner -- command runner instance ++ report_processor -- tool for warning/info/error reporting ++ cib -- cib element ++ stonith_id -- id of stonith resource ++ """ ++ if not is_getting_resource_digest_supported(runner): ++ raise LibraryError( ++ ReportItem.error( ++ reports.messages.StonithRestartlessUpdateOfScsiDevicesNotSupported() ++ ) ++ ) + ( + stonith_el, + report_list, + ) = stonith.validate_stonith_restartless_update(cib, stonith_id) +- if env.report_processor.report_list(report_list).has_errors: ++ if report_processor.report_list(report_list).has_errors: + raise LibraryError() +- # for mypy, this should not happen because exeption would be raised ++ # for mypy, this should not happen because exception would be raised + if stonith_el is None: + raise AssertionError("stonith element is None") +- +- stonith.update_scsi_devices_without_restart( +- env.cmd_runner(), +- env.get_cluster_state(), +- stonith_el, +- IdProvider(cib), +- set_device_list, ++ current_device_list = get_value( ++ INSTANCE_ATTRIBUTES_TAG, stonith_el, "devices" + ) ++ if current_device_list is None: ++ raise AssertionError("current_device_list is None") ++ return stonith_el, current_device_list.split(",") ++ ++ ++def _unfencing_scsi_devices( ++ env: LibraryEnvironment, ++ device_list: Iterable[str], ++ force_flags: Container[reports.types.ForceCode] = (), ++) -> None: ++ """ ++ Unfence scsi devices provided in device_list if it is possible to connect ++ to pcsd and corosync is running. + +- # Unfencing ++ env -- provides all for communication with externals ++ device_list -- devices to be unfenced ++ force_flags -- list of flags codes ++ """ + cluster_nodes_names, nodes_report_list = get_existing_nodes_names( + env.get_corosync_conf(), + error_on_missing_name=True, +@@ -340,8 +487,104 @@ def update_scsi_devices( + online_corosync_target_list = run_and_raise( + env.get_node_communicator(), com_cmd + ) +- com_cmd = Unfence(env.report_processor, sorted(set_device_list)) ++ com_cmd = Unfence(env.report_processor, sorted(device_list)) + com_cmd.set_targets(online_corosync_target_list) + run_and_raise(env.get_node_communicator(), com_cmd) + ++ ++def update_scsi_devices( ++ env: LibraryEnvironment, ++ stonith_id: str, ++ set_device_list: Iterable[str], ++ force_flags: Container[reports.types.ForceCode] = (), ++) -> None: ++ """ ++ Update scsi fencing devices without restart and affecting other resources. 
++ ++ env -- provides all for communication with externals ++ stonith_id -- id of stonith resource ++ set_device_list -- paths to the scsi devices that would be set for stonith ++ resource ++ force_flags -- list of flags codes ++ """ ++ if not set_device_list: ++ env.report_processor.report( ++ ReportItem.error( ++ reports.messages.InvalidOptionValue( ++ "devices", "", None, cannot_be_empty=True ++ ) ++ ) ++ ) ++ runner = env.cmd_runner() ++ ( ++ stonith_el, ++ current_device_list, ++ ) = _update_scsi_devices_get_element_and_devices( ++ runner, env.report_processor, env.get_cib(), stonith_id ++ ) ++ if env.report_processor.has_errors: ++ raise LibraryError() ++ stonith.update_scsi_devices_without_restart( ++ runner, ++ env.get_cluster_state(), ++ stonith_el, ++ IdProvider(stonith_el), ++ set_device_list, ++ ) ++ devices_for_unfencing = set(set_device_list).difference(current_device_list) ++ if devices_for_unfencing: ++ _unfencing_scsi_devices(env, devices_for_unfencing, force_flags) ++ env.push_cib() ++ ++ ++def update_scsi_devices_add_remove( ++ env: LibraryEnvironment, ++ stonith_id: str, ++ add_device_list: Iterable[str], ++ remove_device_list: Iterable[str], ++ force_flags: Container[reports.types.ForceCode] = (), ++) -> None: ++ """ ++ Update scsi fencing devices without restart and affecting other resources. ++ ++ env -- provides all for communication with externals ++ stonith_id -- id of stonith resource ++ add_device_list -- paths to the scsi devices that would be added to the ++ stonith resource ++ remove_device_list -- paths to the scsi devices that would be removed from ++ the stonith resource ++ force_flags -- list of flags codes ++ """ ++ runner = env.cmd_runner() ++ ( ++ stonith_el, ++ current_device_list, ++ ) = _update_scsi_devices_get_element_and_devices( ++ runner, env.report_processor, env.get_cib(), stonith_id ++ ) ++ if env.report_processor.report_list( ++ _validate_add_remove_items( ++ add_device_list, ++ remove_device_list, ++ current_device_list, ++ reports.const.ADD_REMOVE_CONTAINER_TYPE_STONITH_RESOURCE, ++ reports.const.ADD_REMOVE_ITEM_TYPE_DEVICE, ++ stonith_el.get("id", ""), ++ ) ++ ).has_errors: ++ raise LibraryError() ++ updated_device_set = ( ++ set(current_device_list) ++ .union(add_device_list) ++ .difference(remove_device_list) ++ ) ++ stonith.update_scsi_devices_without_restart( ++ env.cmd_runner(), ++ env.get_cluster_state(), ++ stonith_el, ++ IdProvider(stonith_el), ++ updated_device_set, ++ ) ++ if add_device_list: ++ _unfencing_scsi_devices(env, add_device_list, force_flags) + env.push_cib() +diff --git a/pcs/pcs.8.in b/pcs/pcs.8.in +index ac093d69..1695d75c 100644 +--- a/pcs/pcs.8.in ++++ b/pcs/pcs.8.in +@@ -664,8 +664,8 @@ pcs stonith create MyFence fence_virt 'pcmk_host_map=n1:p1;n2:p2,p3' + update [stonith device options] + Add/Change options to specified stonith id. + .TP +-update\-scsi\-devices set [...] +-Update scsi fencing devices without affecting other resources. Stonith resource must be running on one cluster node. Each device will be unfenced on each cluster node running cluster. Supported fence agents: fence_scsi. ++update\-scsi\-devices (set [...]) | (add [...] delete|remove [...] ) ++Update scsi fencing devices without affecting other resources. You must specify either list of set devices or at least one device for add or delete/remove devices. Stonith resource must be running on one cluster node. Each device will be unfenced on each cluster node running cluster. Supported fence agents: fence_scsi. 
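The two command forms documented above differ mainly in how the resulting device set and the devices to unfence are computed; a minimal sketch of that set arithmetic, using illustrative device paths rather than values taken from any cluster:

    current = {"/dev/sda", "/dev/sdb"}

    # "set" form: the given list replaces the current one; afterwards only
    # devices that were not already configured are unfenced.
    set_list = ["/dev/sdb", "/dev/sdc"]
    updated_by_set = set(set_list)
    unfence_after_set = updated_by_set - current            # {'/dev/sdc'}

    # "add ... delete|remove ..." form: the current set is extended and
    # reduced; afterwards only the added devices are unfenced.
    add, remove = ["/dev/sdc"], ["/dev/sda"]
    updated_by_add_remove = (current | set(add)) - set(remove)
    unfence_after_add_remove = set(add)                     # {'/dev/sdc'}

    print(sorted(updated_by_set), sorted(unfence_after_set))
    print(sorted(updated_by_add_remove), sorted(unfence_after_add_remove))
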
+ .TP + delete + Remove stonith id from configuration. +diff --git a/pcs/stonith.py b/pcs/stonith.py +index c7eb14de..6ed8b751 100644 +--- a/pcs/stonith.py ++++ b/pcs/stonith.py +@@ -894,24 +894,43 @@ def stonith_update_scsi_devices(lib, argv, modifiers): + * --skip-offline - skip unreachable nodes + """ + modifiers.ensure_only_supported("--request-timeout", "--skip-offline") ++ force_flags = [] ++ if modifiers.get("--skip-offline"): ++ force_flags.append(reports.codes.SKIP_OFFLINE_NODES) ++ + if len(argv) < 2: + raise CmdLineInputError() + stonith_id = argv[0] + parsed_args = parse_args.group_by_keywords( + argv[1:], +- ["set"], ++ ["set", "add", "remove", "delete"], + keyword_repeat_allowed=False, + only_found_keywords=True, + ) +- set_args = parsed_args["set"] if "set" in parsed_args else [] +- if not set_args: +- raise CmdLineInputError( +- show_both_usage_and_message=True, +- hint="You must specify set devices to be updated", +- ) +- force_flags = [] +- if modifiers.get("--skip-offline"): +- force_flags.append(reports.codes.SKIP_OFFLINE_NODES) +- lib.stonith.update_scsi_devices( +- stonith_id, set_args, force_flags=force_flags ++ cmd_exception = CmdLineInputError( ++ show_both_usage_and_message=True, ++ hint=( ++ "You must specify either list of set devices or at least one device" ++ " for add or delete/remove devices" ++ ), + ) ++ if "set" in parsed_args and {"add", "remove", "delete"} & set( ++ parsed_args.keys() ++ ): ++ raise cmd_exception ++ if "set" in parsed_args: ++ if not parsed_args["set"]: ++ raise cmd_exception ++ lib.stonith.update_scsi_devices( ++ stonith_id, parsed_args["set"], force_flags=force_flags ++ ) ++ else: ++ for key in ("add", "remove", "delete"): ++ if key in parsed_args and not parsed_args[key]: ++ raise cmd_exception ++ lib.stonith.update_scsi_devices_add_remove( ++ stonith_id, ++ parsed_args.get("add", []), ++ parsed_args.get("delete", []) + parsed_args.get("remove", []), ++ force_flags=force_flags, ++ ) +diff --git a/pcs/usage.py b/pcs/usage.py +index 38e21ed9..66e097f1 100644 +--- a/pcs/usage.py ++++ b/pcs/usage.py +@@ -1289,11 +1289,14 @@ Commands: + update [stonith device options] + Add/Change options to specified stonith id. + +- update-scsi-devices set [...] +- Update scsi fencing devices without affecting other resources. Stonith +- resource must be running on one cluster node. Each device will be +- unfenced on each cluster node running cluster. Supported fence agents: +- fence_scsi. ++ update-scsi-devices (set [...]) ++ | (add [...] delete|remove ++ [device-path>...]) ++ Update scsi fencing devices without affecting other resources. You must ++ specify either list of set devices or at least one device for add or ++ delete/remove devices. Stonith resource must be running on one cluster ++ node. Each device will be unfenced on each cluster node running ++ cluster. Supported fence agents: fence_scsi. + + delete + Remove stonith id from configuration. 
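A hedged sketch of how the new CLI keywords map onto the two library entry points, mirroring the keyword grouping done in pcs/stonith.py above; lib and parsed are assumed placeholders for the library wrapper and the dict produced by grouping argv by the keywords "set", "add", "remove" and "delete".

    def dispatch_update_scsi_devices(lib, stonith_id, parsed):
        # Illustrative only; not the pcs implementation itself.
        if "set" in parsed:
            # e.g. pcs stonith update-scsi-devices fence-scsi set /dev/sdb /dev/sdc
            lib.stonith.update_scsi_devices(
                stonith_id, parsed["set"], force_flags=[]
            )
        else:
            # e.g. pcs stonith update-scsi-devices fence-scsi add /dev/sdc remove /dev/sda
            # "delete" and "remove" are treated as synonyms and concatenated.
            lib.stonith.update_scsi_devices_add_remove(
                stonith_id,
                parsed.get("add", []),
                parsed.get("delete", []) + parsed.get("remove", []),
                force_flags=[],
            )
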
+diff --git a/pcs_test/Makefile.am b/pcs_test/Makefile.am +index b4df00e2..c7346f96 100644 +--- a/pcs_test/Makefile.am ++++ b/pcs_test/Makefile.am +@@ -236,6 +236,7 @@ EXTRA_DIST = \ + tier0/lib/commands/test_stonith_agent.py \ + tier0/lib/commands/test_stonith_history.py \ + tier0/lib/commands/test_stonith.py \ ++ tier0/lib/commands/test_stonith_update_scsi_devices.py \ + tier0/lib/commands/test_ticket.py \ + tier0/lib/communication/__init__.py \ + tier0/lib/communication/test_booth.py \ +diff --git a/pcs_test/tier0/cli/test_stonith.py b/pcs_test/tier0/cli/test_stonith.py +index 5bc18f3c..a54b442e 100644 +--- a/pcs_test/tier0/cli/test_stonith.py ++++ b/pcs_test/tier0/cli/test_stonith.py +@@ -149,15 +149,41 @@ class SbdDeviceSetup(TestCase): + + + class StonithUpdateScsiDevices(TestCase): ++ # pylint: disable=too-many-public-methods + def setUp(self): + self.lib = mock.Mock(spec_set=["stonith"]) +- self.stonith = mock.Mock(spec_set=["update_scsi_devices"]) ++ self.stonith = mock.Mock( ++ spec_set=["update_scsi_devices", "update_scsi_devices_add_remove"] ++ ) + self.lib.stonith = self.stonith + + def assert_called_with(self, stonith_id, set_devices, force_flags): + self.stonith.update_scsi_devices.assert_called_once_with( + stonith_id, set_devices, force_flags=force_flags + ) ++ self.stonith.update_scsi_devices_add_remove.assert_not_called() ++ ++ def assert_add_remove_called_with( ++ self, stonith_id, add_devices, remove_devices, force_flags ++ ): ++ self.stonith.update_scsi_devices_add_remove.assert_called_once_with( ++ stonith_id, add_devices, remove_devices, force_flags=force_flags ++ ) ++ self.stonith.update_scsi_devices.assert_not_called() ++ ++ def assert_bad_syntax_cli_exception(self, args): ++ with self.assertRaises(CmdLineInputError) as cm: ++ self.call_cmd(args) ++ self.assertEqual(cm.exception.message, None) ++ self.assertEqual( ++ cm.exception.hint, ++ ( ++ "You must specify either list of set devices or at least one " ++ "device for add or delete/remove devices" ++ ), ++ ) ++ self.stonith.update_scsi_devices.assert_not_called() ++ self.stonith.update_scsi_devices_add_remove.assert_not_called() + + def call_cmd(self, argv, modifiers=None): + stonith.stonith_update_scsi_devices( +@@ -174,44 +200,141 @@ class StonithUpdateScsiDevices(TestCase): + self.call_cmd(["stonith-id"]) + self.assertEqual(cm.exception.message, None) + +- def test_not_set_keyword(self): ++ def test_unknown_keyword(self): + with self.assertRaises(CmdLineInputError) as cm: + self.call_cmd(["stonith-id", "unset"]) + self.assertEqual(cm.exception.message, None) + +- def test_only_set_keyword(self): +- with self.assertRaises(CmdLineInputError) as cm: +- self.call_cmd(["stonith-id", "set"]) +- self.assertEqual(cm.exception.message, None) +- self.assertEqual( +- cm.exception.hint, "You must specify set devices to be updated" +- ) +- +- def test_one_device(self): +- self.call_cmd(["stonith-id", "set", "device1"]) +- self.assert_called_with("stonith-id", ["device1"], []) +- +- def test_more_devices(self): +- self.call_cmd(["stonith-id", "set", "device1", "device2"]) +- self.assert_called_with("stonith-id", ["device1", "device2"], []) +- + def test_supported_options(self): + self.call_cmd( +- ["stonith-id", "set", "device1", "device2"], ++ ["stonith-id", "set", "d1", "d2"], + {"skip-offline": True, "request-timeout": 60}, + ) + self.assert_called_with( + "stonith-id", +- ["device1", "device2"], ++ ["d1", "d2"], + [reports.codes.SKIP_OFFLINE_NODES], + ) + + def test_unsupported_options(self): + with 
self.assertRaises(CmdLineInputError) as cm: +- self.call_cmd( +- ["stonith-id", "set", "device1", "device2"], {"force": True} +- ) ++ self.call_cmd(["stonith-id", "set", "d1", "d2"], {"force": True}) + self.assertEqual( + cm.exception.message, + "Specified option '--force' is not supported in this command", + ) ++ ++ def test_only_set_keyword(self): ++ self.assert_bad_syntax_cli_exception(["stonith-id", "set"]) ++ ++ def test_only_add_keyword(self): ++ self.assert_bad_syntax_cli_exception(["stonith-id", "add"]) ++ ++ def test_only_remove_keyword(self): ++ self.assert_bad_syntax_cli_exception(["stonith-id", "remove"]) ++ ++ def test_only_delete_keyword(self): ++ self.assert_bad_syntax_cli_exception(["stonith-id", "delete"]) ++ ++ def test_add_and_empty_remove(self): ++ self.assert_bad_syntax_cli_exception( ++ ["stonith-id", "add", "d1", "remove"] ++ ) ++ ++ def test_add_and_empty_delete(self): ++ self.assert_bad_syntax_cli_exception( ++ ["stonith-id", "add", "d1", "delete"] ++ ) ++ ++ def test_empty_add_and_remove(self): ++ self.assert_bad_syntax_cli_exception( ++ ["stonith-id", "add", "remove", "d1"] ++ ) ++ ++ def test_empty_add_and_delete(self): ++ self.assert_bad_syntax_cli_exception( ++ ["stonith-id", "add", "delete", "d1"] ++ ) ++ ++ def test_empty_remove_and_delete(self): ++ self.assert_bad_syntax_cli_exception( ++ ["stonith-id", "remove", "delete", "d1"] ++ ) ++ ++ def test_empty_delete_and_remove(self): ++ self.assert_bad_syntax_cli_exception( ++ ["stonith-id", "delete", "remove", "d1"] ++ ) ++ ++ def test_empty_add_empty_remove_empty_delete(self): ++ self.assert_bad_syntax_cli_exception( ++ ["stonith-id", "add", "delete", "remove"] ++ ) ++ ++ def test_set_add_remove_delete_devices(self): ++ self.assert_bad_syntax_cli_exception( ++ [ ++ "stonith-id", ++ "set", ++ "add", ++ "d2", ++ "remove", ++ "d3", ++ "delete", ++ "d4", ++ ] ++ ) ++ ++ def test_set_devices(self): ++ self.call_cmd(["stonith-id", "set", "d1", "d2"]) ++ self.assert_called_with("stonith-id", ["d1", "d2"], []) ++ ++ def test_add_devices(self): ++ self.call_cmd(["stonith-id", "add", "d1", "d2"]) ++ self.assert_add_remove_called_with("stonith-id", ["d1", "d2"], [], []) ++ ++ def test_remove_devices(self): ++ self.call_cmd(["stonith-id", "remove", "d1", "d2"]) ++ self.assert_add_remove_called_with("stonith-id", [], ["d1", "d2"], []) ++ ++ def test_delete_devices(self): ++ self.call_cmd(["stonith-id", "delete", "d1", "d2"]) ++ self.assert_add_remove_called_with("stonith-id", [], ["d1", "d2"], []) ++ ++ def test_add_remove_devices(self): ++ self.call_cmd(["stonith-id", "add", "d1", "d2", "remove", "d3", "d4"]) ++ self.assert_add_remove_called_with( ++ "stonith-id", ["d1", "d2"], ["d3", "d4"], [] ++ ) ++ ++ def test_add_delete_devices(self): ++ self.call_cmd(["stonith-id", "add", "d1", "d2", "delete", "d3", "d4"]) ++ self.assert_add_remove_called_with( ++ "stonith-id", ["d1", "d2"], ["d3", "d4"], [] ++ ) ++ ++ def test_add_delete_remove_devices(self): ++ self.call_cmd( ++ [ ++ "stonith-id", ++ "add", ++ "d1", ++ "d2", ++ "delete", ++ "d3", ++ "d4", ++ "remove", ++ "d5", ++ ] ++ ) ++ self.assert_add_remove_called_with( ++ "stonith-id", ["d1", "d2"], ["d3", "d4", "d5"], [] ++ ) ++ ++ def test_remove_delete_devices(self): ++ self.call_cmd( ++ ["stonith-id", "remove", "d2", "d1", "delete", "d4", "d3"] ++ ) ++ self.assert_add_remove_called_with( ++ "stonith-id", [], ["d4", "d3", "d2", "d1"], [] ++ ) +diff --git a/pcs_test/tier0/common/reports/test_messages.py b/pcs_test/tier0/common/reports/test_messages.py +index 
0cb97138..b0826cfd 100644 +--- a/pcs_test/tier0/common/reports/test_messages.py ++++ b/pcs_test/tier0/common/reports/test_messages.py +@@ -1761,6 +1761,7 @@ class ResourceBundleAlreadyContainsAResource(NameBuildTest): + ) + + ++# TODO: remove, use ADD_REMOVE reports + class CannotGroupResourceAdjacentResourceForNewGroup(NameBuildTest): + def test_success(self): + self.assert_message_from_report( +@@ -1772,6 +1773,7 @@ class CannotGroupResourceAdjacentResourceForNewGroup(NameBuildTest): + ) + + ++# TODO: remove, use ADD_REMOVE reports + class CannotGroupResourceAdjacentResourceNotInGroup(NameBuildTest): + def test_success(self): + self.assert_message_from_report( +@@ -1783,6 +1785,7 @@ class CannotGroupResourceAdjacentResourceNotInGroup(NameBuildTest): + ) + + ++# TODO: remove, use ADD_REMOVE reports + class CannotGroupResourceAlreadyInTheGroup(NameBuildTest): + def test_single_resource(self): + self.assert_message_from_report( +@@ -1797,6 +1800,7 @@ class CannotGroupResourceAlreadyInTheGroup(NameBuildTest): + ) + + ++# TODO: remove, use ADD_REMOVE reports + class CannotGroupResourceMoreThanOnce(NameBuildTest): + def test_single_resource(self): + self.assert_message_from_report( +@@ -1811,6 +1815,7 @@ class CannotGroupResourceMoreThanOnce(NameBuildTest): + ) + + ++# TODO: remove, use ADD_REMOVE reports + class CannotGroupResourceNoResources(NameBuildTest): + def test_success(self): + self.assert_message_from_report( +@@ -1818,6 +1823,7 @@ class CannotGroupResourceNoResources(NameBuildTest): + ) + + ++# TODO: remove, use ADD_REMOVE reports + class CannotGroupResourceNextToItself(NameBuildTest): + def test_success(self): + self.assert_message_from_report( +@@ -4836,6 +4842,7 @@ class BoothTicketOperationFailed(NameBuildTest): + ) + + ++# TODO: remove, use ADD_REMOVE reports + class TagAddRemoveIdsDuplication(NameBuildTest): + def test_message_add(self): + self.assert_message_from_report( +@@ -4855,6 +4862,7 @@ class TagAddRemoveIdsDuplication(NameBuildTest): + ) + + ++# TODO: remove, use ADD_REMOVE reports + class TagAdjacentReferenceIdNotInTheTag(NameBuildTest): + def test_messag(self): + self.assert_message_from_report( +@@ -4866,6 +4874,7 @@ class TagAdjacentReferenceIdNotInTheTag(NameBuildTest): + ) + + ++# TODO: remove, use ADD_REMOVE reports + class TagCannotAddAndRemoveIdsAtTheSameTime(NameBuildTest): + def test_message_one_item(self): + self.assert_message_from_report( +@@ -4885,6 +4894,7 @@ class TagCannotAddAndRemoveIdsAtTheSameTime(NameBuildTest): + ) + + ++# TODO: remove, use ADD_REMOVE reports + class TagCannotAddReferenceIdsAlreadyInTheTag(NameBuildTest): + def test_message_singular(self): + self.assert_message_from_report( +@@ -4920,6 +4930,7 @@ class TagCannotCreateEmptyTagNoIdsSpecified(NameBuildTest): + ) + + ++# TODO: remove, use ADD_REMOVE reports + class TagCannotPutIdNextToItself(NameBuildTest): + def test_message(self): + self.assert_message_from_report( +@@ -4928,6 +4939,7 @@ class TagCannotPutIdNextToItself(NameBuildTest): + ) + + ++# TODO: remove, use ADD_REMOVE reports + class TagCannotRemoveAdjacentId(NameBuildTest): + def test_message(self): + self.assert_message_from_report( +@@ -4936,6 +4948,7 @@ class TagCannotRemoveAdjacentId(NameBuildTest): + ) + + ++# TODO: remove, use ADD_REMOVE reports + class TagCannotRemoveReferencesWithoutRemovingTag(NameBuildTest): + def test_message(self): + self.assert_message_from_report( +@@ -4974,6 +4987,7 @@ class TagCannotRemoveTagsNoTagsSpecified(NameBuildTest): + ) + + ++# TODO: remove, use ADD_REMOVE reports + class 
TagCannotSpecifyAdjacentIdWithoutIdsToAdd(NameBuildTest): + def test_message(self): + self.assert_message_from_report( +@@ -4982,6 +4996,7 @@ class TagCannotSpecifyAdjacentIdWithoutIdsToAdd(NameBuildTest): + ) + + ++# TODO: remove, use ADD_REMOVE reports + class TagCannotUpdateTagNoIdsSpecified(NameBuildTest): + def test_message(self): + self.assert_message_from_report( +@@ -4990,6 +5005,7 @@ class TagCannotUpdateTagNoIdsSpecified(NameBuildTest): + ) + + ++# TODO: remove, use ADD_REMOVE reports + class TagIdsNotInTheTag(NameBuildTest): + def test_message_singular(self): + self.assert_message_from_report( +@@ -5080,3 +5096,172 @@ class CibNvsetAmbiguousProvideNvsetId(NameBuildTest): + const.PCS_COMMAND_RESOURCE_DEFAULTS_UPDATE + ), + ) ++ ++ ++class AddRemoveItemsNotSpecified(NameBuildTest): ++ def test_message(self): ++ self.assert_message_from_report( ++ ( ++ "Cannot modify stonith resource 'container-id', no devices to " ++ "add or remove specified" ++ ), ++ reports.AddRemoveItemsNotSpecified( ++ const.ADD_REMOVE_CONTAINER_TYPE_STONITH_RESOURCE, ++ const.ADD_REMOVE_ITEM_TYPE_DEVICE, ++ "container-id", ++ ), ++ ) ++ ++ ++class AddRemoveItemsDuplication(NameBuildTest): ++ def test_message(self): ++ self.assert_message_from_report( ++ ( ++ "Devices to add or remove must be unique, duplicate devices: " ++ "'dup1', 'dup2'" ++ ), ++ reports.AddRemoveItemsDuplication( ++ const.ADD_REMOVE_CONTAINER_TYPE_STONITH_RESOURCE, ++ const.ADD_REMOVE_ITEM_TYPE_DEVICE, ++ "container-id", ++ ["dup2", "dup1"], ++ ), ++ ) ++ ++ ++class AddRemoveCannotAddItemsAlreadyInTheContainer(NameBuildTest): ++ def test_message_plural(self): ++ self.assert_message_from_report( ++ "Cannot add devices 'i1', 'i2', they are already present in stonith" ++ " resource 'container-id'", ++ reports.AddRemoveCannotAddItemsAlreadyInTheContainer( ++ const.ADD_REMOVE_CONTAINER_TYPE_STONITH_RESOURCE, ++ const.ADD_REMOVE_ITEM_TYPE_DEVICE, ++ "container-id", ++ ["i2", "i1"], ++ ), ++ ) ++ ++ def test_message_singular(self): ++ self.assert_message_from_report( ++ "Cannot add device 'i1', it is already present in stonith resource " ++ "'container-id'", ++ reports.AddRemoveCannotAddItemsAlreadyInTheContainer( ++ const.ADD_REMOVE_CONTAINER_TYPE_STONITH_RESOURCE, ++ const.ADD_REMOVE_ITEM_TYPE_DEVICE, ++ "container-id", ++ ["i1"], ++ ), ++ ) ++ ++ ++class AddRemoveCannotRemoveItemsNotInTheContainer(NameBuildTest): ++ def test_message_plural(self): ++ self.assert_message_from_report( ++ ( ++ "Cannot remove devices 'i1', 'i2', they are not present in " ++ "stonith resource 'container-id'" ++ ), ++ reports.AddRemoveCannotRemoveItemsNotInTheContainer( ++ const.ADD_REMOVE_CONTAINER_TYPE_STONITH_RESOURCE, ++ const.ADD_REMOVE_ITEM_TYPE_DEVICE, ++ "container-id", ++ ["i2", "i1"], ++ ), ++ ) ++ ++ def test_message_singular(self): ++ self.assert_message_from_report( ++ ( ++ "Cannot remove device 'i1', it is not present in " ++ "stonith resource 'container-id'" ++ ), ++ reports.AddRemoveCannotRemoveItemsNotInTheContainer( ++ const.ADD_REMOVE_CONTAINER_TYPE_STONITH_RESOURCE, ++ const.ADD_REMOVE_ITEM_TYPE_DEVICE, ++ "container-id", ++ ["i1"], ++ ), ++ ) ++ ++ ++class AddRemoveCannotAddAndRemoveItemsAtTheSameTime(NameBuildTest): ++ def test_message_plural(self): ++ self.assert_message_from_report( ++ "Devices cannot be added and removed at the same time: 'i1', 'i2'", ++ reports.AddRemoveCannotAddAndRemoveItemsAtTheSameTime( ++ const.ADD_REMOVE_CONTAINER_TYPE_STONITH_RESOURCE, ++ const.ADD_REMOVE_ITEM_TYPE_DEVICE, ++ "container-id", ++ ["i2", "i1"], ++ ), ++ 
) ++ ++ def test_message_singular(self): ++ self.assert_message_from_report( ++ "Device cannot be added and removed at the same time: 'i1'", ++ reports.AddRemoveCannotAddAndRemoveItemsAtTheSameTime( ++ const.ADD_REMOVE_CONTAINER_TYPE_STONITH_RESOURCE, ++ const.ADD_REMOVE_ITEM_TYPE_DEVICE, ++ "container-id", ++ ["i1"], ++ ), ++ ) ++ ++ ++class AddRemoveCannotRemoveAllItemsFromTheContainer(NameBuildTest): ++ def test_message(self): ++ self.assert_message_from_report( ++ "Cannot remove all devices from stonith resource 'container-id'", ++ reports.AddRemoveCannotRemoveAllItemsFromTheContainer( ++ const.ADD_REMOVE_CONTAINER_TYPE_STONITH_RESOURCE, ++ const.ADD_REMOVE_ITEM_TYPE_DEVICE, ++ "container-id", ++ ["i1", "i2"], ++ ), ++ ) ++ ++ ++class AddRemoveAdjacentItemNotInTheContainer(NameBuildTest): ++ def test_message(self): ++ self.assert_message_from_report( ++ ( ++ "There is no device 'adjacent-item-id' in the stonith resource " ++ "'container-id', cannot add devices next to it" ++ ), ++ reports.AddRemoveAdjacentItemNotInTheContainer( ++ const.ADD_REMOVE_CONTAINER_TYPE_STONITH_RESOURCE, ++ const.ADD_REMOVE_ITEM_TYPE_DEVICE, ++ "container-id", ++ "adjacent-item-id", ++ ), ++ ) ++ ++ ++class AddRemoveCannotPutItemNextToItself(NameBuildTest): ++ def test_message(self): ++ self.assert_message_from_report( ++ "Cannot put device 'adjacent-item-id' next to itself", ++ reports.AddRemoveCannotPutItemNextToItself( ++ const.ADD_REMOVE_CONTAINER_TYPE_STONITH_RESOURCE, ++ const.ADD_REMOVE_ITEM_TYPE_DEVICE, ++ "container-id", ++ "adjacent-item-id", ++ ), ++ ) ++ ++ ++class AddRemoveCannotSpecifyAdjacentItemWithoutItemsToAdd(NameBuildTest): ++ def test_message(self): ++ self.assert_message_from_report( ++ ( ++ "Cannot specify adjacent device 'adjacent-item-id' without " ++ "devices to add" ++ ), ++ reports.AddRemoveCannotSpecifyAdjacentItemWithoutItemsToAdd( ++ const.ADD_REMOVE_CONTAINER_TYPE_STONITH_RESOURCE, ++ const.ADD_REMOVE_ITEM_TYPE_DEVICE, ++ "container-id", ++ "adjacent-item-id", ++ ), ++ ) +diff --git a/pcs_test/tier0/common/test_str_tools.py b/pcs_test/tier0/common/test_str_tools.py +index 97c1d223..b0028a88 100644 +--- a/pcs_test/tier0/common/test_str_tools.py ++++ b/pcs_test/tier0/common/test_str_tools.py +@@ -1,5 +1,5 @@ + # pylint: disable=protected-access +-from unittest import TestCase, mock ++from unittest import TestCase + + from pcs.common import str_tools as tools + +@@ -124,73 +124,48 @@ class AddSTest(TestCase): + self.assertEqual(tools._add_s("church"), "churches") + + +-@mock.patch("pcs.common.str_tools._add_s") +-@mock.patch("pcs.common.str_tools._is_multiple") ++class GetPluralTest(TestCase): ++ def test_common_plural(self): ++ self.assertEqual("are", tools.get_plural("is")) ++ ++ def test_add_s(self): ++ self.assertEqual("pieces", tools.get_plural("piece")) ++ ++ + class FormatPluralTest(TestCase): +- def test_is_sg(self, mock_is_multiple, mock_add_s): +- mock_is_multiple.return_value = False ++ def test_is_sg(self): + self.assertEqual("is", tools.format_plural(1, "is")) +- mock_add_s.assert_not_called() +- mock_is_multiple.assert_called_once_with(1) + +- def test_is_pl(self, mock_is_multiple, mock_add_s): +- mock_is_multiple.return_value = True ++ def test_is_pl(self): + self.assertEqual("are", tools.format_plural(2, "is")) +- mock_add_s.assert_not_called() +- mock_is_multiple.assert_called_once_with(2) + +- def test_do_sg(self, mock_is_multiple, mock_add_s): +- mock_is_multiple.return_value = False ++ def test_do_sg(self): + self.assertEqual("does", tools.format_plural("he", 
"does")) +- mock_add_s.assert_not_called() +- mock_is_multiple.assert_called_once_with("he") + +- def test_do_pl(self, mock_is_multiple, mock_add_s): +- mock_is_multiple.return_value = True ++ def test_do_pl(self): + self.assertEqual("do", tools.format_plural(["he", "she"], "does")) +- mock_add_s.assert_not_called() +- mock_is_multiple.assert_called_once_with(["he", "she"]) + +- def test_have_sg(self, mock_is_multiple, mock_add_s): +- mock_is_multiple.return_value = False ++ def test_have_sg(self): + self.assertEqual("has", tools.format_plural("he", "has")) +- mock_add_s.assert_not_called() +- mock_is_multiple.assert_called_once_with("he") + +- def test_have_pl(self, mock_is_multiple, mock_add_s): +- mock_is_multiple.return_value = True ++ def test_have_pl(self): + self.assertEqual("have", tools.format_plural(["he", "she"], "has")) +- mock_add_s.assert_not_called() +- mock_is_multiple.assert_called_once_with(["he", "she"]) + +- def test_plural_sg(self, mock_is_multiple, mock_add_s): +- mock_is_multiple.return_value = False ++ def test_plural_sg(self): + self.assertEqual( + "singular", tools.format_plural(1, "singular", "plural") + ) +- mock_add_s.assert_not_called() +- mock_is_multiple.assert_called_once_with(1) + +- def test_plural_pl(self, mock_is_multiple, mock_add_s): +- mock_is_multiple.return_value = True ++ def test_plural_pl(self): + self.assertEqual( + "plural", tools.format_plural(10, "singular", "plural") + ) +- mock_add_s.assert_not_called() +- mock_is_multiple.assert_called_once_with(10) + +- def test_regular_sg(self, mock_is_multiple, mock_add_s): +- mock_is_multiple.return_value = False ++ def test_regular_sg(self): + self.assertEqual("greeting", tools.format_plural(1, "greeting")) +- mock_add_s.assert_not_called() +- mock_is_multiple.assert_called_once_with(1) + +- def test_regular_pl(self, mock_is_multiple, mock_add_s): +- mock_add_s.return_value = "greetings" +- mock_is_multiple.return_value = True ++ def test_regular_pl(self): + self.assertEqual("greetings", tools.format_plural(10, "greeting")) +- mock_add_s.assert_called_once_with("greeting") +- mock_is_multiple.assert_called_once_with(10) + + + class FormatList(TestCase): +diff --git a/pcs_test/tier0/lib/cib/test_stonith.py b/pcs_test/tier0/lib/cib/test_stonith.py +index ef7571ce..df059121 100644 +--- a/pcs_test/tier0/lib/cib/test_stonith.py ++++ b/pcs_test/tier0/lib/cib/test_stonith.py +@@ -2,8 +2,12 @@ from unittest import TestCase + + from lxml import etree + ++from pcs.common import reports + from pcs.lib.cib import stonith + ++from pcs_test.tools import fixture ++from pcs_test.tools.assertions import assert_report_item_list_equal ++ + + class IsStonithEnabled(TestCase): + def test_not_set(self): +@@ -149,8 +153,129 @@ class GetMisconfiguredResources(TestCase): + ) + + +-class ValidateStonithDeviceExistsAndSupported(TestCase): +- """ +- tested in: +- pcs_test.tier0.lib.commands.test_stonith_update_scsi_devices.TestUpdateScsiDevicesFailures +- """ ++class ValidateStonithRestartlessUpdate(TestCase): ++ RESOURCES = etree.fromstring( ++ """ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ """ ++ ) ++ ++ def assert_unsupported_stonith_agent(self, resource_id, resource_type): ++ stonith_el, report_list = stonith.validate_stonith_restartless_update( ++ self.RESOURCES, resource_id ++ ) ++ self.assertEqual( ++ stonith_el, ++ self.RESOURCES.find(f".//primitive[@id='{resource_id}']"), ++ ) ++ assert_report_item_list_equal( ++ report_list, ++ [ ++ fixture.error( ++ reports.codes.STONITH_RESTARTLESS_UPDATE_UNSUPPORTED_AGENT, 
++ resource_id=resource_id, ++ resource_type=resource_type, ++ supported_stonith_types=["fence_scsi"], ++ ) ++ ], ++ ) ++ ++ def assert_no_devices(self, resource_id): ++ stonith_el, report_list = stonith.validate_stonith_restartless_update( ++ self.RESOURCES, resource_id ++ ) ++ self.assertEqual( ++ stonith_el, ++ self.RESOURCES.find(f".//primitive[@id='{resource_id}']"), ++ ) ++ assert_report_item_list_equal( ++ report_list, ++ [ ++ fixture.error( ++ reports.codes.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM, ++ reason=( ++ "no devices option configured for stonith device " ++ f"'{resource_id}'" ++ ), ++ reason_type="other", ++ ) ++ ], ++ ) ++ ++ def test_supported(self): ++ stonith_el, report_list = stonith.validate_stonith_restartless_update( ++ self.RESOURCES, "supported" ++ ) ++ self.assertEqual( ++ stonith_el, self.RESOURCES.find(".//primitive[@id='supported']") ++ ) ++ assert_report_item_list_equal(report_list, []) ++ ++ def test_nonexistent_id(self): ++ stonith_el, report_list = stonith.validate_stonith_restartless_update( ++ self.RESOURCES, "non-existent" ++ ) ++ self.assertEqual(stonith_el, None) ++ assert_report_item_list_equal( ++ report_list, ++ [ ++ fixture.error( ++ reports.codes.ID_NOT_FOUND, ++ id="non-existent", ++ expected_types=["primitive"], ++ context_type="resources", ++ context_id="", ++ ) ++ ], ++ ) ++ ++ def test_not_a_resource_id(self): ++ stonith_el, report_list = stonith.validate_stonith_restartless_update( ++ self.RESOURCES, "empty-instance_attributes-devices" ++ ) ++ self.assertEqual(stonith_el, None) ++ assert_report_item_list_equal( ++ report_list, ++ [ ++ fixture.error( ++ reports.codes.ID_BELONGS_TO_UNEXPECTED_TYPE, ++ id="empty-instance_attributes-devices", ++ expected_types=["primitive"], ++ current_type="nvpair", ++ ) ++ ], ++ ) ++ ++ def test_devices_empty(self): ++ self.assert_no_devices("empty") ++ ++ def test_missing_devices_attr(self): ++ self.assert_no_devices("no-devices") ++ ++ def test_unsupported_class(self): ++ self.assert_unsupported_stonith_agent("cp-01", "Dummy") ++ ++ def test_unsupported_provider(self): ++ self.assert_unsupported_stonith_agent( ++ "unsupported_provider", "fence_scsi" ++ ) ++ ++ def test_unsupported_type(self): ++ self.assert_unsupported_stonith_agent("unsupported_type", "fence_xvm") +diff --git a/pcs_test/tier0/lib/commands/test_stonith_update_scsi_devices.py b/pcs_test/tier0/lib/commands/test_stonith_update_scsi_devices.py +index 3bc51325..6ff6b99a 100644 +--- a/pcs_test/tier0/lib/commands/test_stonith_update_scsi_devices.py ++++ b/pcs_test/tier0/lib/commands/test_stonith_update_scsi_devices.py +@@ -3,6 +3,7 @@ from unittest import mock, TestCase + + + from pcs_test.tools import fixture ++from pcs_test.tools.assertions import assert_report_item_list_equal + from pcs_test.tools.command_env import get_env_tools + from pcs_test.tools.misc import get_test_resource as rc + +@@ -13,6 +14,10 @@ from pcs.common import ( + reports, + ) + from pcs.common.interface import dto ++from pcs.common.reports.const import ( ++ ADD_REMOVE_CONTAINER_TYPE_STONITH_RESOURCE, ++ ADD_REMOVE_ITEM_TYPE_DEVICE, ++) + from pcs.common.tools import timeout_to_seconds + + from .cluster.common import ( +@@ -28,6 +33,10 @@ DEFAULT_DIGEST = _DIGEST + "0" + ALL_DIGEST = _DIGEST + "1" + NONPRIVATE_DIGEST = _DIGEST + "2" + NONRELOADABLE_DIGEST = _DIGEST + "3" ++DEV_1 = "/dev/sda" ++DEV_2 = "/dev/sdb" ++DEV_3 = "/dev/sdc" ++DEV_4 = "/dev/sdd" + DEVICES_1 = ("/dev/sda",) + DEVICES_2 = ("/dev/sda", "/dev/sdb") + DEVICES_3 = ("/dev/sda", "/dev/sdb", "/dev/sdc") 
+@@ -197,13 +206,9 @@ FIXTURE_CRM_MON_RES_STOPPED = f""" + """ + + +-@mock.patch.object( +- settings, +- "pacemaker_api_result_schema", +- rc("pcmk_api_rng/api-result.rng"), +-) +-class UpdateScsiDevices(TestCase): ++class UpdateScsiDevicesMixin: + def setUp(self): ++ # pylint: disable=invalid-name + self.env_assist, self.config = get_env_tools(self) + + self.existing_nodes = ["node1", "node2", "node3"] +@@ -217,14 +222,18 @@ class UpdateScsiDevices(TestCase): + self, + devices_before=DEVICES_1, + devices_updated=DEVICES_2, ++ devices_add=(), ++ devices_remove=(), ++ unfence=None, + resource_ops=DEFAULT_OPS, + lrm_monitor_ops=DEFAULT_LRM_MONITOR_OPS, + lrm_start_ops=DEFAULT_LRM_START_OPS, + lrm_monitor_ops_updated=DEFAULT_LRM_MONITOR_OPS_UPDATED, + lrm_start_ops_updated=DEFAULT_LRM_START_OPS_UPDATED, + ): ++ # pylint: disable=too-many-arguments + # pylint: disable=too-many-locals +- self.config.runner.pcmk.is_resource_digests_supported() ++ devices_value = ",".join(sorted(devices_updated)) + self.config.runner.cib.load( + resources=fixture_scsi( + devices=devices_before, resource_ops=resource_ops +@@ -235,16 +244,17 @@ class UpdateScsiDevices(TestCase): + lrm_monitor_ops=lrm_monitor_ops, + ), + ) ++ self.config.runner.pcmk.is_resource_digests_supported() + self.config.runner.pcmk.load_state( + resources=FIXTURE_CRM_MON_RES_RUNNING_1, nodes=FIXTURE_CRM_MON_NODES + ) +- devices_opt = "devices={}".format(",".join(devices_updated)) ++ devices_opt = "devices={}".format(devices_value) + self.config.runner.pcmk.resource_digests( + SCSI_STONITH_ID, + SCSI_NODE, + name="start.op.digests", + stdout=fixture_digests_xml( +- SCSI_STONITH_ID, SCSI_NODE, devices=",".join(devices_updated) ++ SCSI_STONITH_ID, SCSI_NODE, devices=devices_value + ), + args=[devices_opt], + ) +@@ -272,22 +282,23 @@ class UpdateScsiDevices(TestCase): + stdout=fixture_digests_xml( + SCSI_STONITH_ID, + SCSI_NODE, +- devices=",".join(devices_updated), ++ devices=devices_value, + ), + args=args, + ) +- self.config.corosync_conf.load_content( +- corosync_conf_fixture( +- self.existing_corosync_nodes, +- get_two_node(len(self.existing_corosync_nodes)), ++ if unfence: ++ self.config.corosync_conf.load_content( ++ corosync_conf_fixture( ++ self.existing_corosync_nodes, ++ get_two_node(len(self.existing_corosync_nodes)), ++ ) ++ ) ++ self.config.http.corosync.get_corosync_online_targets( ++ node_labels=self.existing_nodes ++ ) ++ self.config.http.scsi.unfence_node( ++ unfence, node_labels=self.existing_nodes + ) +- ) +- self.config.http.corosync.get_corosync_online_targets( +- node_labels=self.existing_nodes +- ) +- self.config.http.scsi.unfence_node( +- devices_updated, node_labels=self.existing_nodes +- ) + self.config.env.push_cib( + resources=fixture_scsi( + devices=devices_updated, resource_ops=resource_ops +@@ -298,113 +309,25 @@ class UpdateScsiDevices(TestCase): + lrm_monitor_ops=lrm_monitor_ops_updated, + ), + ) +- stonith.update_scsi_devices( +- self.env_assist.get_env(), SCSI_STONITH_ID, devices_updated +- ) ++ if devices_add or devices_remove: ++ stonith.update_scsi_devices_add_remove( ++ self.env_assist.get_env(), ++ SCSI_STONITH_ID, ++ devices_add, ++ devices_remove, ++ ) ++ else: ++ stonith.update_scsi_devices( ++ self.env_assist.get_env(), SCSI_STONITH_ID, devices_updated ++ ) + self.env_assist.assert_reports([]) + +- def test_update_1_to_1_devices(self): +- self.assert_command_success( +- devices_before=DEVICES_1, devices_updated=DEVICES_1 +- ) +- +- def test_update_2_to_2_devices(self): +- self.assert_command_success( 
+- devices_before=DEVICES_1, devices_updated=DEVICES_1 +- ) +- +- def test_update_1_to_2_devices(self): +- self.assert_command_success() +- +- def test_update_1_to_3_devices(self): +- self.assert_command_success( +- devices_before=DEVICES_1, devices_updated=DEVICES_3 +- ) +- +- def test_update_3_to_1_devices(self): +- self.assert_command_success( +- devices_before=DEVICES_3, devices_updated=DEVICES_1 +- ) +- +- def test_update_3_to_2_devices(self): +- self.assert_command_success( +- devices_before=DEVICES_3, devices_updated=DEVICES_2 +- ) +- +- def test_default_monitor(self): +- self.assert_command_success() +- +- def test_no_monitor_ops(self): +- self.assert_command_success( +- resource_ops=(), lrm_monitor_ops=(), lrm_monitor_ops_updated=() +- ) +- +- def test_1_monitor_with_timeout(self): +- self.assert_command_success( +- resource_ops=(("monitor", "30s", "10s", None),), +- lrm_monitor_ops=(("30000", DEFAULT_DIGEST, None, None),), +- lrm_monitor_ops_updated=(("30000", ALL_DIGEST, None, None),), +- ) +- +- def test_2_monitor_ops_with_timeouts(self): +- self.assert_command_success( +- resource_ops=( +- ("monitor", "30s", "10s", None), +- ("monitor", "40s", "20s", None), +- ), +- lrm_monitor_ops=( +- ("30000", DEFAULT_DIGEST, None, None), +- ("40000", DEFAULT_DIGEST, None, None), +- ), +- lrm_monitor_ops_updated=( +- ("30000", ALL_DIGEST, None, None), +- ("40000", ALL_DIGEST, None, None), +- ), +- ) +- +- def test_2_monitor_ops_with_one_timeout(self): +- self.assert_command_success( +- resource_ops=( +- ("monitor", "30s", "10s", None), +- ("monitor", "60s", None, None), +- ), +- lrm_monitor_ops=( +- ("30000", DEFAULT_DIGEST, None, None), +- ("60000", DEFAULT_DIGEST, None, None), +- ), +- lrm_monitor_ops_updated=( +- ("30000", ALL_DIGEST, None, None), +- ("60000", ALL_DIGEST, None, None), +- ), +- ) +- +- def test_various_start_ops_one_lrm_start_op(self): +- self.assert_command_success( +- resource_ops=( +- ("monitor", "60s", None, None), +- ("start", "0s", "40s", None), +- ("start", "0s", "30s", "1"), +- ("start", "10s", "5s", None), +- ("start", "20s", None, None), +- ), +- ) +- +- def test_1_nonrecurring_start_op_with_timeout(self): +- self.assert_command_success( +- resource_ops=( +- ("monitor", "60s", None, None), +- ("start", "0s", "40s", None), +- ), +- ) + ++class UpdateScsiDevicesFailuresMixin: ++ def command(self, force_flags=()): ++ raise NotImplementedError + +-@mock.patch.object( +- settings, +- "pacemaker_api_result_schema", +- rc("pcmk_api_rng/api-result.rng"), +-) +-class TestUpdateScsiDevicesFailures(TestCase): +- # pylint: disable=too-many-public-methods ++ # pylint: disable=invalid-name + def setUp(self): + self.env_assist, self.config = get_env_tools(self) + +@@ -416,13 +339,12 @@ class TestUpdateScsiDevicesFailures(TestCase): + self.config.env.set_known_nodes(self.existing_nodes) + + def test_pcmk_doesnt_support_digests(self): ++ self.config.runner.cib.load(resources=fixture_scsi()) + self.config.runner.pcmk.is_resource_digests_supported( + is_supported=False + ) + self.env_assist.assert_raise_library_error( +- lambda: stonith.update_scsi_devices( +- self.env_assist.get_env(), SCSI_STONITH_ID, () +- ), ++ self.command(), + [ + fixture.error( + reports.codes.STONITH_RESTARTLESS_UPDATE_OF_SCSI_DEVICES_NOT_SUPPORTED, +@@ -431,134 +353,557 @@ class TestUpdateScsiDevicesFailures(TestCase): + expected_in_processor=False, + ) + +- def test_devices_cannot_be_empty(self): +- self.config.runner.pcmk.is_resource_digests_supported() +- 
self.config.runner.cib.load(resources=fixture_scsi()) +- self.env_assist.assert_raise_library_error( +- lambda: stonith.update_scsi_devices( +- self.env_assist.get_env(), SCSI_STONITH_ID, () +- ) +- ) +- self.env_assist.assert_reports( +- [ +- fixture.error( +- reports.codes.INVALID_OPTION_VALUE, +- option_name="devices", +- option_value="", +- allowed_values=None, +- cannot_be_empty=True, +- forbidden_characters=None, +- ) +- ] ++ def test_node_missing_name_and_missing_auth_token(self): ++ self.config.runner.cib.load( ++ resources=fixture_scsi(), ++ status=_fixture_status_lrm_ops(SCSI_STONITH_ID), + ) +- +- def test_nonexistant_id(self): + self.config.runner.pcmk.is_resource_digests_supported() +- self.config.runner.cib.load(resources=fixture_scsi()) +- self.env_assist.assert_raise_library_error( +- lambda: stonith.update_scsi_devices( +- self.env_assist.get_env(), "non-existent-id", DEVICES_2 +- ) +- ) +- self.env_assist.assert_reports( +- [ +- fixture.error( +- reports.codes.ID_NOT_FOUND, +- id="non-existent-id", +- expected_types=["primitive"], +- context_type="cib", +- context_id="", +- ) +- ] ++ self.config.runner.pcmk.load_state( ++ resources=FIXTURE_CRM_MON_RES_RUNNING_1, nodes=FIXTURE_CRM_MON_NODES + ) +- +- def test_not_a_resource_id(self): +- self.config.runner.pcmk.is_resource_digests_supported() +- self.config.runner.cib.load(resources=fixture_scsi()) +- self.env_assist.assert_raise_library_error( +- lambda: stonith.update_scsi_devices( +- self.env_assist.get_env(), +- f"{SCSI_STONITH_ID}-instance_attributes-devices", +- DEVICES_2, +- ) ++ self.config.runner.pcmk.resource_digests( ++ SCSI_STONITH_ID, ++ SCSI_NODE, ++ name="start.op.digests", ++ stdout=fixture_digests_xml( ++ SCSI_STONITH_ID, SCSI_NODE, devices=",".join(DEVICES_2) ++ ), ++ args=["devices={}".format(",".join(DEVICES_2))], + ) +- self.env_assist.assert_reports( +- [ +- fixture.error( +- reports.codes.ID_BELONGS_TO_UNEXPECTED_TYPE, +- id=f"{SCSI_STONITH_ID}-instance_attributes-devices", +- expected_types=["primitive"], +- current_type="nvpair", +- ) +- ] ++ self.config.runner.pcmk.resource_digests( ++ SCSI_STONITH_ID, ++ SCSI_NODE, ++ name="monitor.op.digests", ++ stdout=fixture_digests_xml( ++ SCSI_STONITH_ID, SCSI_NODE, devices=",".join(DEVICES_2) ++ ), ++ args=[ ++ "devices={}".format(",".join(DEVICES_2)), ++ "CRM_meta_interval=60000", ++ ], + ) +- +- def test_not_supported_resource_type(self): +- self.config.runner.pcmk.is_resource_digests_supported() +- self.config.runner.cib.load(resources=fixture_scsi()) +- self.env_assist.assert_raise_library_error( +- lambda: stonith.update_scsi_devices( +- self.env_assist.get_env(), "dummy", DEVICES_2 ++ self.config.corosync_conf.load_content( ++ corosync_conf_fixture( ++ self.existing_corosync_nodes ++ + [[("ring0_addr", "custom_node"), ("nodeid", "5")]], + ) + ) ++ self.config.env.set_known_nodes(self.existing_nodes[:-1]) ++ self.env_assist.assert_raise_library_error(self.command()) + self.env_assist.assert_reports( + [ + fixture.error( +- reports.codes.STONITH_RESTARTLESS_UPDATE_UNSUPPORTED_AGENT, +- resource_id="dummy", +- resource_type="Dummy", +- supported_stonith_types=["fence_scsi"], +- ) +- ] +- ) +- +- def test_devices_option_missing(self): +- self.config.runner.pcmk.is_resource_digests_supported() +- self.config.runner.cib.load(resources=fixture_scsi(devices=None)) +- self.env_assist.assert_raise_library_error( +- lambda: stonith.update_scsi_devices( +- self.env_assist.get_env(), SCSI_STONITH_ID, DEVICES_2 +- ) +- ) +- self.env_assist.assert_reports( +- [ ++ 
reports.codes.COROSYNC_CONFIG_MISSING_NAMES_OF_NODES, ++ fatal=True, ++ ), + fixture.error( +- reports.codes.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM, +- reason=( +- "no devices option configured for stonith device " +- f"'{SCSI_STONITH_ID}'" +- ), +- reason_type=reports.const.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM_REASON_OTHER, +- ) ++ reports.codes.HOST_NOT_FOUND, ++ host_list=[self.existing_nodes[-1]], ++ ), + ] + ) + +- def test_devices_option_empty(self): +- self.config.runner.pcmk.is_resource_digests_supported() +- self.config.runner.cib.load(resources=fixture_scsi(devices="")) +- self.env_assist.assert_raise_library_error( +- lambda: stonith.update_scsi_devices( +- self.env_assist.get_env(), SCSI_STONITH_ID, DEVICES_2 +- ) +- ) +- self.env_assist.assert_reports( +- [ +- fixture.error( +- reports.codes.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM, +- reason=( +- "no devices option configured for stonith device " +- f"'{SCSI_STONITH_ID}'" +- ), +- reason_type=reports.const.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM_REASON_OTHER, +- ) +- ] ++ def _unfence_failure_common_calls(self): ++ devices = ",".join(DEVICES_2) ++ self.config.runner.cib.load( ++ resources=fixture_scsi(), ++ status=_fixture_status_lrm_ops(SCSI_STONITH_ID), + ) +- +- def test_stonith_resource_is_not_running(self): + self.config.runner.pcmk.is_resource_digests_supported() +- self.config.runner.cib.load(resources=fixture_scsi()) + self.config.runner.pcmk.load_state( +- resources=FIXTURE_CRM_MON_RES_STOPPED, nodes=FIXTURE_CRM_MON_NODES ++ resources=FIXTURE_CRM_MON_RES_RUNNING_1, nodes=FIXTURE_CRM_MON_NODES ++ ) ++ self.config.runner.pcmk.resource_digests( ++ SCSI_STONITH_ID, ++ SCSI_NODE, ++ name="start.op.digests", ++ stdout=fixture_digests_xml( ++ SCSI_STONITH_ID, ++ SCSI_NODE, ++ devices=devices, ++ ), ++ args=[f"devices={devices}"], ++ ) ++ self.config.runner.pcmk.resource_digests( ++ SCSI_STONITH_ID, ++ SCSI_NODE, ++ name="monitor.op.digests", ++ stdout=fixture_digests_xml( ++ SCSI_STONITH_ID, ++ SCSI_NODE, ++ devices=devices, ++ ), ++ args=[ ++ f"devices={devices}", ++ "CRM_meta_interval=60000", ++ ], ++ ) ++ self.config.corosync_conf.load_content( ++ corosync_conf_fixture(self.existing_corosync_nodes) ++ ) ++ ++ def test_unfence_failure_unable_to_connect(self): ++ self._unfence_failure_common_calls() ++ self.config.http.corosync.get_corosync_online_targets( ++ node_labels=self.existing_nodes ++ ) ++ self.config.http.scsi.unfence_node( ++ DEVICES_2, ++ communication_list=[ ++ dict( ++ label=self.existing_nodes[0], ++ raw_data=json.dumps( ++ dict( ++ devices=[DEV_2], ++ node=self.existing_nodes[0], ++ ) ++ ), ++ was_connected=False, ++ error_msg="errA", ++ ), ++ dict( ++ label=self.existing_nodes[1], ++ raw_data=json.dumps( ++ dict( ++ devices=[DEV_2], ++ node=self.existing_nodes[1], ++ ) ++ ), ++ output=json.dumps( ++ dto.to_dict( ++ communication.dto.InternalCommunicationResultDto( ++ status=communication.const.COM_STATUS_ERROR, ++ status_msg="error", ++ report_list=[ ++ reports.ReportItem.error( ++ reports.messages.StonithUnfencingFailed( ++ "errB" ++ ) ++ ).to_dto() ++ ], ++ data=None, ++ ) ++ ) ++ ), ++ ), ++ dict( ++ label=self.existing_nodes[2], ++ raw_data=json.dumps( ++ dict( ++ devices=[DEV_2], ++ node=self.existing_nodes[2], ++ ) ++ ), ++ ), ++ ], ++ ) ++ self.env_assist.assert_raise_library_error(self.command()) ++ self.env_assist.assert_reports( ++ [ ++ fixture.error( ++ reports.codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT, ++ node=self.existing_nodes[0], ++ 
command="api/v1/scsi-unfence-node/v1", ++ reason="errA", ++ ), ++ fixture.error( ++ reports.codes.STONITH_UNFENCING_FAILED, ++ reason="errB", ++ context=reports.dto.ReportItemContextDto( ++ node=self.existing_nodes[1], ++ ), ++ ), ++ ] ++ ) ++ ++ def test_unfence_failure_agent_script_failed(self): ++ self._unfence_failure_common_calls() ++ self.config.http.corosync.get_corosync_online_targets( ++ node_labels=self.existing_nodes ++ ) ++ self.config.http.scsi.unfence_node( ++ DEVICES_2, ++ communication_list=[ ++ dict( ++ label=self.existing_nodes[0], ++ raw_data=json.dumps( ++ dict( ++ devices=[DEV_2], ++ node=self.existing_nodes[0], ++ ) ++ ), ++ ), ++ dict( ++ label=self.existing_nodes[1], ++ raw_data=json.dumps( ++ dict( ++ devices=[DEV_2], ++ node=self.existing_nodes[1], ++ ) ++ ), ++ output=json.dumps( ++ dto.to_dict( ++ communication.dto.InternalCommunicationResultDto( ++ status=communication.const.COM_STATUS_ERROR, ++ status_msg="error", ++ report_list=[ ++ reports.ReportItem.error( ++ reports.messages.StonithUnfencingFailed( ++ "errB" ++ ) ++ ).to_dto() ++ ], ++ data=None, ++ ) ++ ) ++ ), ++ ), ++ dict( ++ label=self.existing_nodes[2], ++ raw_data=json.dumps( ++ dict( ++ devices=[DEV_2], ++ node=self.existing_nodes[2], ++ ) ++ ), ++ ), ++ ], ++ ) ++ self.env_assist.assert_raise_library_error(self.command()) ++ self.env_assist.assert_reports( ++ [ ++ fixture.error( ++ reports.codes.STONITH_UNFENCING_FAILED, ++ reason="errB", ++ context=reports.dto.ReportItemContextDto( ++ node=self.existing_nodes[1], ++ ), ++ ), ++ ] ++ ) ++ ++ def test_corosync_targets_unable_to_connect(self): ++ self._unfence_failure_common_calls() ++ self.config.http.corosync.get_corosync_online_targets( ++ communication_list=[ ++ dict( ++ label=self.existing_nodes[0], ++ output='{"corosync":true}', ++ ), ++ ] ++ + [ ++ dict( ++ label=node, ++ was_connected=False, ++ errno=7, ++ error_msg="an error", ++ ) ++ for node in self.existing_nodes[1:] ++ ] ++ ) ++ self.env_assist.assert_raise_library_error(self.command()) ++ self.env_assist.assert_reports( ++ [ ++ fixture.error( ++ reports.codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT, ++ force_code=reports.codes.SKIP_OFFLINE_NODES, ++ node=node, ++ command="remote/status", ++ reason="an error", ++ ) ++ for node in self.existing_nodes[1:] ++ ] ++ ) ++ ++ def test_corosync_targets_skip_offline_unfence_node_running_corosync( ++ self, ++ ): ++ self._unfence_failure_common_calls() ++ self.config.http.corosync.get_corosync_online_targets( ++ communication_list=[ ++ dict( ++ label=self.existing_nodes[0], ++ output='{"corosync":true}', ++ ), ++ dict( ++ label=self.existing_nodes[1], ++ output='{"corosync":false}', ++ ), ++ dict( ++ label=self.existing_nodes[2], ++ was_connected=False, ++ errno=7, ++ error_msg="an error", ++ ), ++ ] ++ ) ++ self.config.http.scsi.unfence_node( ++ DEVICES_2, ++ communication_list=[ ++ dict( ++ label=self.existing_nodes[0], ++ raw_data=json.dumps( ++ dict( ++ devices=[DEV_2], ++ node=self.existing_nodes[0], ++ ) ++ ), ++ ), ++ ], ++ ) ++ self.config.env.push_cib( ++ resources=fixture_scsi(devices=DEVICES_2), ++ status=_fixture_status_lrm_ops( ++ SCSI_STONITH_ID, ++ lrm_start_ops=DEFAULT_LRM_START_OPS_UPDATED, ++ lrm_monitor_ops=DEFAULT_LRM_MONITOR_OPS_UPDATED, ++ ), ++ ) ++ self.command(force_flags=[reports.codes.SKIP_OFFLINE_NODES])() ++ self.env_assist.assert_reports( ++ [ ++ fixture.warn( ++ reports.codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT, ++ node=self.existing_nodes[2], ++ command="remote/status", ++ reason="an error", ++ ), ++ ] ++ 
) ++ ++ def test_corosync_targets_unable_to_perform_unfencing_operation( ++ self, ++ ): ++ self._unfence_failure_common_calls() ++ self.config.http.corosync.get_corosync_online_targets( ++ communication_list=[ ++ dict( ++ label=self.existing_nodes[0], ++ was_connected=False, ++ errno=7, ++ error_msg="an error", ++ ), ++ dict( ++ label=self.existing_nodes[1], ++ was_connected=False, ++ errno=7, ++ error_msg="an error", ++ ), ++ dict( ++ label=self.existing_nodes[2], ++ output='{"corosync":false}', ++ ), ++ ] ++ ) ++ self.config.http.scsi.unfence_node([DEV_2], communication_list=[]) ++ self.env_assist.assert_raise_library_error( ++ self.command(force_flags=[reports.codes.SKIP_OFFLINE_NODES]) ++ ) ++ self.env_assist.assert_reports( ++ [ ++ fixture.warn( ++ reports.codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT, ++ node=node, ++ command="remote/status", ++ reason="an error", ++ ) ++ for node in self.existing_nodes[0:2] ++ ] ++ + [ ++ fixture.error( ++ reports.codes.UNABLE_TO_PERFORM_OPERATION_ON_ANY_NODE, ++ ), ++ ] ++ ) ++ ++ ++@mock.patch.object( ++ settings, ++ "pacemaker_api_result_schema", ++ rc("pcmk_api_rng/api-result.rng"), ++) ++class UpdateScsiDevices(UpdateScsiDevicesMixin, TestCase): ++ def test_update_1_to_1_devices(self): ++ self.assert_command_success( ++ devices_before=DEVICES_1, ++ devices_updated=DEVICES_1, ++ ) ++ ++ def test_update_2_to_2_devices(self): ++ self.assert_command_success( ++ devices_before=DEVICES_2, ++ devices_updated=DEVICES_2, ++ ) ++ ++ def test_update_1_to_2_devices(self): ++ self.assert_command_success(unfence=[DEV_2]) ++ ++ def test_update_1_to_3_devices(self): ++ self.assert_command_success( ++ devices_before=DEVICES_1, ++ devices_updated=DEVICES_3, ++ unfence=[DEV_2, DEV_3], ++ ) ++ ++ def test_update_3_to_1_devices(self): ++ self.assert_command_success( ++ devices_before=DEVICES_3, ++ devices_updated=DEVICES_1, ++ ) ++ ++ def test_update_3_to_2_devices(self): ++ self.assert_command_success( ++ devices_before=DEVICES_3, ++ devices_updated=DEVICES_2, ++ ) ++ ++ def test_update_add_2_to_2_remove_1(self): ++ self.assert_command_success( ++ devices_before=[DEV_1, DEV_2], ++ devices_updated=[DEV_2, DEV_3, DEV_4], ++ unfence=[DEV_3, DEV_4], ++ ) ++ ++ def test_default_monitor(self): ++ self.assert_command_success(unfence=[DEV_2]) ++ ++ def test_no_monitor_ops(self): ++ self.assert_command_success( ++ unfence=[DEV_2], ++ resource_ops=(), ++ lrm_monitor_ops=(), ++ lrm_monitor_ops_updated=(), ++ ) ++ ++ def test_1_monitor_with_timeout(self): ++ self.assert_command_success( ++ unfence=[DEV_2], ++ resource_ops=(("monitor", "30s", "10s", None),), ++ lrm_monitor_ops=(("30000", DEFAULT_DIGEST, None, None),), ++ lrm_monitor_ops_updated=(("30000", ALL_DIGEST, None, None),), ++ ) ++ ++ def test_2_monitor_ops_with_timeouts(self): ++ self.assert_command_success( ++ unfence=[DEV_2], ++ resource_ops=( ++ ("monitor", "30s", "10s", None), ++ ("monitor", "40s", "20s", None), ++ ), ++ lrm_monitor_ops=( ++ ("30000", DEFAULT_DIGEST, None, None), ++ ("40000", DEFAULT_DIGEST, None, None), ++ ), ++ lrm_monitor_ops_updated=( ++ ("30000", ALL_DIGEST, None, None), ++ ("40000", ALL_DIGEST, None, None), ++ ), ++ ) ++ ++ def test_2_monitor_ops_with_one_timeout(self): ++ self.assert_command_success( ++ unfence=[DEV_2], ++ resource_ops=( ++ ("monitor", "30s", "10s", None), ++ ("monitor", "60s", None, None), ++ ), ++ lrm_monitor_ops=( ++ ("30000", DEFAULT_DIGEST, None, None), ++ ("60000", DEFAULT_DIGEST, None, None), ++ ), ++ lrm_monitor_ops_updated=( ++ ("30000", ALL_DIGEST, None, None), 
++ ("60000", ALL_DIGEST, None, None), ++ ), ++ ) ++ ++ def test_various_start_ops_one_lrm_start_op(self): ++ self.assert_command_success( ++ unfence=[DEV_2], ++ resource_ops=( ++ ("monitor", "60s", None, None), ++ ("start", "0s", "40s", None), ++ ("start", "0s", "30s", "1"), ++ ("start", "10s", "5s", None), ++ ("start", "20s", None, None), ++ ), ++ ) ++ ++ def test_1_nonrecurring_start_op_with_timeout(self): ++ self.assert_command_success( ++ unfence=[DEV_2], ++ resource_ops=( ++ ("monitor", "60s", None, None), ++ ("start", "0s", "40s", None), ++ ), ++ ) ++ ++ ++@mock.patch.object( ++ settings, ++ "pacemaker_api_result_schema", ++ rc("pcmk_api_rng/api-result.rng"), ++) ++class TestUpdateScsiDevicesFailures(UpdateScsiDevicesFailuresMixin, TestCase): ++ def command(self, force_flags=()): ++ return lambda: stonith.update_scsi_devices( ++ self.env_assist.get_env(), ++ SCSI_STONITH_ID, ++ DEVICES_2, ++ force_flags=force_flags, ++ ) ++ ++ def test_devices_cannot_be_empty(self): ++ self.config.runner.cib.load(resources=fixture_scsi()) ++ self.config.runner.pcmk.is_resource_digests_supported() ++ self.env_assist.assert_raise_library_error( ++ lambda: stonith.update_scsi_devices( ++ self.env_assist.get_env(), SCSI_STONITH_ID, () ++ ) ++ ) ++ self.env_assist.assert_reports( ++ [ ++ fixture.error( ++ reports.codes.INVALID_OPTION_VALUE, ++ option_name="devices", ++ option_value="", ++ allowed_values=None, ++ cannot_be_empty=True, ++ forbidden_characters=None, ++ ) ++ ] ++ ) ++ ++ def test_nonexistant_id(self): ++ """ ++ lower level tested in ++ pcs_test.tier0.lib.cib.test_stonith.ValidateStonithRestartlessUpdate ++ """ ++ self.config.runner.cib.load(resources=fixture_scsi()) ++ self.config.runner.pcmk.is_resource_digests_supported() ++ self.env_assist.assert_raise_library_error( ++ lambda: stonith.update_scsi_devices( ++ self.env_assist.get_env(), "non-existent-id", DEVICES_2 ++ ) ++ ) ++ self.env_assist.assert_reports( ++ [ ++ fixture.error( ++ reports.codes.ID_NOT_FOUND, ++ id="non-existent-id", ++ expected_types=["primitive"], ++ context_type="cib", ++ context_id="", ++ ) ++ ] ++ ) ++ ++ def test_stonith_resource_is_not_running(self): ++ self.config.runner.cib.load(resources=fixture_scsi()) ++ self.config.runner.pcmk.is_resource_digests_supported() ++ self.config.runner.pcmk.load_state( ++ resources=FIXTURE_CRM_MON_RES_STOPPED, nodes=FIXTURE_CRM_MON_NODES + ) + self.env_assist.assert_raise_library_error( + lambda: stonith.update_scsi_devices( +@@ -575,8 +920,8 @@ class TestUpdateScsiDevicesFailures(TestCase): + ) + + def test_stonith_resource_is_running_on_more_than_one_node(self): +- self.config.runner.pcmk.is_resource_digests_supported() + self.config.runner.cib.load(resources=fixture_scsi()) ++ self.config.runner.pcmk.is_resource_digests_supported() + self.config.runner.pcmk.load_state( + resources=FIXTURE_CRM_MON_RES_RUNNING_2, nodes=FIXTURE_CRM_MON_NODES + ) +@@ -599,7 +944,6 @@ class TestUpdateScsiDevicesFailures(TestCase): + + def test_lrm_op_missing_digest_attributes(self): + devices = ",".join(DEVICES_2) +- self.config.runner.pcmk.is_resource_digests_supported() + self.config.runner.cib.load( + resources=fixture_scsi(), + status=_fixture_status_lrm_ops_base( +@@ -607,6 +951,7 @@ class TestUpdateScsiDevicesFailures(TestCase): + f'', + ), + ) ++ self.config.runner.pcmk.is_resource_digests_supported() + self.config.runner.pcmk.load_state( + resources=FIXTURE_CRM_MON_RES_RUNNING_1, nodes=FIXTURE_CRM_MON_NODES + ) +@@ -637,7 +982,6 @@ class TestUpdateScsiDevicesFailures(TestCase): + + def 
test_crm_resource_digests_missing(self): + devices = ",".join(DEVICES_2) +- self.config.runner.pcmk.is_resource_digests_supported() + self.config.runner.cib.load( + resources=fixture_scsi(), + status=_fixture_status_lrm_ops_base( +@@ -648,6 +992,7 @@ class TestUpdateScsiDevicesFailures(TestCase): + ), + ), + ) ++ self.config.runner.pcmk.is_resource_digests_supported() + self.config.runner.pcmk.load_state( + resources=FIXTURE_CRM_MON_RES_RUNNING_1, nodes=FIXTURE_CRM_MON_NODES + ) +@@ -680,11 +1025,11 @@ class TestUpdateScsiDevicesFailures(TestCase): + ) + + def test_no_lrm_start_op(self): +- self.config.runner.pcmk.is_resource_digests_supported() + self.config.runner.cib.load( + resources=fixture_scsi(), + status=_fixture_status_lrm_ops(SCSI_STONITH_ID, lrm_start_ops=()), + ) ++ self.config.runner.pcmk.is_resource_digests_supported() + self.config.runner.pcmk.load_state( + resources=FIXTURE_CRM_MON_RES_RUNNING_1, nodes=FIXTURE_CRM_MON_NODES + ) +@@ -705,7 +1050,6 @@ class TestUpdateScsiDevicesFailures(TestCase): + ) + + def test_monitor_ops_and_lrm_monitor_ops_do_not_match(self): +- self.config.runner.pcmk.is_resource_digests_supported() + self.config.runner.cib.load( + resources=fixture_scsi( + resource_ops=( +@@ -716,6 +1060,7 @@ class TestUpdateScsiDevicesFailures(TestCase): + ), + status=_fixture_status_lrm_ops(SCSI_STONITH_ID), + ) ++ self.config.runner.pcmk.is_resource_digests_supported() + self.config.runner.pcmk.load_state( + resources=FIXTURE_CRM_MON_RES_RUNNING_1, nodes=FIXTURE_CRM_MON_NODES + ) +@@ -746,13 +1091,13 @@ class TestUpdateScsiDevicesFailures(TestCase): + ) + + def test_lrm_monitor_ops_not_found(self): +- self.config.runner.pcmk.is_resource_digests_supported() + self.config.runner.cib.load( + resources=fixture_scsi( + resource_ops=(("monitor", "30s", None, None),) + ), + status=_fixture_status_lrm_ops(SCSI_STONITH_ID), + ) ++ self.config.runner.pcmk.is_resource_digests_supported() + self.config.runner.pcmk.load_state( + resources=FIXTURE_CRM_MON_RES_RUNNING_1, nodes=FIXTURE_CRM_MON_NODES + ) +@@ -783,371 +1128,353 @@ class TestUpdateScsiDevicesFailures(TestCase): + expected_in_processor=False, + ) + +- def test_node_missing_name_and_missing_auth_token(self): +- self.config.runner.pcmk.is_resource_digests_supported() +- self.config.runner.cib.load( +- resources=fixture_scsi(), +- status=_fixture_status_lrm_ops(SCSI_STONITH_ID), +- ) +- self.config.runner.pcmk.load_state( +- resources=FIXTURE_CRM_MON_RES_RUNNING_1, nodes=FIXTURE_CRM_MON_NODES +- ) +- self.config.runner.pcmk.resource_digests( +- SCSI_STONITH_ID, +- SCSI_NODE, +- name="start.op.digests", +- stdout=fixture_digests_xml( +- SCSI_STONITH_ID, SCSI_NODE, devices=",".join(DEVICES_2) +- ), +- args=["devices={}".format(",".join(DEVICES_2))], +- ) +- self.config.runner.pcmk.resource_digests( +- SCSI_STONITH_ID, +- SCSI_NODE, +- name="monitor.op.digests", +- stdout=fixture_digests_xml( +- SCSI_STONITH_ID, SCSI_NODE, devices=",".join(DEVICES_2) +- ), +- args=[ +- "devices={}".format(",".join(DEVICES_2)), +- "CRM_meta_interval=60000", +- ], +- ) +- self.config.corosync_conf.load_content( +- corosync_conf_fixture( +- self.existing_corosync_nodes +- + [[("ring0_addr", "custom_node"), ("nodeid", "5")]], +- ) +- ) +- self.config.env.set_known_nodes(self.existing_nodes[:-1]) +- self.env_assist.assert_raise_library_error( +- lambda: stonith.update_scsi_devices( +- self.env_assist.get_env(), SCSI_STONITH_ID, DEVICES_2 +- ), ++ ++@mock.patch.object( ++ settings, ++ "pacemaker_api_result_schema", ++ 
rc("pcmk_api_rng/api-result.rng"), ++) ++class UpdateScsiDevicesAddRemove(UpdateScsiDevicesMixin, TestCase): ++ def test_add_1_to_1(self): ++ self.assert_command_success( ++ devices_before=[DEV_1], ++ devices_updated=[DEV_1, DEV_2], ++ devices_add=[DEV_2], ++ devices_remove=[], ++ unfence=[DEV_2], + ) +- self.env_assist.assert_reports( +- [ +- fixture.error( +- reports.codes.COROSYNC_CONFIG_MISSING_NAMES_OF_NODES, +- fatal=True, +- ), +- fixture.error( +- reports.codes.HOST_NOT_FOUND, +- host_list=[self.existing_nodes[-1]], +- ), +- ] ++ ++ def test_add_2_to_1(self): ++ self.assert_command_success( ++ devices_before=[DEV_1], ++ devices_updated=[DEV_1, DEV_2, DEV_3], ++ devices_add=[DEV_2, DEV_3], ++ devices_remove=[], ++ unfence=[DEV_2, DEV_3], + ) + +- def _unfence_failure_common_calls(self): +- devices = ",".join(DEVICES_2) +- self.config.runner.pcmk.is_resource_digests_supported() +- self.config.runner.cib.load( +- resources=fixture_scsi(), +- status=_fixture_status_lrm_ops(SCSI_STONITH_ID), ++ def test_add_2_to_2_and_remove_1(self): ++ self.assert_command_success( ++ devices_before=[DEV_1, DEV_2], ++ devices_updated=[DEV_2, DEV_3, DEV_4], ++ devices_add=[DEV_3, DEV_4], ++ devices_remove=[DEV_1], ++ unfence=[DEV_3, DEV_4], + ) +- self.config.runner.pcmk.load_state( +- resources=FIXTURE_CRM_MON_RES_RUNNING_1, nodes=FIXTURE_CRM_MON_NODES ++ ++ def test_remove_1_from_2(self): ++ self.assert_command_success( ++ devices_before=[DEV_1, DEV_2], ++ devices_updated=[DEV_2], ++ devices_add=[], ++ devices_remove=[DEV_1], + ) +- self.config.runner.pcmk.resource_digests( +- SCSI_STONITH_ID, +- SCSI_NODE, +- name="start.op.digests", +- stdout=fixture_digests_xml( +- SCSI_STONITH_ID, +- SCSI_NODE, +- devices=devices, +- ), +- args=[f"devices={devices}"], ++ ++ def test_remove_2_from_3(self): ++ self.assert_command_success( ++ devices_before=[DEV_1, DEV_2, DEV_3], ++ devices_updated=[DEV_3], ++ devices_add=[], ++ devices_remove=[DEV_2, DEV_1], + ) +- self.config.runner.pcmk.resource_digests( +- SCSI_STONITH_ID, +- SCSI_NODE, +- name="monitor.op.digests", +- stdout=fixture_digests_xml( +- SCSI_STONITH_ID, +- SCSI_NODE, +- devices=devices, +- ), +- args=[ +- f"devices={devices}", +- "CRM_meta_interval=60000", +- ], ++ ++ def test_remove_2_from_3_add_1(self): ++ self.assert_command_success( ++ devices_before=[DEV_1, DEV_2, DEV_3], ++ devices_updated=[DEV_3, DEV_4], ++ devices_add=[DEV_4], ++ devices_remove=[DEV_2, DEV_1], ++ unfence=[DEV_4], + ) +- self.config.corosync_conf.load_content( +- corosync_conf_fixture(self.existing_corosync_nodes) ++ ++ def test_add_1_remove_1(self): ++ self.assert_command_success( ++ devices_before=[DEV_1, DEV_2], ++ devices_updated=[DEV_2, DEV_3], ++ devices_add=[DEV_3], ++ devices_remove=[DEV_1], ++ unfence=[DEV_3], + ) + +- def test_unfence_failure_unable_to_connect(self): +- self._unfence_failure_common_calls() +- self.config.http.corosync.get_corosync_online_targets( +- node_labels=self.existing_nodes ++ def test_add_2_remove_2(self): ++ self.assert_command_success( ++ devices_before=[DEV_1, DEV_2], ++ devices_updated=[DEV_3, DEV_4], ++ devices_add=[DEV_3, DEV_4], ++ devices_remove=[DEV_1, DEV_2], ++ unfence=[DEV_3, DEV_4], + ) +- self.config.http.scsi.unfence_node( +- DEVICES_2, +- communication_list=[ +- dict( +- label=self.existing_nodes[0], +- raw_data=json.dumps( +- dict(devices=DEVICES_2, node=self.existing_nodes[0]) +- ), +- was_connected=False, +- error_msg="errA", +- ), +- dict( +- label=self.existing_nodes[1], +- raw_data=json.dumps( +- dict(devices=DEVICES_2, 
node=self.existing_nodes[1]) +- ), +- output=json.dumps( +- dto.to_dict( +- communication.dto.InternalCommunicationResultDto( +- status=communication.const.COM_STATUS_ERROR, +- status_msg="error", +- report_list=[ +- reports.ReportItem.error( +- reports.messages.StonithUnfencingFailed( +- "errB" +- ) +- ).to_dto() +- ], +- data=None, +- ) +- ) +- ), +- ), +- dict( +- label=self.existing_nodes[2], +- raw_data=json.dumps( +- dict(devices=DEVICES_2, node=self.existing_nodes[2]) +- ), +- ), +- ], ++ ++ ++@mock.patch.object( ++ settings, ++ "pacemaker_api_result_schema", ++ rc("pcmk_api_rng/api-result.rng"), ++) ++class TestUpdateScsiDevicesAddRemoveFailures( ++ UpdateScsiDevicesFailuresMixin, TestCase ++): ++ def command(self, force_flags=()): ++ return lambda: stonith.update_scsi_devices_add_remove( ++ self.env_assist.get_env(), ++ SCSI_STONITH_ID, ++ [DEV_2], ++ [], ++ force_flags=force_flags, + ) ++ ++ def test_add_remove_are_empty(self): ++ """ ++ lower level tested in ++ pcs_test/tier0/lib/test_validate.ValidateAddRemoveItems ++ """ ++ self.config.runner.cib.load(resources=fixture_scsi()) ++ self.config.runner.pcmk.is_resource_digests_supported() + self.env_assist.assert_raise_library_error( +- lambda: stonith.update_scsi_devices( +- self.env_assist.get_env(), SCSI_STONITH_ID, DEVICES_2 +- ), ++ lambda: stonith.update_scsi_devices_add_remove( ++ self.env_assist.get_env(), SCSI_STONITH_ID, (), () ++ ) + ) + self.env_assist.assert_reports( + [ + fixture.error( +- reports.codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT, +- node=self.existing_nodes[0], +- command="api/v1/scsi-unfence-node/v1", +- reason="errA", +- ), ++ reports.codes.ADD_REMOVE_ITEMS_NOT_SPECIFIED, ++ container_type=reports.const.ADD_REMOVE_CONTAINER_TYPE_STONITH_RESOURCE, ++ item_type="device", ++ container_id=SCSI_STONITH_ID, ++ ) ++ ] ++ ) ++ ++ def test_not_supported_resource_type(self): ++ """ ++ lower level tested in ++ pcs_test.tier0.lib.cib.test_stonith.ValidateStonithRestartlessUpdate ++ """ ++ self.config.runner.cib.load(resources=fixture_scsi()) ++ self.config.runner.pcmk.is_resource_digests_supported() ++ self.env_assist.assert_raise_library_error( ++ lambda: stonith.update_scsi_devices_add_remove( ++ self.env_assist.get_env(), "dummy", [DEV_2], [DEV_1] ++ ) ++ ) ++ self.env_assist.assert_reports( ++ [ + fixture.error( +- reports.codes.STONITH_UNFENCING_FAILED, +- reason="errB", +- context=reports.dto.ReportItemContextDto( +- node=self.existing_nodes[1], +- ), +- ), ++ reports.codes.STONITH_RESTARTLESS_UPDATE_UNSUPPORTED_AGENT, ++ resource_id="dummy", ++ resource_type="Dummy", ++ supported_stonith_types=["fence_scsi"], ++ ) + ] + ) + +- def test_unfence_failure_agent_script_failed(self): +- self._unfence_failure_common_calls() +- self.config.http.corosync.get_corosync_online_targets( +- node_labels=self.existing_nodes ++ def test_stonith_resource_is_running_on_more_than_one_node(self): ++ self.config.runner.cib.load(resources=fixture_scsi()) ++ self.config.runner.pcmk.is_resource_digests_supported() ++ self.config.runner.pcmk.load_state( ++ resources=FIXTURE_CRM_MON_RES_RUNNING_2, nodes=FIXTURE_CRM_MON_NODES + ) +- self.config.http.scsi.unfence_node( +- DEVICES_2, +- communication_list=[ +- dict( +- label=self.existing_nodes[0], +- raw_data=json.dumps( +- dict(devices=DEVICES_2, node=self.existing_nodes[0]) +- ), +- ), +- dict( +- label=self.existing_nodes[1], +- raw_data=json.dumps( +- dict(devices=DEVICES_2, node=self.existing_nodes[1]) +- ), +- output=json.dumps( +- dto.to_dict( +- 
communication.dto.InternalCommunicationResultDto( +- status=communication.const.COM_STATUS_ERROR, +- status_msg="error", +- report_list=[ +- reports.ReportItem.error( +- reports.messages.StonithUnfencingFailed( +- "errB" +- ) +- ).to_dto() +- ], +- data=None, +- ) +- ) +- ), +- ), +- dict( +- label=self.existing_nodes[2], +- raw_data=json.dumps( +- dict(devices=DEVICES_2, node=self.existing_nodes[2]) ++ self.env_assist.assert_raise_library_error( ++ lambda: stonith.update_scsi_devices_add_remove( ++ self.env_assist.get_env(), SCSI_STONITH_ID, [DEV_2], [] ++ ), ++ [ ++ fixture.error( ++ reports.codes.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM, ++ reason=( ++ f"resource '{SCSI_STONITH_ID}' is running on more than " ++ "1 node" + ), +- ), ++ reason_type=reports.const.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM_REASON_OTHER, ++ ) + ], ++ expected_in_processor=False, + ) +- self.env_assist.assert_raise_library_error( +- lambda: stonith.update_scsi_devices( +- self.env_assist.get_env(), SCSI_STONITH_ID, DEVICES_2 +- ), ++ ++ ++class ValidateAddRemoveItems(TestCase): ++ CONTAINER_TYPE = ADD_REMOVE_CONTAINER_TYPE_STONITH_RESOURCE ++ ITEM_TYPE = ADD_REMOVE_ITEM_TYPE_DEVICE ++ CONTAINER_ID = "container_id" ++ ++ def _validate( ++ self, add, remove, current=None, adjacent=None, can_be_empty=False ++ ): ++ # pylint: disable=protected-access ++ return stonith._validate_add_remove_items( ++ add, ++ remove, ++ current, ++ self.CONTAINER_TYPE, ++ self.ITEM_TYPE, ++ self.CONTAINER_ID, ++ adjacent, ++ can_be_empty, + ) +- self.env_assist.assert_reports( ++ ++ def test_success_add_and_remove(self): ++ assert_report_item_list_equal( ++ self._validate(["a1"], ["c3"], ["b2", "c3"]), [] ++ ) ++ ++ def test_success_add_only(self): ++ assert_report_item_list_equal(self._validate(["b2"], [], ["a1"]), []) ++ ++ def test_success_remove_only(self): ++ assert_report_item_list_equal( ++ self._validate([], ["b2"], ["a1", "b2"]), [] ++ ) ++ ++ def test_add_remove_items_not_specified(self): ++ assert_report_item_list_equal( ++ self._validate([], [], ["a1", "b2", "c3"]), + [ + fixture.error( +- reports.codes.STONITH_UNFENCING_FAILED, +- reason="errB", +- context=reports.dto.ReportItemContextDto( +- node=self.existing_nodes[1], +- ), +- ), +- ] ++ reports.codes.ADD_REMOVE_ITEMS_NOT_SPECIFIED, ++ container_type=self.CONTAINER_TYPE, ++ item_type=self.ITEM_TYPE, ++ container_id=self.CONTAINER_ID, ++ ) ++ ], + ) + +- def test_corosync_targets_unable_to_connect(self): +- self._unfence_failure_common_calls() +- self.config.http.corosync.get_corosync_online_targets( +- communication_list=[ +- dict( +- label=self.existing_nodes[0], +- output='{"corosync":true}', +- ), +- ] +- + [ +- dict( +- label=node, +- was_connected=False, +- errno=7, +- error_msg="an error", ++ def test_add_remove_items_duplications(self): ++ assert_report_item_list_equal( ++ self._validate(["b2", "b2"], ["a1", "a1"], ["a1", "c3"]), ++ [ ++ fixture.error( ++ reports.codes.ADD_REMOVE_ITEMS_DUPLICATION, ++ container_type=self.CONTAINER_TYPE, ++ item_type=self.ITEM_TYPE, ++ container_id=self.CONTAINER_ID, ++ duplicate_items_list=["a1", "b2"], + ) +- for node in self.existing_nodes[1:] +- ] ++ ], + ) +- self.env_assist.assert_raise_library_error( +- lambda: stonith.update_scsi_devices( +- self.env_assist.get_env(), SCSI_STONITH_ID, DEVICES_2 +- ), ++ ++ def test_add_items_already_in_container(self): ++ assert_report_item_list_equal( ++ self._validate(["a1", "b2"], [], ["a1", "b2", "c3"]), ++ [ ++ fixture.error( ++ 
reports.codes.ADD_REMOVE_CANNOT_ADD_ITEMS_ALREADY_IN_THE_CONTAINER, ++ container_type=self.CONTAINER_TYPE, ++ item_type=self.ITEM_TYPE, ++ container_id=self.CONTAINER_ID, ++ item_list=["a1", "b2"], ++ ), ++ ], + ) +- self.env_assist.assert_reports( ++ ++ def test_remove_items_not_in_container(self): ++ assert_report_item_list_equal( ++ self._validate([], ["a1", "b2"], ["c3"]), + [ + fixture.error( +- reports.codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT, +- force_code=reports.codes.SKIP_OFFLINE_NODES, +- node=node, +- command="remote/status", +- reason="an error", ++ reports.codes.ADD_REMOVE_CANNOT_REMOVE_ITEMS_NOT_IN_THE_CONTAINER, ++ container_type=self.CONTAINER_TYPE, ++ item_type=self.ITEM_TYPE, ++ container_id=self.CONTAINER_ID, ++ item_list=["a1", "b2"], + ) +- for node in self.existing_nodes[1:] +- ] ++ ], + ) + +- def test_corosync_targets_skip_offline_unfence_node_running_corosync( +- self, +- ): +- self._unfence_failure_common_calls() +- self.config.http.corosync.get_corosync_online_targets( +- communication_list=[ +- dict( +- label=self.existing_nodes[0], +- output='{"corosync":true}', ++ def test_add_remove_items_at_the_same_time(self): ++ assert_report_item_list_equal( ++ self._validate( ++ ["a1", "a1", "b2", "b2"], ["b2", "b2", "a1", "a1"], ["c3"] ++ ), ++ [ ++ fixture.error( ++ reports.codes.ADD_REMOVE_ITEMS_DUPLICATION, ++ container_type=self.CONTAINER_TYPE, ++ item_type=self.ITEM_TYPE, ++ container_id=self.CONTAINER_ID, ++ duplicate_items_list=["a1", "b2"], + ), +- dict( +- label=self.existing_nodes[1], +- output='{"corosync":false}', ++ fixture.error( ++ reports.codes.ADD_REMOVE_CANNOT_REMOVE_ITEMS_NOT_IN_THE_CONTAINER, ++ container_type=self.CONTAINER_TYPE, ++ item_type=self.ITEM_TYPE, ++ container_id=self.CONTAINER_ID, ++ item_list=["a1", "b2"], + ), +- dict( +- label=self.existing_nodes[2], +- was_connected=False, +- errno=7, +- error_msg="an error", ++ fixture.error( ++ reports.codes.ADD_REMOVE_CANNOT_ADD_AND_REMOVE_ITEMS_AT_THE_SAME_TIME, ++ container_type=self.CONTAINER_TYPE, ++ item_type=self.ITEM_TYPE, ++ container_id=self.CONTAINER_ID, ++ item_list=["a1", "b2"], + ), +- ] ++ ], + ) +- self.config.http.scsi.unfence_node( +- DEVICES_2, +- communication_list=[ +- dict( +- label=self.existing_nodes[0], +- raw_data=json.dumps( +- dict(devices=DEVICES_2, node=self.existing_nodes[0]) +- ), ++ ++ def test_remove_all_items(self): ++ assert_report_item_list_equal( ++ self._validate([], ["a1", "b2"], ["a1", "b2"]), ++ [ ++ fixture.error( ++ reports.codes.ADD_REMOVE_CANNOT_REMOVE_ALL_ITEMS_FROM_THE_CONTAINER, ++ container_type=self.CONTAINER_TYPE, ++ item_type=self.ITEM_TYPE, ++ container_id=self.CONTAINER_ID, ++ item_list=["a1", "b2"], + ), + ], + ) +- self.config.env.push_cib( +- resources=fixture_scsi(devices=DEVICES_2), +- status=_fixture_status_lrm_ops( +- SCSI_STONITH_ID, +- lrm_start_ops=DEFAULT_LRM_START_OPS_UPDATED, +- lrm_monitor_ops=DEFAULT_LRM_MONITOR_OPS_UPDATED, +- ), ++ ++ def test_remove_all_items_can_be_empty(self): ++ assert_report_item_list_equal( ++ self._validate([], ["a1", "b2"], ["a1", "b2"], can_be_empty=True), ++ [], + ) +- stonith.update_scsi_devices( +- self.env_assist.get_env(), +- SCSI_STONITH_ID, +- DEVICES_2, +- force_flags=[reports.codes.SKIP_OFFLINE_NODES], ++ ++ def test_remove_all_items_and_add_new_one(self): ++ assert_report_item_list_equal( ++ self._validate(["c3"], ["a1", "b2"], ["a1", "b2"]), ++ [], + ) +- self.env_assist.assert_reports( ++ ++ def test_missing_adjacent_item(self): ++ assert_report_item_list_equal( ++ 
self._validate(["a1", "b2"], [], ["c3"], adjacent="d4"), + [ +- fixture.warn( +- reports.codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT, +- node=self.existing_nodes[2], +- command="remote/status", +- reason="an error", ++ fixture.error( ++ reports.codes.ADD_REMOVE_ADJACENT_ITEM_NOT_IN_THE_CONTAINER, ++ container_type=self.CONTAINER_TYPE, ++ item_type=self.ITEM_TYPE, ++ container_id=self.CONTAINER_ID, ++ adjacent_item_id="d4", + ), +- ] ++ ], + ) + +- def test_corosync_targets_unable_to_perform_unfencing_operation( +- self, +- ): +- self._unfence_failure_common_calls() +- self.config.http.corosync.get_corosync_online_targets( +- communication_list=[ +- dict( +- label=self.existing_nodes[0], +- was_connected=False, +- errno=7, +- error_msg="an error", +- ), +- dict( +- label=self.existing_nodes[1], +- was_connected=False, +- errno=7, +- error_msg="an error", +- ), +- dict( +- label=self.existing_nodes[2], +- output='{"corosync":false}', ++ def test_adjacent_item_in_add_list(self): ++ assert_report_item_list_equal( ++ self._validate(["a1", "b2"], [], ["a1"], adjacent="a1"), ++ [ ++ fixture.error( ++ reports.codes.ADD_REMOVE_CANNOT_PUT_ITEM_NEXT_TO_ITSELF, ++ container_type=self.CONTAINER_TYPE, ++ item_type=self.ITEM_TYPE, ++ container_id=self.CONTAINER_ID, ++ adjacent_item_id="a1", + ), +- ] +- ) +- self.config.http.scsi.unfence_node(DEVICES_2, communication_list=[]) +- self.env_assist.assert_raise_library_error( +- lambda: stonith.update_scsi_devices( +- self.env_assist.get_env(), +- SCSI_STONITH_ID, +- DEVICES_2, +- force_flags=[reports.codes.SKIP_OFFLINE_NODES], +- ), ++ ], + ) +- self.env_assist.assert_reports( ++ ++ def test_adjacent_item_without_add_list(self): ++ assert_report_item_list_equal( ++ self._validate([], ["b2"], ["a1", "b2"], adjacent="a1"), + [ +- fixture.warn( +- reports.codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT, +- node=node, +- command="remote/status", +- reason="an error", +- ) +- for node in self.existing_nodes[0:2] +- ] +- + [ + fixture.error( +- reports.codes.UNABLE_TO_PERFORM_OPERATION_ON_ANY_NODE, ++ reports.codes.ADD_REMOVE_CANNOT_SPECIFY_ADJACENT_ITEM_WITHOUT_ITEMS_TO_ADD, ++ container_type=self.CONTAINER_TYPE, ++ item_type=self.ITEM_TYPE, ++ container_id=self.CONTAINER_ID, ++ adjacent_item_id="a1", + ), +- ] ++ ], + ) +diff --git a/pcsd/capabilities.xml b/pcsd/capabilities.xml +index 745b05ad..58ebcf0f 100644 +--- a/pcsd/capabilities.xml ++++ b/pcsd/capabilities.xml +@@ -1884,6 +1884,14 @@ + pcs commands: stonith update-scsi-devices + + ++ ++ ++ Update scsi fencing devices without affecting other resources using ++ add/remove cli syntax. ++ ++ pcs commands: stonith update-scsi-devices ++ ++ + + + Unfence scsi devices on a cluster node. 
+-- +2.31.1 + diff --git a/SOURCES/bz1998454-01-fix-creating-resources-with-depth-operation-attribut.patch b/SOURCES/bz1998454-01-fix-creating-resources-with-depth-operation-attribut.patch new file mode 100644 index 0000000..4616131 --- /dev/null +++ b/SOURCES/bz1998454-01-fix-creating-resources-with-depth-operation-attribut.patch @@ -0,0 +1,45 @@ +From 189c73e31f5033413fc4483e40d0bfc78d77f962 Mon Sep 17 00:00:00 2001 +From: Tomas Jelinek +Date: Fri, 27 Aug 2021 12:05:18 +0200 +Subject: [PATCH 1/2] fix creating resources with depth operation attribute + +--- + CHANGELOG.md | 9 +++++++++ + pcs/lib/cib/resource/operations.py | 2 +- + 2 files changed, 10 insertions(+), 1 deletion(-) + +diff --git a/CHANGELOG.md b/CHANGELOG.md +index f768cc36..c15546ba 100644 +--- a/CHANGELOG.md ++++ b/CHANGELOG.md +@@ -1,5 +1,14 @@ + # Change Log + ++## [Unreleased] ++ ++### Fixed ++- Fixed an error when creating a resource which defines 'depth' attribute for ++ its operations ([rhbz#1998454]) ++ ++[rhbz#1998454]: https://bugzilla.redhat.com/show_bug.cgi?id=1998454 ++ ++ + ## [0.10.10] - 2021-08-19 + + ### Added +diff --git a/pcs/lib/cib/resource/operations.py b/pcs/lib/cib/resource/operations.py +index 390db71a..44b2e7dd 100644 +--- a/pcs/lib/cib/resource/operations.py ++++ b/pcs/lib/cib/resource/operations.py +@@ -197,7 +197,7 @@ def _action_dto_to_dict( + ) -> Dict[str, str]: + result = dict( + filter( +- lambda item: item[0] != "deph" and item[1] not in (None, ""), ++ lambda item: item[0] != "depth" and item[1] not in (None, ""), + to_dict(dto).items(), + ) + ) +-- +2.31.1 + diff --git a/SOURCES/do-not-support-cluster-setup-with-udp-u-transport.patch b/SOURCES/do-not-support-cluster-setup-with-udp-u-transport.patch index a0a7aab..a23583a 100644 --- a/SOURCES/do-not-support-cluster-setup-with-udp-u-transport.patch +++ b/SOURCES/do-not-support-cluster-setup-with-udp-u-transport.patch @@ -1,19 +1,19 @@ -From ab9fd9f223e805247319ac5a7318c15417197a0a Mon Sep 17 00:00:00 2001 +From e46d60cb36cb8ca4b153f75caa20b165945b1d26 Mon Sep 17 00:00:00 2001 From: Ivan Devat Date: Tue, 20 Nov 2018 15:03:56 +0100 -Subject: [PATCH] do not support cluster setup with udp(u) transport +Subject: [PATCH 2/2] do not support cluster setup with udp(u) transport --- - pcs/pcs.8 | 2 ++ + pcs/pcs.8.in | 2 ++ pcs/usage.py | 1 + pcsd/public/css/style.css | 3 +++ 3 files changed, 6 insertions(+) -diff --git a/pcs/pcs.8 b/pcs/pcs.8 -index edfdd039..8caf087f 100644 ---- a/pcs/pcs.8 -+++ b/pcs/pcs.8 -@@ -424,6 +424,8 @@ By default, encryption is enabled with cipher=aes256 and hash=sha256. To disable +diff --git a/pcs/pcs.8.in b/pcs/pcs.8.in +index 1695d75c..80d165fc 100644 +--- a/pcs/pcs.8.in ++++ b/pcs/pcs.8.in +@@ -429,6 +429,8 @@ By default, encryption is enabled with cipher=aes256 and hash=sha256. To disable Transports udp and udpu: .br @@ -23,10 +23,10 @@ index edfdd039..8caf087f 100644 .br Transport options are: ip_version, netmtu diff --git a/pcs/usage.py b/pcs/usage.py -index baedb347..f576eaf2 100644 +index 66e097f1..783d926d 100644 --- a/pcs/usage.py +++ b/pcs/usage.py -@@ -852,6 +852,7 @@ Commands: +@@ -872,6 +872,7 @@ Commands: hash=sha256. To disable encryption, set cipher=none and hash=none. Transports udp and udpu: @@ -35,7 +35,7 @@ index baedb347..f576eaf2 100644 support traffic encryption nor compression. 
Transport options are: diff --git a/pcsd/public/css/style.css b/pcsd/public/css/style.css -index b857cbae..b8d48d92 100644 +index 2f26e831..a7702ac4 100644 --- a/pcsd/public/css/style.css +++ b/pcsd/public/css/style.css @@ -949,6 +949,9 @@ table.args-table td.reg { @@ -49,5 +49,5 @@ index b857cbae..b8d48d92 100644 #csetup-transport-options.knet .without-knet { -- -2.26.2 +2.31.1 diff --git a/SOURCES/pcsd-bundle-config-2 b/SOURCES/pcsd-bundle-config-2 deleted file mode 100644 index c067a62..0000000 --- a/SOURCES/pcsd-bundle-config-2 +++ /dev/null @@ -1,5 +0,0 @@ ---- -BUNDLE_FROZEN: '1' -BUNDLE_PATH: vendor/bundle -BUNDLE_DISABLE_SHARED_GEMS: '1' -BUNDLE_BUILD: --with-ldflags="-Wl,-z,now -Wl,-z,relro" diff --git a/SPECS/pcs.spec b/SPECS/pcs.spec index 9725456..aa28ddc 100644 --- a/SPECS/pcs.spec +++ b/SPECS/pcs.spec @@ -1,6 +1,6 @@ Name: pcs -Version: 0.10.8 -Release: 1%{?dist} +Version: 0.10.10 +Release: 4%{?dist} # https://docs.fedoraproject.org/en-US/packaging-guidelines/LicensingGuidelines/ # https://fedoraproject.org/wiki/Licensing:Main?rd=Licensing#Good_Licenses # GPLv2: pcs @@ -20,13 +20,13 @@ Summary: Pacemaker Configuration System ExclusiveArch: i686 x86_64 s390x ppc64le aarch64 %global version_or_commit %{version} -# %%global version_or_commit 508b3999eb02b4901e83b8e780af8422b522ad30 +# %%global version_or_commit %%{version}.210-9862 %global pcs_source_name %{name}-%{version_or_commit} # ui_commit can be determined by hash, tag or branch -%global ui_commit 0.1.5 -%global ui_modules_version 0.1.5 +%global ui_commit 0.1.7 +%global ui_modules_version 0.1.7 %global ui_src_name pcs-web-ui-%{ui_commit} %global pcs_snmp_pkg_name pcs-snmp @@ -47,6 +47,7 @@ ExclusiveArch: i686 x86_64 s390x ppc64le aarch64 %global version_rubygem_rack 2.2.3 %global version_rubygem_rack_protection 2.0.8.1 %global version_rubygem_rack_test 1.1.0 +%global version_rubygem_rexml 3.2.5 %global version_rubygem_ruby2_keywords 0.0.2 %global version_rubygem_sinatra 2.0.8.1 %global version_rubygem_thin 1.7.2 @@ -55,20 +56,13 @@ ExclusiveArch: i686 x86_64 s390x ppc64le aarch64 # javascript bundled libraries for old web-ui %global ember_version 1.4.0 %global handlebars_version 1.2.1 -%global jquery_ui_version 1.10.1 -%global jquery_version 1.9.1 - -# We do not use _libdir macro because upstream is not prepared for it. -# Pcs does not include binaries and thus it should live in /usr/lib. Tornado -# and gems include binaries and thus it should live in /usr/lib64. But the -# path to tornado/gems is hardcoded in pcs sources. Modify hard links in pcs -# sources is not the way since then rpmdiff complains that the same file has -# different content in different architectures. 
-%global pcs_libdir %{_prefix}/lib -%global bundled_src_dir pcs/bundled +%global jquery_ui_version 1.12.1 +%global jquery_version 3.6.0 + +%global pcs_bundled_dir pcs_bundled %global pcsd_public_dir pcsd/public -%global rubygem_cache_dir pcsd/vendor/cache -%global rubygem_bundle_dir pcsd/vendor/bundle/ruby +%global rubygem_bundle_dir pcsd/vendor/bundle +%global rubygem_cache_dir %{rubygem_bundle_dir}/cache # mangling shebang in /usr/lib/pcsd/vendor/bundle/ruby/gems/rack-2.0.5/test/cgi/test from /usr/bin/env ruby to #!/usr/bin/ruby #*** ERROR: ./usr/lib/pcsd/vendor/bundle/ruby/gems/rack-2.0.5/test/cgi/test.ru has shebang which doesn't start with '/' (../../bin/rackup) @@ -87,7 +81,6 @@ ExclusiveArch: i686 x86_64 s390x ppc64le aarch64 Source0: %{url}/archive/%{version_or_commit}/%{pcs_source_name}.tar.gz Source1: HAM-logo.png -Source2: pcsd-bundle-config-2 Source41: https://github.com/ondrejmular/pyagentx/archive/v%{pyagentx_version}/pyagentx-%{pyagentx_version}.tar.gz Source42: https://github.com/tornadoweb/tornado/archive/v%{tornado_version}/tornado-%{tornado_version}.tar.gz @@ -99,6 +92,7 @@ Source81: https://rubygems.org/downloads/backports-%{version_rubygem_backports}. Source82: https://rubygems.org/downloads/ethon-%{version_rubygem_ethon}.gem Source83: https://rubygems.org/downloads/ffi-%{version_rubygem_ffi}.gem Source84: https://rubygems.org/downloads/json-%{version_rubygem_json}.gem +Source85: https://rubygems.org/downloads/rexml-%{version_rubygem_rexml}.gem Source86: https://rubygems.org/downloads/mustermann-%{version_rubygem_mustermann}.gem # We needed to re-upload open4 rubygem because of issues with sources in gating. # Unfortunately, there was no newer version available, therefore we had to @@ -114,14 +108,18 @@ Source94: https://rubygems.org/downloads/daemons-%{version_rubygem_daemons}.gem Source95: https://rubygems.org/downloads/thin-%{version_rubygem_thin}.gem Source96: https://rubygems.org/downloads/ruby2_keywords-%{version_rubygem_ruby2_keywords}.gem -Source100: https://github.com/idevat/pcs-web-ui/archive/%{ui_commit}/%{ui_src_name}.tar.gz -Source101: https://github.com/idevat/pcs-web-ui/releases/download/%{ui_modules_version}/pcs-web-ui-node-modules-%{ui_modules_version}.tar.xz +Source100: https://github.com/ClusterLabs/pcs-web-ui/archive/%{ui_commit}/%{ui_src_name}.tar.gz +Source101: https://github.com/ClusterLabs/pcs-web-ui/releases/download/%{ui_modules_version}/pcs-web-ui-node-modules-%{ui_modules_version}.tar.xz # Patches from upstream. # They should come before downstream patches to avoid unnecessary conflicts. # Z-streams are exception here: they can come from upstream but should be # applied at the end to keep z-stream changes as straightforward as possible. # Patch1: bzNUMBER-01-name.patch +Patch1: bz1998454-01-fix-creating-resources-with-depth-operation-attribut.patch +Patch2: add-missing-file-test_stonith_update_scsi_devices.py.patch +Patch3: bz1992668-01-add-add-remove-syntax-for-command-pcs-stonith-update.patch +Patch4: bz1991654-01-fix-unfencing-in-pcs-stonith-update-scsi-devices.patch # Downstream patches do not come from upstream. They adapt pcs for specific # RHEL needs. 
@@ -136,7 +134,12 @@ BuildRequires: platform-python
 BuildRequires: python3-devel
 BuildRequires: platform-python-setuptools
 BuildRequires: python3-pycurl
+BuildRequires: python3-pip
 BuildRequires: python3-pyparsing
+BuildRequires: python3-cryptography
+BuildRequires: python3-lxml
+# for building bundled python packages
+BuildRequires: python3-wheel
 # for bundled python dateutil
 BuildRequires: python3-setuptools_scm
 # gcc for compiling custom rubygems
@@ -146,15 +149,13 @@ BuildRequires: gcc-c++
 BuildRequires: ruby >= 2.2.0
 BuildRequires: ruby-devel
 BuildRequires: rubygems
+BuildRequires: rubygem-bundler
 # ruby libraries for tests
 BuildRequires: rubygem-test-unit
 # for touching patch files (sanitization function)
 BuildRequires: diffstat
 # for post, preun and postun macros
 BuildRequires: systemd
-# for tests
-BuildRequires: python3-lxml
-BuildRequires: python3-pyOpenSSL
 # pcsd fonts and font management tools for creating symlinks to fonts
 BuildRequires: fontconfig
 BuildRequires: liberation-sans-fonts
@@ -166,6 +167,15 @@ BuildRequires: redhat-logos
 # for building web ui
 BuildRequires: npm

+# cluster stack packages for pkg-config
+BuildRequires: booth
+BuildRequires: corosync-qdevice-devel
+BuildRequires: corosynclib-devel >= 3.0
+BuildRequires: fence-agents-common
+BuildRequires: pacemaker-libs-devel >= 2.0.0
+BuildRequires: resource-agents
+BuildRequires: sbd
+
 # python and libraries for pcs, setuptools for pcs entrypoint
 Requires: platform-python
 Requires: python3-lxml
@@ -173,14 +183,12 @@ Requires: platform-python-setuptools
 Requires: python3-clufter => 0.70.0
 Requires: python3-pycurl
 Requires: python3-pyparsing
+Requires: python3-cryptography
 # ruby and gems for pcsd
 Requires: ruby >= 2.2.0
 Requires: rubygems
 # for killall
 Requires: psmisc
-# for working with certificates (validation etc.)
-Requires: openssl
-Requires: python3-pyOpenSSL
 # cluster stack and related packages
 Requires: pcmk-cluster-manager >= 2.0.0
 Suggests: pacemaker
@@ -218,6 +226,7 @@ Provides: bundled(open4) = %{version_rubygem_open4}
 Provides: bundled(rack) = %{version_rubygem_rack}
 Provides: bundled(rack_protection) = %{version_rubygem_rack_protection}
 Provides: bundled(rack_test) = %{version_rubygem_rack_test}
+Provides: bundled(rexml) = %{version_rubygem_rexml}
 Provides: bundled(ruby2_keywords) = %{version_rubygem_ruby2_keywords}
 Provides: bundled(sinatra) = %{version_rubygem_sinatra}
 Provides: bundled(thin) = %{version_rubygem_thin}
@@ -297,10 +306,13 @@ update_times_patch(){
     update_times ${patch_file_name} `diffstat -p1 -l ${patch_file_name}`
 }

-# update_times_patch %%{PATCH1}
+update_times_patch %{PATCH1}
+update_times_patch %{PATCH2}
+update_times_patch %{PATCH3}
+update_times_patch %{PATCH4}
 update_times_patch %{PATCH101}

-cp -f %SOURCE1 pcsd/public/images
+cp -f %SOURCE1 %{pcsd_public_dir}/images
 # prepare dirs/files necessary for building web ui
 # inside SOURCE100 is only directory %%{ui_src_name}
 tar -xzf %SOURCE100 -C %{pcsd_public_dir}
@@ -308,104 +320,54 @@ tar -xf %SOURCE101 -C %{pcsd_public_dir}/%{ui_src_name}

 # prepare dirs/files necessary for building all bundles
 # -----------------------------------------------------
-# 1) configuration for rubygems
-mkdir -p pcsd/.bundle
-cp -f %SOURCE2 pcsd/.bundle/config
-
-# 2) rubygems sources
-mkdir -p pcsd/vendor/cache
-cp -f %SOURCE81 pcsd/vendor/cache
-cp -f %SOURCE82 pcsd/vendor/cache
-cp -f %SOURCE83 pcsd/vendor/cache
-cp -f %SOURCE84 pcsd/vendor/cache
-cp -f %SOURCE86 pcsd/vendor/cache
+# 1) rubygems sources
+
+mkdir -p %{rubygem_cache_dir}
+cp -f %SOURCE81 %{rubygem_cache_dir}
+cp -f %SOURCE82 %{rubygem_cache_dir}
+cp -f %SOURCE83 %{rubygem_cache_dir}
+cp -f %SOURCE84 %{rubygem_cache_dir}
+cp -f %SOURCE85 %{rubygem_cache_dir}
+cp -f %SOURCE86 %{rubygem_cache_dir}
 # For reason why we are renaming open4 rubygem, see comment of source
 # definition above.
-cp -f %SOURCE87 pcsd/vendor/cache/open4-%{version_rubygem_open4}.gem
-cp -f %SOURCE88 pcsd/vendor/cache
-cp -f %SOURCE89 pcsd/vendor/cache
-cp -f %SOURCE90 pcsd/vendor/cache
-cp -f %SOURCE91 pcsd/vendor/cache
-cp -f %SOURCE92 pcsd/vendor/cache
-cp -f %SOURCE93 pcsd/vendor/cache
-cp -f %SOURCE94 pcsd/vendor/cache
-cp -f %SOURCE95 pcsd/vendor/cache
-cp -f %SOURCE96 pcsd/vendor/cache
-
-
-# 3) dir for python bundles
-mkdir -p %{bundled_src_dir}
-
-# 4) sources for pyagentx
-tar -xzf %SOURCE41 -C %{bundled_src_dir}
-mv %{bundled_src_dir}/pyagentx-%{pyagentx_version} %{bundled_src_dir}/pyagentx
-update_times %SOURCE41 `find %{bundled_src_dir}/pyagentx -follow`
-cp %{bundled_src_dir}/pyagentx/LICENSE.txt pyagentx_LICENSE.txt
-cp %{bundled_src_dir}/pyagentx/CONTRIBUTORS.txt pyagentx_CONTRIBUTORS.txt
-cp %{bundled_src_dir}/pyagentx/README.md pyagentx_README.md
-
-# 5) sources for tornado
-tar -xzf %SOURCE42 -C %{bundled_src_dir}
-mv %{bundled_src_dir}/tornado-%{tornado_version} %{bundled_src_dir}/tornado
-update_times %SOURCE42 `find %{bundled_src_dir}/tornado -follow`
-cp %{bundled_src_dir}/tornado/LICENSE tornado_LICENSE
-cp %{bundled_src_dir}/tornado/README.rst tornado_README.rst
-
-# 6) sources for python dataclasses
-tar -xzf %SOURCE43 -C %{bundled_src_dir}
-mv %{bundled_src_dir}/dataclasses-%{dataclasses_version} %{bundled_src_dir}/dataclasses
-update_times %SOURCE43 `find %{bundled_src_dir}/dataclasses -follow`
-cp %{bundled_src_dir}/dataclasses/LICENSE.txt dataclasses_LICENSE.txt
-cp %{bundled_src_dir}/dataclasses/README.rst dataclasses_README.rst
-
-# 7) sources for python dacite
-tar -xzf %SOURCE44 -C %{bundled_src_dir}
-mv %{bundled_src_dir}/dacite-%{dacite_version} %{bundled_src_dir}/dacite
-update_times %SOURCE44 `find %{bundled_src_dir}/dacite -follow`
-cp %{bundled_src_dir}/dacite/LICENSE dacite_LICENSE
-cp %{bundled_src_dir}/dacite/README.md dacite_README.md
-
-# 8) sources for python dateutil
-tar -xzf %SOURCE45 -C %{bundled_src_dir}
-mv %{bundled_src_dir}/python-dateutil-%{dateutil_version} %{bundled_src_dir}/python-dateutil
-update_times %SOURCE45 `find %{bundled_src_dir}/python-dateutil -follow`
-cp %{bundled_src_dir}/python-dateutil/LICENSE dateutil_LICENSE
-cp %{bundled_src_dir}/python-dateutil/README.rst dateutil_README.rst
+cp -f %SOURCE87 %{rubygem_cache_dir}/open4-%{version_rubygem_open4}.gem
+cp -f %SOURCE88 %{rubygem_cache_dir}
+cp -f %SOURCE89 %{rubygem_cache_dir}
+cp -f %SOURCE90 %{rubygem_cache_dir}
+cp -f %SOURCE91 %{rubygem_cache_dir}
+cp -f %SOURCE92 %{rubygem_cache_dir}
+cp -f %SOURCE93 %{rubygem_cache_dir}
+cp -f %SOURCE94 %{rubygem_cache_dir}
+cp -f %SOURCE95 %{rubygem_cache_dir}
+cp -f %SOURCE96 %{rubygem_cache_dir}
+
+
+# 2) prepare python bundles
+mkdir -p %{pcs_bundled_dir}/src
+cp -f %SOURCE41 rpm/
+cp -f %SOURCE42 rpm/
+cp -f %SOURCE43 rpm/
+cp -f %SOURCE44 rpm/
+cp -f %SOURCE45 rpm/

 %build
 %define debug_package %{nil}

+./autogen.sh
+%{configure} --enable-local-build --enable-use-local-cache-only --enable-individual-bundling PYTHON=%{__python3}
+make all
+
 %install
 rm -rf $RPM_BUILD_ROOT
 pwd
-# build bundled rubygems (in main install it is disabled by BUILD_GEMS=false)
-mkdir -p %{rubygem_bundle_dir}
-# The '-g' cflags option is needed for generation of MiniDebugInfo for shared
-# libraries from rubygem extensions
-# Currently used rubygems with extensions: eventmachine, ffi, json, thin
-# There was rpmdiff issue with missing .gnu_debugdata section
-# see https://docs.engineering.redhat.com/display/HTD/rpmdiff-elf-stripping
-gem install \
-    --force --verbose --no-rdoc --no-ri -l --no-user-install \
-    -i %{rubygem_bundle_dir} \
-    %{rubygem_cache_dir}/backports-%{version_rubygem_backports}.gem \
-    %{rubygem_cache_dir}/daemons-%{version_rubygem_daemons}.gem \
-    %{rubygem_cache_dir}/ethon-%{version_rubygem_ethon}.gem \
-    %{rubygem_cache_dir}/eventmachine-%{version_rubygem_eventmachine}.gem \
-    %{rubygem_cache_dir}/ffi-%{version_rubygem_ffi}.gem \
-    %{rubygem_cache_dir}/json-%{version_rubygem_json}.gem \
-    %{rubygem_cache_dir}/mustermann-%{version_rubygem_mustermann}.gem \
-    %{rubygem_cache_dir}/open4-%{version_rubygem_open4}.gem \
-    %{rubygem_cache_dir}/rack-protection-%{version_rubygem_rack_protection}.gem \
-    %{rubygem_cache_dir}/rack-test-%{version_rubygem_rack_test}.gem \
-    %{rubygem_cache_dir}/rack-%{version_rubygem_rack}.gem \
-    %{rubygem_cache_dir}/ruby2_keywords-%{version_rubygem_ruby2_keywords}.gem \
-    %{rubygem_cache_dir}/sinatra-%{version_rubygem_sinatra}.gem \
-    %{rubygem_cache_dir}/thin-%{version_rubygem_thin}.gem \
-    %{rubygem_cache_dir}/tilt-%{version_rubygem_tilt}.gem \
-    -- '--with-ldflags="-Wl,-z,relro -Wl,-z,ibt -Wl,-z,now -Wl,--gc-sections"' \
-    '--with-cflags="-g -O2 -ffunction-sections"'
+%make_install
+
+# build web ui and put it to pcsd
+make -C %{pcsd_public_dir}/%{ui_src_name} build
+mv %{pcsd_public_dir}/%{ui_src_name}/build ${RPM_BUILD_ROOT}%{_libdir}/%{pcsd_public_dir}/ui
+rm -r %{pcsd_public_dir}/%{ui_src_name}

 # prepare license files
 # some rubygems do not have a license file (ruby2_keywords, thin)
@@ -426,36 +388,25 @@ mv %{rubygem_bundle_dir}/gems/rack-test-%{version_rubygem_rack_test}/MIT-LICENSE
 mv %{rubygem_bundle_dir}/gems/sinatra-%{version_rubygem_sinatra}/LICENSE sinatra_LICENSE
 mv %{rubygem_bundle_dir}/gems/tilt-%{version_rubygem_tilt}/COPYING tilt_COPYING

-# build web ui and put it to pcsd
-make -C %{pcsd_public_dir}/%{ui_src_name} build
-mv %{pcsd_public_dir}/%{ui_src_name}/build pcsd/public/ui
-rm -r %{pcsd_public_dir}/%{ui_src_name}
+# symlink favicon into pcsd directories
+ln -fs /etc/favicon.png ${RPM_BUILD_ROOT}%{_libdir}/%{pcsd_public_dir}/images/favicon.png

-# main pcs install
-make install \
-    DESTDIR=$RPM_BUILD_ROOT \
-    PREFIX=%{_prefix} \
-    SYSTEMD_UNIT_DIR=%{_unitdir} \
-    LIB_DIR=%{pcs_libdir} \
-    PYTHON=%{__python3} \
-    PYTHON_SITELIB=%{python3_sitelib} \
-    BASH_COMPLETION_DIR=%{_datadir}/bash-completion/completions \
-    BUNDLE_PYAGENTX_SRC_DIR=`readlink -f %{bundled_src_dir}/pyagentx` \
-    BUNDLE_TORNADO_SRC_DIR=`readlink -f %{bundled_src_dir}/tornado` \
-    BUNDLE_DACITE_SRC_DIR=`readlink -f %{bundled_src_dir}/dacite` \
-    BUNDLE_DATEUTIL_SRC_DIR=`readlink -f %{bundled_src_dir}/python-dateutil` \
-    BUNDLE_DATACLASSES_SRC_DIR=`readlink -f %{bundled_src_dir}/dataclasses` \
-    BUILD_GEMS=false \
-    SYSTEMCTL_OVERRIDE=true \
-    hdrdir="%{_includedir}" \
-    rubyhdrdir="%{_includedir}" \
-    includedir="%{_includedir}"
-# symlink favicon into pcsd directories
-ln -fs /etc/favicon.png ${RPM_BUILD_ROOT}%{pcs_libdir}/%{pcsd_public_dir}/images/favicon.png
+cp %{pcs_bundled_dir}/src/pyagentx-*/LICENSE.txt pyagentx_LICENSE.txt
+cp %{pcs_bundled_dir}/src/pyagentx-*/CONTRIBUTORS.txt pyagentx_CONTRIBUTORS.txt
+cp %{pcs_bundled_dir}/src/pyagentx-*/README.md pyagentx_README.md
+
+cp %{pcs_bundled_dir}/src/tornado-*/LICENSE tornado_LICENSE
+cp %{pcs_bundled_dir}/src/tornado-*/README.rst tornado_README.rst
+
+cp %{pcs_bundled_dir}/src/dataclasses-*/LICENSE.txt dataclasses_LICENSE.txt
+cp %{pcs_bundled_dir}/src/dataclasses-*/README.rst dataclasses_README.rst
+
+cp %{pcs_bundled_dir}/src/dacite-*/LICENSE dacite_LICENSE
+cp %{pcs_bundled_dir}/src/dacite-*/README.md dacite_README.md

-#after the ruby gem compilation we do not need ruby gems in the cache
-rm -r -v $RPM_BUILD_ROOT%{pcs_libdir}/%{rubygem_cache_dir}
+cp %{pcs_bundled_dir}/src/python-dateutil-*/LICENSE dateutil_LICENSE
+cp %{pcs_bundled_dir}/src/python-dateutil-*/README.rst dateutil_README.rst

 # We are not building debug package for pcs but we need to add MiniDebuginfo
 # to the bundled shared libraries from rubygem extensions in order to satisfy
@@ -465,14 +416,15 @@ rm -r -v $RPM_BUILD_ROOT%{pcs_libdir}/%{rubygem_cache_dir}
 /usr/lib/rpm/find-debuginfo.sh -j2 -m -i -S debugsourcefiles.list
 # find-debuginfo.sh generated some files into /usr/lib/debug and
 # /usr/src/debug/ that we don't want in the package
-rm -rf $RPM_BUILD_ROOT%{pcs_libdir}/debug
+rm -rf $RPM_BUILD_ROOT%{_libdir}/debug
+rm -rf $RPM_BUILD_ROOT/usr/lib/debug
 rm -rf $RPM_BUILD_ROOT%{_prefix}/src/debug

 # We can remove files required for gem compilation
-rm -rf $RPM_BUILD_ROOT%{pcs_libdir}/%{rubygem_bundle_dir}/gems/eventmachine-%{version_rubygem_eventmachine}/ext
-rm -rf $RPM_BUILD_ROOT%{pcs_libdir}/%{rubygem_bundle_dir}/gems/ffi-%{version_rubygem_ffi}/ext
-rm -rf $RPM_BUILD_ROOT%{pcs_libdir}/%{rubygem_bundle_dir}/gems/json-%{version_rubygem_json}/ext
-rm -rf $RPM_BUILD_ROOT%{pcs_libdir}/%{rubygem_bundle_dir}/gems/thin-%{version_rubygem_thin}/ext
+rm -rf $RPM_BUILD_ROOT%{_libdir}/%{rubygem_bundle_dir}/gems/eventmachine-%{version_rubygem_eventmachine}/ext
+rm -rf $RPM_BUILD_ROOT%{_libdir}/%{rubygem_bundle_dir}/gems/ffi-%{version_rubygem_ffi}/ext
+rm -rf $RPM_BUILD_ROOT%{_libdir}/%{rubygem_bundle_dir}/gems/json-%{version_rubygem_json}/ext
+rm -rf $RPM_BUILD_ROOT%{_libdir}/%{rubygem_bundle_dir}/gems/thin-%{version_rubygem_thin}/ext

 %check
 # In the building environment LC_CTYPE is set to C which causes tests to fail
@@ -495,8 +447,7 @@ run_all_tests(){
     # passing outside the mock environment.
     # TODO: Investigate the issue
-    BUNDLED_LIB_LOCATION=$RPM_BUILD_ROOT%{pcs_libdir}/pcs/bundled/packages \
-        %{__python3} pcs_test/suite.py --tier0 -v --vanilla --all-but \
+    %{__python3} pcs_test/suite --tier0 -v --vanilla --all-but \
         pcs_test.tier0.lib.commands.test_resource_agent.DescribeAgentUtf8.test_describe \
         pcs_test.tier0.daemon.app.test_app_remote.SyncConfigMutualExclusive.test_get_not_locked \
         pcs_test.tier0.daemon.app.test_app_remote.SyncConfigMutualExclusive.test_post_not_locked \
@@ -504,11 +455,10 @@
     test_result_python=$?

     #run pcsd tests and remove them
-    pcsd_dir=$RPM_BUILD_ROOT%{pcs_libdir}/pcsd
-    GEM_HOME=$RPM_BUILD_ROOT%{pcs_libdir}/%{rubygem_bundle_dir} ruby \
-        -I${pcsd_dir} \
-        -I${pcsd_dir}/test \
-        ${pcsd_dir}/test/test_all_suite.rb
+    GEM_HOME=$RPM_BUILD_ROOT%{_libdir}/%{rubygem_bundle_dir} ruby \
+        -I$RPM_BUILD_ROOT%{_libdir}/pcsd \
+        -Ipcsd/test \
+        pcsd/test/test_all_suite.rb
     test_result_ruby=$?

     if [ $test_result_python -ne 0 ]; then
@@ -518,12 +468,8 @@ run_all_tests(){
 }

 remove_all_tests() {
-    pcsd_dir=$RPM_BUILD_ROOT%{pcs_libdir}/pcsd
-    #remove pcsd tests, we do not distribute them in the rpm
-    rm -r -v ${pcsd_dir}/test
-
     # remove javascript testing files
-    rm -r -v ${pcsd_dir}/public/js/dev
+    rm -r -v $RPM_BUILD_ROOT%{_libdir}/%{pcsd_public_dir}/js/dev
 }

 run_all_tests
@@ -587,19 +533,11 @@ remove_all_tests
 %license rack-test_MIT-LICENSE.txt
 %license sinatra_LICENSE
 %license tilt_COPYING
-%{python3_sitelib}/pcs
-%{python3_sitelib}/pcs-%{version}-py3.*.egg-info
+%{python3_sitelib}/*
 %{_sbindir}/pcs
 %{_sbindir}/pcsd
-%{pcs_libdir}/pcs/pcs_internal
-%{pcs_libdir}/pcsd/*
-%{pcs_libdir}/pcsd/.bundle/config
-%{pcs_libdir}/pcs/bundled/packages/tornado*
-%{pcs_libdir}/pcs/bundled/packages/dacite*
-%{pcs_libdir}/pcs/bundled/packages/dataclasses*
-%{pcs_libdir}/pcs/bundled/packages/dateutil*
-%{pcs_libdir}/pcs/bundled/packages/python_dateutil*
-%{pcs_libdir}/pcs/bundled/packages/__pycache__/dataclasses.cpython-36.pyc
+%{_libdir}/pcs/*
+%{_libdir}/pcsd/*
 %{_unitdir}/pcsd.service
 %{_unitdir}/pcsd-ruby.service
 %{_datadir}/bash-completion/completions/pcs
@@ -617,22 +555,13 @@ remove_all_tests
 %ghost %config(noreplace) %attr(0644,root,root) %{_sharedstatedir}/pcsd/pcs_users.conf
 %{_mandir}/man8/pcs.*
 %{_mandir}/man8/pcsd.*
-%exclude %{pcs_libdir}/pcsd/*.debian
-%exclude %{pcs_libdir}/pcsd/pcsd.service
-%exclude %{pcs_libdir}/pcsd/pcsd-ruby.service
-%exclude %{pcs_libdir}/pcsd/pcsd.conf
-%exclude %{pcs_libdir}/pcsd/pcsd.8
-%exclude %{pcs_libdir}/pcsd/public/js/dev/*
-%exclude %{pcs_libdir}/pcsd/Gemfile
-%exclude %{pcs_libdir}/pcsd/Gemfile.lock
-%exclude %{pcs_libdir}/pcsd/Makefile
-%exclude %{python3_sitelib}/pcs/bash_completion
-%exclude %{python3_sitelib}/pcs/pcs.8
-%exclude %{python3_sitelib}/pcs/pcs
+%exclude %{_libdir}/pcs/pcs_snmp_agent
+%exclude %{_libdir}/pcs/%{pcs_bundled_dir}/packages/pyagentx*
+

 %files -n %{pcs_snmp_pkg_name}
-%{pcs_libdir}/pcs/pcs_snmp_agent
-%{pcs_libdir}/pcs/bundled/packages/pyagentx*
+%{_libdir}/pcs/pcs_snmp_agent
+%{_libdir}/pcs/%{pcs_bundled_dir}/packages/pyagentx*
 %{_unitdir}/pcs_snmp_agent.service
 %{_datadir}/snmp/mibs/PCMK-PCS*-MIB.txt
 %{_mandir}/man8/pcs_snmp_agent.*
@@ -644,6 +573,41 @@ remove_all_tests
 %license pyagentx_LICENSE.txt

 %changelog
+* Fri Sep 24 2021 Miroslav Lisik - 0.10.10-4
+- Fixed unfencing in `pcs stonith update-scsi-devices`
+- Resolves: rhbz#bz1991654
+
+* Fri Sep 10 2021 Miroslav Lisik - 0.10.10-3
+- Added add/remove syntax for command `pcs stonith update-scsi-devices`
+- Resolves: rhbz#1992668
+
+* Fri Aug 27 2021 Miroslav Lisik - 0.10.10-2
+- Fixed create resources with depth operation attribute
+- Resolves: rhbz#1998454
+
+* Thu Aug 19 2021 Ondrej Mular - 0.10.10-1
+- Rebased to latest upstream sources (see CHANGELOG.md)
+- Updated pcs-web-ui
+- Resolves: rhbz#1885293 rhbz#1847102 rhbz#1935594
+
+* Tue Aug 10 2021 Miroslav Lisik - 0.10.9-1
+- Rebased to latest upstream sources (see CHANGELOG.md)
+- Resolves: rhbz#1432097 rhbz#1847102 rhbz#1935594 rhbz#1984901
+
+* Tue Jul 20 2021 Miroslav Lisik - 0.10.8-4
+- Rebased to latest upstream sources (see CHANGELOG.md)
+- Resolves: rhbz#1759995 rhbz#1872378 rhbz#1935594
+
+* Thu Jul 08 2021 Miroslav Lisik - 0.10.8-3
+- Rebased to latest upstream sources (see CHANGELOG.md)
+- Gating changes
+- Resolves: rhbz#1678273 rhbz#1690419 rhbz#1750240 rhbz#1759995 rhbz#1872378 rhbz#1909901 rhbz#1935594
+
+* Thu Jun 10 2021 Miroslav Lisik - 0.10.8-2
+- Rebased to latest upstream sources (see CHANGELOG.md)
+- Updated pcs-web-ui
+- Resolves: rhbz#1285269 rhbz#1290830 rhbz#1720221 rhbz#1841019 rhbz#1854238 rhbz#1882291 rhbz#1885302 rhbz#1886342 rhbz#1896458 rhbz#1922996 rhbz#1927384 rhbz#1927394 rhbz#1930886 rhbz#1935594
+
 * Mon Feb 01 2021 Miroslav Lisik - 0.10.8-1
 - Rebased to latest upstream sources (see CHANGELOG.md)
 - Updated pcs-web-ui