diff --git a/.gitignore b/.gitignore
index d3a23d4..4d90c03 100644
--- a/.gitignore
+++ b/.gitignore
@@ -9,9 +9,9 @@ SOURCES/ffi-1.13.1.gem
 SOURCES/json-2.3.0.gem
 SOURCES/mustermann-1.1.1.gem
 SOURCES/open4-1.3.4-1.gem
-SOURCES/pcs-0.10.10.tar.gz
-SOURCES/pcs-web-ui-0.1.7.tar.gz
-SOURCES/pcs-web-ui-node-modules-0.1.7.tar.xz
+SOURCES/pcs-0.10.12.tar.gz
+SOURCES/pcs-web-ui-0.1.12.tar.gz
+SOURCES/pcs-web-ui-node-modules-0.1.12.tar.xz
 SOURCES/pyagentx-0.4.pcs.2.tar.gz
 SOURCES/python-dateutil-2.8.1.tar.gz
 SOURCES/rack-2.2.3.gem
diff --git a/.pcs.metadata b/.pcs.metadata
index 6167c6c..b16fa68 100644
--- a/.pcs.metadata
+++ b/.pcs.metadata
@@ -9,9 +9,9 @@ cfa25e7a3760c3ec16723cb8263d9b7a52d0eadf SOURCES/ffi-1.13.1.gem
 0230e8c5a37f1543982e5b04be503dd5f9004b47 SOURCES/json-2.3.0.gem
 50a4e37904485810cb05e27d75c9783e5a8f3402 SOURCES/mustermann-1.1.1.gem
 41a7fe9f8e3e02da5ae76c821b89c5b376a97746 SOURCES/open4-1.3.4-1.gem
-a1c0585455b7e050c33598598a045ccd2776cb28 SOURCES/pcs-0.10.10.tar.gz
-b9ed12ca957c2f204ec37ea2836b924c36fab379 SOURCES/pcs-web-ui-0.1.7.tar.gz
-8824285e6f1c2807d9222d573c6e6df1e50d8410 SOURCES/pcs-web-ui-node-modules-0.1.7.tar.xz
+1937b826a36bb8396da227361d13f4c25830929c SOURCES/pcs-0.10.12.tar.gz
+a29bfd22130ac978c5d4a6a82108ce37ad2a5db9 SOURCES/pcs-web-ui-0.1.12.tar.gz
+c9723466d7bfb353899307a5700177f47e7e6cff SOURCES/pcs-web-ui-node-modules-0.1.12.tar.xz
 3176b2f2b332c2b6bf79fe882e83feecf3d3f011 SOURCES/pyagentx-0.4.pcs.2.tar.gz
 bd26127e57f83a10f656b62c46524c15aeb844dd SOURCES/python-dateutil-2.8.1.tar.gz
 345b7169d4d2d62176a225510399963bad62b68f SOURCES/rack-2.2.3.gem
diff --git a/SOURCES/add-missing-file-test_stonith_update_scsi_devices.py.patch b/SOURCES/add-missing-file-test_stonith_update_scsi_devices.py.patch
deleted file mode 100644
index 796544d..0000000
--- a/SOURCES/add-missing-file-test_stonith_update_scsi_devices.py.patch
+++ /dev/null
@@ -1,1172 +0,0 @@
-From e3f9823283517bafa8d309fb6148539e0e8ecdb2 Mon Sep 17 00:00:00 2001
-From: Miroslav Lisik <mlisik@redhat.com>
-Date: Fri, 10 Sep 2021 11:40:03 +0200
-Subject: [PATCH] add missing file test_stonith_update_scsi_devices.py
-
----
- .../test_stonith_update_scsi_devices.py       | 1153 +++++++++++++++++
- 1 file changed, 1153 insertions(+)
- create mode 100644 pcs_test/tier0/lib/commands/test_stonith_update_scsi_devices.py
-
-diff --git a/pcs_test/tier0/lib/commands/test_stonith_update_scsi_devices.py b/pcs_test/tier0/lib/commands/test_stonith_update_scsi_devices.py
-new file mode 100644
-index 0000000..3bc5132
---- /dev/null
-+++ b/pcs_test/tier0/lib/commands/test_stonith_update_scsi_devices.py
-@@ -0,0 +1,1153 @@
-+import json
-+from unittest import mock, TestCase
-+
-+
-+from pcs_test.tools import fixture
-+from pcs_test.tools.command_env import get_env_tools
-+from pcs_test.tools.misc import get_test_resource as rc
-+
-+from pcs import settings
-+from pcs.lib.commands import stonith
-+from pcs.common import (
-+    communication,
-+    reports,
-+)
-+from pcs.common.interface import dto
-+from pcs.common.tools import timeout_to_seconds
-+
-+from .cluster.common import (
-+    corosync_conf_fixture,
-+    get_two_node,
-+    node_fixture,
-+)
-+
-+SCSI_STONITH_ID = "scsi-fence-device"
-+SCSI_NODE = "node1"
-+_DIGEST = "0" * 31
-+DEFAULT_DIGEST = _DIGEST + "0"
-+ALL_DIGEST = _DIGEST + "1"
-+NONPRIVATE_DIGEST = _DIGEST + "2"
-+NONRELOADABLE_DIGEST = _DIGEST + "3"
-+DEVICES_1 = ("/dev/sda",)
-+DEVICES_2 = ("/dev/sda", "/dev/sdb")
-+DEVICES_3 = ("/dev/sda", "/dev/sdb", "/dev/sdc")
-+
-+DEFAULT_MONITOR = ("monitor", "60s", None, None)
-+DEFAULT_OPS = (DEFAULT_MONITOR,)
-+DEFAULT_LRM_START_OPS = (("0", DEFAULT_DIGEST, None, None),)
-+DEFAULT_LRM_MONITOR_OPS = (("60000", DEFAULT_DIGEST, None, None),)
-+DEFAULT_LRM_START_OPS_UPDATED = (("0", ALL_DIGEST, None, None),)
-+DEFAULT_LRM_MONITOR_OPS_UPDATED = (("60000", ALL_DIGEST, None, None),)
-+
-+
-+def _fixture_ops(resource_id, ops):
-+    return "\n".join(
-+        [
-+            (
-+                '<op id="{resource_id}-{name}-interval-{_interval}"'
-+                ' interval="{interval}" {timeout} name="{name}"/>'
-+            ).format(
-+                resource_id=resource_id,
-+                name=name,
-+                _interval=_interval if _interval else interval,
-+                interval=interval,
-+                timeout=f'timeout="{timeout}"' if timeout else "",
-+            )
-+            for name, interval, timeout, _interval in ops
-+        ]
-+    )
-+
-+
-+def _fixture_devices_nvpair(resource_id, devices):
-+    if devices is None:
-+        return ""
-+    return (
-+        '<nvpair id="{resource_id}-instance_attributes-devices" name="devices"'
-+        ' value="{devices}"/>'
-+    ).format(resource_id=resource_id, devices=",".join(sorted(devices)))
-+
-+
-+def fixture_scsi(
-+    stonith_id=SCSI_STONITH_ID, devices=DEVICES_1, resource_ops=DEFAULT_OPS
-+):
-+    return """
-+        <resources>
-+            <primitive class="stonith" id="{stonith_id}" type="fence_scsi">
-+                <instance_attributes id="{stonith_id}-instance_attributes">
-+                    {devices}
-+                    <nvpair id="{stonith_id}-instance_attributes-pcmk_host_check" name="pcmk_host_check" value="static-list"/>
-+                    <nvpair id="{stonith_id}-instance_attributes-pcmk_host_list" name="pcmk_host_list" value="node1 node2 node3"/>
-+                    <nvpair id="{stonith_id}-instance_attributes-pcmk_reboot_action" name="pcmk_reboot_action" value="off"/>
-+                </instance_attributes>
-+                <meta_attributes id="{stonith_id}-meta_attributes">
-+                    <nvpair id="{stonith_id}-meta_attributes-provides" name="provides" value="unfencing"/>
-+                </meta_attributes>
-+                <operations>
-+                    {operations}
-+                </operations>
-+            </primitive>
-+            <primitive class="ocf" id="dummy" provider="pacemaker" type="Dummy"/>
-+        </resources>
-+    """.format(
-+        stonith_id=stonith_id,
-+        devices=_fixture_devices_nvpair(stonith_id, devices),
-+        operations=_fixture_ops(stonith_id, resource_ops),
-+    )
-+
-+
-+def _fixture_lrm_rsc_ops(op_type, resource_id, lrm_ops):
-+    return [
-+        (
-+            '<lrm_rsc_op id="{resource_id}_{op_type_id}_{ms}" operation="{op_type}" '
-+            'interval="{ms}" {_all} {secure} {restart}/>'
-+        ).format(
-+            op_type_id="last" if op_type == "start" else op_type,
-+            op_type=op_type,
-+            resource_id=resource_id,
-+            ms=ms,
-+            _all=f'op-digest="{_all}"' if _all else "",
-+            secure=f'op-secure-digest="{secure}"' if secure else "",
-+            restart=f'op-restart-digest="{restart}"' if restart else "",
-+        )
-+        for ms, _all, secure, restart in lrm_ops
-+    ]
-+
-+
-+def _fixture_lrm_rsc_monitor_ops(resource_id, lrm_monitor_ops):
-+    return _fixture_lrm_rsc_ops("monitor", resource_id, lrm_monitor_ops)
-+
-+
-+def _fixture_lrm_rsc_start_ops(resource_id, lrm_start_ops):
-+    return _fixture_lrm_rsc_ops("start", resource_id, lrm_start_ops)
-+
-+
-+def _fixture_status_lrm_ops_base(
-+    resource_id,
-+    lrm_ops,
-+):
-+    return f"""
-+        <status>
-+            <node_state id="1" uname="node1">
-+                <lrm id="1">
-+                    <lrm_resources>
-+                        <lrm_resource id="{resource_id}" type="fence_scsi" class="stonith">
-+                            {lrm_ops}
-+                        </lrm_resource>
-+                    </lrm_resources>
-+                </lrm>
-+            </node_state>
-+        </status>
-+    """
-+
-+
-+def _fixture_status_lrm_ops(
-+    resource_id,
-+    lrm_start_ops=DEFAULT_LRM_START_OPS,
-+    lrm_monitor_ops=DEFAULT_LRM_MONITOR_OPS,
-+):
-+    return _fixture_status_lrm_ops_base(
-+        resource_id,
-+        "\n".join(
-+            _fixture_lrm_rsc_start_ops(resource_id, lrm_start_ops)
-+            + _fixture_lrm_rsc_monitor_ops(resource_id, lrm_monitor_ops)
-+        ),
-+    )
-+
-+
-+def fixture_digests_xml(resource_id, node_name, devices=""):
-+    return f"""
-+        <pacemaker-result api-version="2.9" request="crm_resource --digests --resource {resource_id} --node {node_name} --output-as xml devices={devices}">
-+            <digests resource="{resource_id}" node="{node_name}" task="stop" interval="0ms">
-+                <digest type="all" hash="{ALL_DIGEST}">
-+                    <parameters devices="{devices}" pcmk_host_check="static-list" pcmk_host_list="node1 node2 node3" pcmk_reboot_action="off"/>
-+                </digest>
-+                <digest type="nonprivate" hash="{NONPRIVATE_DIGEST}">
-+                    <parameters devices="{devices}"/>
-+                </digest>
-+            </digests>
-+            <status code="0" message="OK"/>
-+        </pacemaker-result>
-+    """
-+
-+
-+FIXTURE_CRM_MON_RES_RUNNING_1 = f""" <resources> <resource id="{SCSI_STONITH_ID}" resource_agent="stonith:fence_scsi" role="Started" nodes_running_on="1">
-+            <node name="{SCSI_NODE}" id="1" cached="true"/>
-+        </resource>
-+    </resources>
-+"""
-+
-+FIXTURE_CRM_MON_RES_RUNNING_2 = f"""
-+    <resources>
-+        <resource id="{SCSI_STONITH_ID}" resource_agent="stonith:fence_scsi" role="Started" nodes_running_on="1">
-+            <node name="node1" id="1" cached="true"/>
-+            <node name="node2" id="2" cached="true"/>
-+        </resource>
-+    </resources>
-+"""
-+FIXTURE_CRM_MON_NODES = """
-+    <nodes>
-+        <node name="node1" id="1" is_dc="true" resources_running="1"/>
-+        <node name="node2" id="2"/>
-+        <node name="node3" id="3"/>
-+    </nodes>
-+"""
-+
-+FIXTURE_CRM_MON_RES_STOPPED = f"""
-+    <resource id="{SCSI_STONITH_ID}" resource_agent="stonith:fence_scsi" role="Stopped" nodes_running_on="0"/>
-+"""
-+
-+
-+@mock.patch.object(
-+    settings,
-+    "pacemaker_api_result_schema",
-+    rc("pcmk_api_rng/api-result.rng"),
-+)
-+class UpdateScsiDevices(TestCase):
-+    def setUp(self):
-+        self.env_assist, self.config = get_env_tools(self)
-+
-+        self.existing_nodes = ["node1", "node2", "node3"]
-+        self.existing_corosync_nodes = [
-+            node_fixture(node, node_id)
-+            for node_id, node in enumerate(self.existing_nodes, 1)
-+        ]
-+        self.config.env.set_known_nodes(self.existing_nodes)
-+
-+    def assert_command_success(
-+        self,
-+        devices_before=DEVICES_1,
-+        devices_updated=DEVICES_2,
-+        resource_ops=DEFAULT_OPS,
-+        lrm_monitor_ops=DEFAULT_LRM_MONITOR_OPS,
-+        lrm_start_ops=DEFAULT_LRM_START_OPS,
-+        lrm_monitor_ops_updated=DEFAULT_LRM_MONITOR_OPS_UPDATED,
-+        lrm_start_ops_updated=DEFAULT_LRM_START_OPS_UPDATED,
-+    ):
-+        # pylint: disable=too-many-locals
-+        self.config.runner.pcmk.is_resource_digests_supported()
-+        self.config.runner.cib.load(
-+            resources=fixture_scsi(
-+                devices=devices_before, resource_ops=resource_ops
-+            ),
-+            status=_fixture_status_lrm_ops(
-+                SCSI_STONITH_ID,
-+                lrm_start_ops=lrm_start_ops,
-+                lrm_monitor_ops=lrm_monitor_ops,
-+            ),
-+        )
-+        self.config.runner.pcmk.load_state(
-+            resources=FIXTURE_CRM_MON_RES_RUNNING_1, nodes=FIXTURE_CRM_MON_NODES
-+        )
-+        devices_opt = "devices={}".format(",".join(devices_updated))
-+        self.config.runner.pcmk.resource_digests(
-+            SCSI_STONITH_ID,
-+            SCSI_NODE,
-+            name="start.op.digests",
-+            stdout=fixture_digests_xml(
-+                SCSI_STONITH_ID, SCSI_NODE, devices=",".join(devices_updated)
-+            ),
-+            args=[devices_opt],
-+        )
-+
-+        for num, op in enumerate(resource_ops, 1):
-+            name, interval, timeout, _ = op
-+            if name != "monitor":
-+                continue
-+            args = [devices_opt]
-+            args.append(
-+                "CRM_meta_interval={}".format(
-+                    1000 * timeout_to_seconds(interval)
-+                )
-+            )
-+            if timeout:
-+                args.append(
-+                    "CRM_meta_timeout={}".format(
-+                        1000 * timeout_to_seconds(timeout)
-+                    )
-+                )
-+            self.config.runner.pcmk.resource_digests(
-+                SCSI_STONITH_ID,
-+                SCSI_NODE,
-+                name=f"{name}-{num}.op.digests",
-+                stdout=fixture_digests_xml(
-+                    SCSI_STONITH_ID,
-+                    SCSI_NODE,
-+                    devices=",".join(devices_updated),
-+                ),
-+                args=args,
-+            )
-+        self.config.corosync_conf.load_content(
-+            corosync_conf_fixture(
-+                self.existing_corosync_nodes,
-+                get_two_node(len(self.existing_corosync_nodes)),
-+            )
-+        )
-+        self.config.http.corosync.get_corosync_online_targets(
-+            node_labels=self.existing_nodes
-+        )
-+        self.config.http.scsi.unfence_node(
-+            devices_updated, node_labels=self.existing_nodes
-+        )
-+        self.config.env.push_cib(
-+            resources=fixture_scsi(
-+                devices=devices_updated, resource_ops=resource_ops
-+            ),
-+            status=_fixture_status_lrm_ops(
-+                SCSI_STONITH_ID,
-+                lrm_start_ops=lrm_start_ops_updated,
-+                lrm_monitor_ops=lrm_monitor_ops_updated,
-+            ),
-+        )
-+        stonith.update_scsi_devices(
-+            self.env_assist.get_env(), SCSI_STONITH_ID, devices_updated
-+        )
-+        self.env_assist.assert_reports([])
-+
-+    def test_update_1_to_1_devices(self):
-+        self.assert_command_success(
-+            devices_before=DEVICES_1, devices_updated=DEVICES_1
-+        )
-+
-+    def test_update_2_to_2_devices(self):
-+        self.assert_command_success(
-+            devices_before=DEVICES_1, devices_updated=DEVICES_1
-+        )
-+
-+    def test_update_1_to_2_devices(self):
-+        self.assert_command_success()
-+
-+    def test_update_1_to_3_devices(self):
-+        self.assert_command_success(
-+            devices_before=DEVICES_1, devices_updated=DEVICES_3
-+        )
-+
-+    def test_update_3_to_1_devices(self):
-+        self.assert_command_success(
-+            devices_before=DEVICES_3, devices_updated=DEVICES_1
-+        )
-+
-+    def test_update_3_to_2_devices(self):
-+        self.assert_command_success(
-+            devices_before=DEVICES_3, devices_updated=DEVICES_2
-+        )
-+
-+    def test_default_monitor(self):
-+        self.assert_command_success()
-+
-+    def test_no_monitor_ops(self):
-+        self.assert_command_success(
-+            resource_ops=(), lrm_monitor_ops=(), lrm_monitor_ops_updated=()
-+        )
-+
-+    def test_1_monitor_with_timeout(self):
-+        self.assert_command_success(
-+            resource_ops=(("monitor", "30s", "10s", None),),
-+            lrm_monitor_ops=(("30000", DEFAULT_DIGEST, None, None),),
-+            lrm_monitor_ops_updated=(("30000", ALL_DIGEST, None, None),),
-+        )
-+
-+    def test_2_monitor_ops_with_timeouts(self):
-+        self.assert_command_success(
-+            resource_ops=(
-+                ("monitor", "30s", "10s", None),
-+                ("monitor", "40s", "20s", None),
-+            ),
-+            lrm_monitor_ops=(
-+                ("30000", DEFAULT_DIGEST, None, None),
-+                ("40000", DEFAULT_DIGEST, None, None),
-+            ),
-+            lrm_monitor_ops_updated=(
-+                ("30000", ALL_DIGEST, None, None),
-+                ("40000", ALL_DIGEST, None, None),
-+            ),
-+        )
-+
-+    def test_2_monitor_ops_with_one_timeout(self):
-+        self.assert_command_success(
-+            resource_ops=(
-+                ("monitor", "30s", "10s", None),
-+                ("monitor", "60s", None, None),
-+            ),
-+            lrm_monitor_ops=(
-+                ("30000", DEFAULT_DIGEST, None, None),
-+                ("60000", DEFAULT_DIGEST, None, None),
-+            ),
-+            lrm_monitor_ops_updated=(
-+                ("30000", ALL_DIGEST, None, None),
-+                ("60000", ALL_DIGEST, None, None),
-+            ),
-+        )
-+
-+    def test_various_start_ops_one_lrm_start_op(self):
-+        self.assert_command_success(
-+            resource_ops=(
-+                ("monitor", "60s", None, None),
-+                ("start", "0s", "40s", None),
-+                ("start", "0s", "30s", "1"),
-+                ("start", "10s", "5s", None),
-+                ("start", "20s", None, None),
-+            ),
-+        )
-+
-+    def test_1_nonrecurring_start_op_with_timeout(self):
-+        self.assert_command_success(
-+            resource_ops=(
-+                ("monitor", "60s", None, None),
-+                ("start", "0s", "40s", None),
-+            ),
-+        )
-+
-+
-+@mock.patch.object(
-+    settings,
-+    "pacemaker_api_result_schema",
-+    rc("pcmk_api_rng/api-result.rng"),
-+)
-+class TestUpdateScsiDevicesFailures(TestCase):
-+    # pylint: disable=too-many-public-methods
-+    def setUp(self):
-+        self.env_assist, self.config = get_env_tools(self)
-+
-+        self.existing_nodes = ["node1", "node2", "node3"]
-+        self.existing_corosync_nodes = [
-+            node_fixture(node, node_id)
-+            for node_id, node in enumerate(self.existing_nodes, 1)
-+        ]
-+        self.config.env.set_known_nodes(self.existing_nodes)
-+
-+    def test_pcmk_doesnt_support_digests(self):
-+        self.config.runner.pcmk.is_resource_digests_supported(
-+            is_supported=False
-+        )
-+        self.env_assist.assert_raise_library_error(
-+            lambda: stonith.update_scsi_devices(
-+                self.env_assist.get_env(), SCSI_STONITH_ID, ()
-+            ),
-+            [
-+                fixture.error(
-+                    reports.codes.STONITH_RESTARTLESS_UPDATE_OF_SCSI_DEVICES_NOT_SUPPORTED,
-+                )
-+            ],
-+            expected_in_processor=False,
-+        )
-+
-+    def test_devices_cannot_be_empty(self):
-+        self.config.runner.pcmk.is_resource_digests_supported()
-+        self.config.runner.cib.load(resources=fixture_scsi())
-+        self.env_assist.assert_raise_library_error(
-+            lambda: stonith.update_scsi_devices(
-+                self.env_assist.get_env(), SCSI_STONITH_ID, ()
-+            )
-+        )
-+        self.env_assist.assert_reports(
-+            [
-+                fixture.error(
-+                    reports.codes.INVALID_OPTION_VALUE,
-+                    option_name="devices",
-+                    option_value="",
-+                    allowed_values=None,
-+                    cannot_be_empty=True,
-+                    forbidden_characters=None,
-+                )
-+            ]
-+        )
-+
-+    def test_nonexistant_id(self):
-+        self.config.runner.pcmk.is_resource_digests_supported()
-+        self.config.runner.cib.load(resources=fixture_scsi())
-+        self.env_assist.assert_raise_library_error(
-+            lambda: stonith.update_scsi_devices(
-+                self.env_assist.get_env(), "non-existent-id", DEVICES_2
-+            )
-+        )
-+        self.env_assist.assert_reports(
-+            [
-+                fixture.error(
-+                    reports.codes.ID_NOT_FOUND,
-+                    id="non-existent-id",
-+                    expected_types=["primitive"],
-+                    context_type="cib",
-+                    context_id="",
-+                )
-+            ]
-+        )
-+
-+    def test_not_a_resource_id(self):
-+        self.config.runner.pcmk.is_resource_digests_supported()
-+        self.config.runner.cib.load(resources=fixture_scsi())
-+        self.env_assist.assert_raise_library_error(
-+            lambda: stonith.update_scsi_devices(
-+                self.env_assist.get_env(),
-+                f"{SCSI_STONITH_ID}-instance_attributes-devices",
-+                DEVICES_2,
-+            )
-+        )
-+        self.env_assist.assert_reports(
-+            [
-+                fixture.error(
-+                    reports.codes.ID_BELONGS_TO_UNEXPECTED_TYPE,
-+                    id=f"{SCSI_STONITH_ID}-instance_attributes-devices",
-+                    expected_types=["primitive"],
-+                    current_type="nvpair",
-+                )
-+            ]
-+        )
-+
-+    def test_not_supported_resource_type(self):
-+        self.config.runner.pcmk.is_resource_digests_supported()
-+        self.config.runner.cib.load(resources=fixture_scsi())
-+        self.env_assist.assert_raise_library_error(
-+            lambda: stonith.update_scsi_devices(
-+                self.env_assist.get_env(), "dummy", DEVICES_2
-+            )
-+        )
-+        self.env_assist.assert_reports(
-+            [
-+                fixture.error(
-+                    reports.codes.STONITH_RESTARTLESS_UPDATE_UNSUPPORTED_AGENT,
-+                    resource_id="dummy",
-+                    resource_type="Dummy",
-+                    supported_stonith_types=["fence_scsi"],
-+                )
-+            ]
-+        )
-+
-+    def test_devices_option_missing(self):
-+        self.config.runner.pcmk.is_resource_digests_supported()
-+        self.config.runner.cib.load(resources=fixture_scsi(devices=None))
-+        self.env_assist.assert_raise_library_error(
-+            lambda: stonith.update_scsi_devices(
-+                self.env_assist.get_env(), SCSI_STONITH_ID, DEVICES_2
-+            )
-+        )
-+        self.env_assist.assert_reports(
-+            [
-+                fixture.error(
-+                    reports.codes.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM,
-+                    reason=(
-+                        "no devices option configured for stonith device "
-+                        f"'{SCSI_STONITH_ID}'"
-+                    ),
-+                    reason_type=reports.const.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM_REASON_OTHER,
-+                )
-+            ]
-+        )
-+
-+    def test_devices_option_empty(self):
-+        self.config.runner.pcmk.is_resource_digests_supported()
-+        self.config.runner.cib.load(resources=fixture_scsi(devices=""))
-+        self.env_assist.assert_raise_library_error(
-+            lambda: stonith.update_scsi_devices(
-+                self.env_assist.get_env(), SCSI_STONITH_ID, DEVICES_2
-+            )
-+        )
-+        self.env_assist.assert_reports(
-+            [
-+                fixture.error(
-+                    reports.codes.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM,
-+                    reason=(
-+                        "no devices option configured for stonith device "
-+                        f"'{SCSI_STONITH_ID}'"
-+                    ),
-+                    reason_type=reports.const.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM_REASON_OTHER,
-+                )
-+            ]
-+        )
-+
-+    def test_stonith_resource_is_not_running(self):
-+        self.config.runner.pcmk.is_resource_digests_supported()
-+        self.config.runner.cib.load(resources=fixture_scsi())
-+        self.config.runner.pcmk.load_state(
-+            resources=FIXTURE_CRM_MON_RES_STOPPED, nodes=FIXTURE_CRM_MON_NODES
-+        )
-+        self.env_assist.assert_raise_library_error(
-+            lambda: stonith.update_scsi_devices(
-+                self.env_assist.get_env(), SCSI_STONITH_ID, DEVICES_2
-+            ),
-+            [
-+                fixture.error(
-+                    reports.codes.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM,
-+                    reason=f"resource '{SCSI_STONITH_ID}' is not running on any node",
-+                    reason_type=reports.const.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM_REASON_NOT_RUNNING,
-+                )
-+            ],
-+            expected_in_processor=False,
-+        )
-+
-+    def test_stonith_resource_is_running_on_more_than_one_node(self):
-+        self.config.runner.pcmk.is_resource_digests_supported()
-+        self.config.runner.cib.load(resources=fixture_scsi())
-+        self.config.runner.pcmk.load_state(
-+            resources=FIXTURE_CRM_MON_RES_RUNNING_2, nodes=FIXTURE_CRM_MON_NODES
-+        )
-+        self.env_assist.assert_raise_library_error(
-+            lambda: stonith.update_scsi_devices(
-+                self.env_assist.get_env(), SCSI_STONITH_ID, DEVICES_2
-+            ),
-+            [
-+                fixture.error(
-+                    reports.codes.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM,
-+                    reason=(
-+                        f"resource '{SCSI_STONITH_ID}' is running on more than "
-+                        "1 node"
-+                    ),
-+                    reason_type=reports.const.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM_REASON_OTHER,
-+                )
-+            ],
-+            expected_in_processor=False,
-+        )
-+
-+    def test_lrm_op_missing_digest_attributes(self):
-+        devices = ",".join(DEVICES_2)
-+        self.config.runner.pcmk.is_resource_digests_supported()
-+        self.config.runner.cib.load(
-+            resources=fixture_scsi(),
-+            status=_fixture_status_lrm_ops_base(
-+                SCSI_STONITH_ID,
-+                f'<lrm_rsc_op id="{SCSI_STONITH_ID}_last" operation="start"/>',
-+            ),
-+        )
-+        self.config.runner.pcmk.load_state(
-+            resources=FIXTURE_CRM_MON_RES_RUNNING_1, nodes=FIXTURE_CRM_MON_NODES
-+        )
-+        self.config.runner.pcmk.resource_digests(
-+            SCSI_STONITH_ID,
-+            SCSI_NODE,
-+            name="start.op.digests",
-+            stdout=fixture_digests_xml(
-+                SCSI_STONITH_ID,
-+                SCSI_NODE,
-+                devices=devices,
-+            ),
-+            args=[f"devices={devices}"],
-+        )
-+        self.env_assist.assert_raise_library_error(
-+            lambda: stonith.update_scsi_devices(
-+                self.env_assist.get_env(), SCSI_STONITH_ID, DEVICES_2
-+            ),
-+            [
-+                fixture.error(
-+                    reports.codes.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM,
-+                    reason="no digests attributes in lrm_rsc_op element",
-+                    reason_type=reports.const.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM_REASON_OTHER,
-+                )
-+            ],
-+            expected_in_processor=False,
-+        )
-+
-+    def test_crm_resource_digests_missing(self):
-+        devices = ",".join(DEVICES_2)
-+        self.config.runner.pcmk.is_resource_digests_supported()
-+        self.config.runner.cib.load(
-+            resources=fixture_scsi(),
-+            status=_fixture_status_lrm_ops_base(
-+                SCSI_STONITH_ID,
-+                (
-+                    f'<lrm_rsc_op id="{SCSI_STONITH_ID}_last" '
-+                    'operation="start" op-restart-digest="somedigest" />'
-+                ),
-+            ),
-+        )
-+        self.config.runner.pcmk.load_state(
-+            resources=FIXTURE_CRM_MON_RES_RUNNING_1, nodes=FIXTURE_CRM_MON_NODES
-+        )
-+        self.config.runner.pcmk.resource_digests(
-+            SCSI_STONITH_ID,
-+            SCSI_NODE,
-+            name="start.op.digests",
-+            stdout=fixture_digests_xml(
-+                SCSI_STONITH_ID,
-+                SCSI_NODE,
-+                devices=devices,
-+            ),
-+            args=[f"devices={devices}"],
-+        )
-+        self.env_assist.assert_raise_library_error(
-+            lambda: stonith.update_scsi_devices(
-+                self.env_assist.get_env(), SCSI_STONITH_ID, DEVICES_2
-+            ),
-+            [
-+                fixture.error(
-+                    reports.codes.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM,
-+                    reason=(
-+                        "necessary digest for 'op-restart-digest' attribute is "
-+                        "missing"
-+                    ),
-+                    reason_type=reports.const.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM_REASON_OTHER,
-+                )
-+            ],
-+            expected_in_processor=False,
-+        )
-+
-+    def test_no_lrm_start_op(self):
-+        self.config.runner.pcmk.is_resource_digests_supported()
-+        self.config.runner.cib.load(
-+            resources=fixture_scsi(),
-+            status=_fixture_status_lrm_ops(SCSI_STONITH_ID, lrm_start_ops=()),
-+        )
-+        self.config.runner.pcmk.load_state(
-+            resources=FIXTURE_CRM_MON_RES_RUNNING_1, nodes=FIXTURE_CRM_MON_NODES
-+        )
-+        self.env_assist.assert_raise_library_error(
-+            lambda: stonith.update_scsi_devices(
-+                self.env_assist.get_env(), SCSI_STONITH_ID, DEVICES_2
-+            ),
-+            [
-+                fixture.error(
-+                    reports.codes.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM,
-+                    reason=(
-+                        "lrm_rsc_op element for start operation was not found"
-+                    ),
-+                    reason_type=reports.const.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM_REASON_OTHER,
-+                )
-+            ],
-+            expected_in_processor=False,
-+        )
-+
-+    def test_monitor_ops_and_lrm_monitor_ops_do_not_match(self):
-+        self.config.runner.pcmk.is_resource_digests_supported()
-+        self.config.runner.cib.load(
-+            resources=fixture_scsi(
-+                resource_ops=(
-+                    ("monitor", "30s", "10s", None),
-+                    ("monitor", "30s", "20s", "31"),
-+                    ("monitor", "60s", None, None),
-+                )
-+            ),
-+            status=_fixture_status_lrm_ops(SCSI_STONITH_ID),
-+        )
-+        self.config.runner.pcmk.load_state(
-+            resources=FIXTURE_CRM_MON_RES_RUNNING_1, nodes=FIXTURE_CRM_MON_NODES
-+        )
-+        self.config.runner.pcmk.resource_digests(
-+            SCSI_STONITH_ID,
-+            SCSI_NODE,
-+            name="start.op.digests",
-+            stdout=fixture_digests_xml(
-+                SCSI_STONITH_ID, SCSI_NODE, devices=",".join(DEVICES_2)
-+            ),
-+            args=["devices={}".format(",".join(DEVICES_2))],
-+        )
-+        self.env_assist.assert_raise_library_error(
-+            lambda: stonith.update_scsi_devices(
-+                self.env_assist.get_env(), SCSI_STONITH_ID, DEVICES_2
-+            ),
-+            [
-+                fixture.error(
-+                    reports.codes.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM,
-+                    reason=(
-+                        "number of lrm_rsc_op and op elements for monitor "
-+                        "operation differs"
-+                    ),
-+                    reason_type=reports.const.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM_REASON_OTHER,
-+                )
-+            ],
-+            expected_in_processor=False,
-+        )
-+
-+    def test_lrm_monitor_ops_not_found(self):
-+        self.config.runner.pcmk.is_resource_digests_supported()
-+        self.config.runner.cib.load(
-+            resources=fixture_scsi(
-+                resource_ops=(("monitor", "30s", None, None),)
-+            ),
-+            status=_fixture_status_lrm_ops(SCSI_STONITH_ID),
-+        )
-+        self.config.runner.pcmk.load_state(
-+            resources=FIXTURE_CRM_MON_RES_RUNNING_1, nodes=FIXTURE_CRM_MON_NODES
-+        )
-+        self.config.runner.pcmk.resource_digests(
-+            SCSI_STONITH_ID,
-+            SCSI_NODE,
-+            name="start.op.digests",
-+            stdout=fixture_digests_xml(
-+                SCSI_STONITH_ID, SCSI_NODE, devices=",".join(DEVICES_2)
-+            ),
-+            args=["devices={}".format(",".join(DEVICES_2))],
-+        )
-+        self.env_assist.assert_raise_library_error(
-+            lambda: stonith.update_scsi_devices(
-+                self.env_assist.get_env(), SCSI_STONITH_ID, DEVICES_2
-+            ),
-+            [
-+                fixture.error(
-+                    reports.codes.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM,
-+                    reason=(
-+                        "monitor lrm_rsc_op element for resource "
-+                        f"'{SCSI_STONITH_ID}', node '{SCSI_NODE}' and interval "
-+                        "'30000' not found"
-+                    ),
-+                    reason_type=reports.const.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM_REASON_OTHER,
-+                )
-+            ],
-+            expected_in_processor=False,
-+        )
-+
-+    def test_node_missing_name_and_missing_auth_token(self):
-+        self.config.runner.pcmk.is_resource_digests_supported()
-+        self.config.runner.cib.load(
-+            resources=fixture_scsi(),
-+            status=_fixture_status_lrm_ops(SCSI_STONITH_ID),
-+        )
-+        self.config.runner.pcmk.load_state(
-+            resources=FIXTURE_CRM_MON_RES_RUNNING_1, nodes=FIXTURE_CRM_MON_NODES
-+        )
-+        self.config.runner.pcmk.resource_digests(
-+            SCSI_STONITH_ID,
-+            SCSI_NODE,
-+            name="start.op.digests",
-+            stdout=fixture_digests_xml(
-+                SCSI_STONITH_ID, SCSI_NODE, devices=",".join(DEVICES_2)
-+            ),
-+            args=["devices={}".format(",".join(DEVICES_2))],
-+        )
-+        self.config.runner.pcmk.resource_digests(
-+            SCSI_STONITH_ID,
-+            SCSI_NODE,
-+            name="monitor.op.digests",
-+            stdout=fixture_digests_xml(
-+                SCSI_STONITH_ID, SCSI_NODE, devices=",".join(DEVICES_2)
-+            ),
-+            args=[
-+                "devices={}".format(",".join(DEVICES_2)),
-+                "CRM_meta_interval=60000",
-+            ],
-+        )
-+        self.config.corosync_conf.load_content(
-+            corosync_conf_fixture(
-+                self.existing_corosync_nodes
-+                + [[("ring0_addr", "custom_node"), ("nodeid", "5")]],
-+            )
-+        )
-+        self.config.env.set_known_nodes(self.existing_nodes[:-1])
-+        self.env_assist.assert_raise_library_error(
-+            lambda: stonith.update_scsi_devices(
-+                self.env_assist.get_env(), SCSI_STONITH_ID, DEVICES_2
-+            ),
-+        )
-+        self.env_assist.assert_reports(
-+            [
-+                fixture.error(
-+                    reports.codes.COROSYNC_CONFIG_MISSING_NAMES_OF_NODES,
-+                    fatal=True,
-+                ),
-+                fixture.error(
-+                    reports.codes.HOST_NOT_FOUND,
-+                    host_list=[self.existing_nodes[-1]],
-+                ),
-+            ]
-+        )
-+
-+    def _unfence_failure_common_calls(self):
-+        devices = ",".join(DEVICES_2)
-+        self.config.runner.pcmk.is_resource_digests_supported()
-+        self.config.runner.cib.load(
-+            resources=fixture_scsi(),
-+            status=_fixture_status_lrm_ops(SCSI_STONITH_ID),
-+        )
-+        self.config.runner.pcmk.load_state(
-+            resources=FIXTURE_CRM_MON_RES_RUNNING_1, nodes=FIXTURE_CRM_MON_NODES
-+        )
-+        self.config.runner.pcmk.resource_digests(
-+            SCSI_STONITH_ID,
-+            SCSI_NODE,
-+            name="start.op.digests",
-+            stdout=fixture_digests_xml(
-+                SCSI_STONITH_ID,
-+                SCSI_NODE,
-+                devices=devices,
-+            ),
-+            args=[f"devices={devices}"],
-+        )
-+        self.config.runner.pcmk.resource_digests(
-+            SCSI_STONITH_ID,
-+            SCSI_NODE,
-+            name="monitor.op.digests",
-+            stdout=fixture_digests_xml(
-+                SCSI_STONITH_ID,
-+                SCSI_NODE,
-+                devices=devices,
-+            ),
-+            args=[
-+                f"devices={devices}",
-+                "CRM_meta_interval=60000",
-+            ],
-+        )
-+        self.config.corosync_conf.load_content(
-+            corosync_conf_fixture(self.existing_corosync_nodes)
-+        )
-+
-+    def test_unfence_failure_unable_to_connect(self):
-+        self._unfence_failure_common_calls()
-+        self.config.http.corosync.get_corosync_online_targets(
-+            node_labels=self.existing_nodes
-+        )
-+        self.config.http.scsi.unfence_node(
-+            DEVICES_2,
-+            communication_list=[
-+                dict(
-+                    label=self.existing_nodes[0],
-+                    raw_data=json.dumps(
-+                        dict(devices=DEVICES_2, node=self.existing_nodes[0])
-+                    ),
-+                    was_connected=False,
-+                    error_msg="errA",
-+                ),
-+                dict(
-+                    label=self.existing_nodes[1],
-+                    raw_data=json.dumps(
-+                        dict(devices=DEVICES_2, node=self.existing_nodes[1])
-+                    ),
-+                    output=json.dumps(
-+                        dto.to_dict(
-+                            communication.dto.InternalCommunicationResultDto(
-+                                status=communication.const.COM_STATUS_ERROR,
-+                                status_msg="error",
-+                                report_list=[
-+                                    reports.ReportItem.error(
-+                                        reports.messages.StonithUnfencingFailed(
-+                                            "errB"
-+                                        )
-+                                    ).to_dto()
-+                                ],
-+                                data=None,
-+                            )
-+                        )
-+                    ),
-+                ),
-+                dict(
-+                    label=self.existing_nodes[2],
-+                    raw_data=json.dumps(
-+                        dict(devices=DEVICES_2, node=self.existing_nodes[2])
-+                    ),
-+                ),
-+            ],
-+        )
-+        self.env_assist.assert_raise_library_error(
-+            lambda: stonith.update_scsi_devices(
-+                self.env_assist.get_env(), SCSI_STONITH_ID, DEVICES_2
-+            ),
-+        )
-+        self.env_assist.assert_reports(
-+            [
-+                fixture.error(
-+                    reports.codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
-+                    node=self.existing_nodes[0],
-+                    command="api/v1/scsi-unfence-node/v1",
-+                    reason="errA",
-+                ),
-+                fixture.error(
-+                    reports.codes.STONITH_UNFENCING_FAILED,
-+                    reason="errB",
-+                    context=reports.dto.ReportItemContextDto(
-+                        node=self.existing_nodes[1],
-+                    ),
-+                ),
-+            ]
-+        )
-+
-+    def test_unfence_failure_agent_script_failed(self):
-+        self._unfence_failure_common_calls()
-+        self.config.http.corosync.get_corosync_online_targets(
-+            node_labels=self.existing_nodes
-+        )
-+        self.config.http.scsi.unfence_node(
-+            DEVICES_2,
-+            communication_list=[
-+                dict(
-+                    label=self.existing_nodes[0],
-+                    raw_data=json.dumps(
-+                        dict(devices=DEVICES_2, node=self.existing_nodes[0])
-+                    ),
-+                ),
-+                dict(
-+                    label=self.existing_nodes[1],
-+                    raw_data=json.dumps(
-+                        dict(devices=DEVICES_2, node=self.existing_nodes[1])
-+                    ),
-+                    output=json.dumps(
-+                        dto.to_dict(
-+                            communication.dto.InternalCommunicationResultDto(
-+                                status=communication.const.COM_STATUS_ERROR,
-+                                status_msg="error",
-+                                report_list=[
-+                                    reports.ReportItem.error(
-+                                        reports.messages.StonithUnfencingFailed(
-+                                            "errB"
-+                                        )
-+                                    ).to_dto()
-+                                ],
-+                                data=None,
-+                            )
-+                        )
-+                    ),
-+                ),
-+                dict(
-+                    label=self.existing_nodes[2],
-+                    raw_data=json.dumps(
-+                        dict(devices=DEVICES_2, node=self.existing_nodes[2])
-+                    ),
-+                ),
-+            ],
-+        )
-+        self.env_assist.assert_raise_library_error(
-+            lambda: stonith.update_scsi_devices(
-+                self.env_assist.get_env(), SCSI_STONITH_ID, DEVICES_2
-+            ),
-+        )
-+        self.env_assist.assert_reports(
-+            [
-+                fixture.error(
-+                    reports.codes.STONITH_UNFENCING_FAILED,
-+                    reason="errB",
-+                    context=reports.dto.ReportItemContextDto(
-+                        node=self.existing_nodes[1],
-+                    ),
-+                ),
-+            ]
-+        )
-+
-+    def test_corosync_targets_unable_to_connect(self):
-+        self._unfence_failure_common_calls()
-+        self.config.http.corosync.get_corosync_online_targets(
-+            communication_list=[
-+                dict(
-+                    label=self.existing_nodes[0],
-+                    output='{"corosync":true}',
-+                ),
-+            ]
-+            + [
-+                dict(
-+                    label=node,
-+                    was_connected=False,
-+                    errno=7,
-+                    error_msg="an error",
-+                )
-+                for node in self.existing_nodes[1:]
-+            ]
-+        )
-+        self.env_assist.assert_raise_library_error(
-+            lambda: stonith.update_scsi_devices(
-+                self.env_assist.get_env(), SCSI_STONITH_ID, DEVICES_2
-+            ),
-+        )
-+        self.env_assist.assert_reports(
-+            [
-+                fixture.error(
-+                    reports.codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
-+                    force_code=reports.codes.SKIP_OFFLINE_NODES,
-+                    node=node,
-+                    command="remote/status",
-+                    reason="an error",
-+                )
-+                for node in self.existing_nodes[1:]
-+            ]
-+        )
-+
-+    def test_corosync_targets_skip_offline_unfence_node_running_corosync(
-+        self,
-+    ):
-+        self._unfence_failure_common_calls()
-+        self.config.http.corosync.get_corosync_online_targets(
-+            communication_list=[
-+                dict(
-+                    label=self.existing_nodes[0],
-+                    output='{"corosync":true}',
-+                ),
-+                dict(
-+                    label=self.existing_nodes[1],
-+                    output='{"corosync":false}',
-+                ),
-+                dict(
-+                    label=self.existing_nodes[2],
-+                    was_connected=False,
-+                    errno=7,
-+                    error_msg="an error",
-+                ),
-+            ]
-+        )
-+        self.config.http.scsi.unfence_node(
-+            DEVICES_2,
-+            communication_list=[
-+                dict(
-+                    label=self.existing_nodes[0],
-+                    raw_data=json.dumps(
-+                        dict(devices=DEVICES_2, node=self.existing_nodes[0])
-+                    ),
-+                ),
-+            ],
-+        )
-+        self.config.env.push_cib(
-+            resources=fixture_scsi(devices=DEVICES_2),
-+            status=_fixture_status_lrm_ops(
-+                SCSI_STONITH_ID,
-+                lrm_start_ops=DEFAULT_LRM_START_OPS_UPDATED,
-+                lrm_monitor_ops=DEFAULT_LRM_MONITOR_OPS_UPDATED,
-+            ),
-+        )
-+        stonith.update_scsi_devices(
-+            self.env_assist.get_env(),
-+            SCSI_STONITH_ID,
-+            DEVICES_2,
-+            force_flags=[reports.codes.SKIP_OFFLINE_NODES],
-+        )
-+        self.env_assist.assert_reports(
-+            [
-+                fixture.warn(
-+                    reports.codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
-+                    node=self.existing_nodes[2],
-+                    command="remote/status",
-+                    reason="an error",
-+                ),
-+            ]
-+        )
-+
-+    def test_corosync_targets_unable_to_perform_unfencing_operation(
-+        self,
-+    ):
-+        self._unfence_failure_common_calls()
-+        self.config.http.corosync.get_corosync_online_targets(
-+            communication_list=[
-+                dict(
-+                    label=self.existing_nodes[0],
-+                    was_connected=False,
-+                    errno=7,
-+                    error_msg="an error",
-+                ),
-+                dict(
-+                    label=self.existing_nodes[1],
-+                    was_connected=False,
-+                    errno=7,
-+                    error_msg="an error",
-+                ),
-+                dict(
-+                    label=self.existing_nodes[2],
-+                    output='{"corosync":false}',
-+                ),
-+            ]
-+        )
-+        self.config.http.scsi.unfence_node(DEVICES_2, communication_list=[])
-+        self.env_assist.assert_raise_library_error(
-+            lambda: stonith.update_scsi_devices(
-+                self.env_assist.get_env(),
-+                SCSI_STONITH_ID,
-+                DEVICES_2,
-+                force_flags=[reports.codes.SKIP_OFFLINE_NODES],
-+            ),
-+        )
-+        self.env_assist.assert_reports(
-+            [
-+                fixture.warn(
-+                    reports.codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
-+                    node=node,
-+                    command="remote/status",
-+                    reason="an error",
-+                )
-+                for node in self.existing_nodes[0:2]
-+            ]
-+            + [
-+                fixture.error(
-+                    reports.codes.UNABLE_TO_PERFORM_OPERATION_ON_ANY_NODE,
-+                ),
-+            ]
-+        )
--- 
-2.31.1
-
diff --git a/SOURCES/bz1384485-01-fix-rsc-update-cmd-when-unable-to-get-agent-metadata.patch b/SOURCES/bz1384485-01-fix-rsc-update-cmd-when-unable-to-get-agent-metadata.patch
new file mode 100644
index 0000000..2960f32
--- /dev/null
+++ b/SOURCES/bz1384485-01-fix-rsc-update-cmd-when-unable-to-get-agent-metadata.patch
@@ -0,0 +1,73 @@
+From e5fc48f45a60228a82980dcd6d68ca01cf447eac Mon Sep 17 00:00:00 2001
+From: Ondrej Mular <omular@redhat.com>
+Date: Tue, 7 Dec 2021 11:58:09 +0100
+Subject: [PATCH 2/3] fix rsc update cmd when unable to get agent metadata
+
+`resource update` command failed with a traceback when updating a
+resource with a non-existing resource agent
+---
+ pcs/resource.py                        | 14 ++++++++------
+ pcs_test/tier1/legacy/test_resource.py | 21 +++++++++++++++++++++
+ 2 files changed, 29 insertions(+), 6 deletions(-)
+
+diff --git a/pcs/resource.py b/pcs/resource.py
+index c0e8b0d9..4514338d 100644
+--- a/pcs/resource.py
++++ b/pcs/resource.py
+@@ -1049,13 +1049,15 @@ def resource_update(lib, args, modifiers, deal_with_guest_change=True):
+         if report_list:
+             process_library_reports(report_list)
+     except lib_ra.ResourceAgentError as e:
+-        severity = (
+-            reports.ReportItemSeverity.WARNING
+-            if modifiers.get("--force")
+-            else reports.ReportItemSeverity.ERROR
+-        )
+         process_library_reports(
+-            [lib_ra.resource_agent_error_to_report_item(e, severity)]
++            [
++                lib_ra.resource_agent_error_to_report_item(
++                    e,
++                    reports.get_severity(
++                        reports.codes.FORCE, modifiers.get("--force")
++                    ),
++                )
++            ]
+         )
+     except LibraryError as e:
+         process_library_reports(e.args)
+diff --git a/pcs_test/tier1/legacy/test_resource.py b/pcs_test/tier1/legacy/test_resource.py
+index 3f0e08b9..bae0587a 100644
+--- a/pcs_test/tier1/legacy/test_resource.py
++++ b/pcs_test/tier1/legacy/test_resource.py
+@@ -4879,6 +4879,27 @@ class UpdateInstanceAttrs(
+             ),
+         )
+ 
++    def test_nonexisting_agent(self):
++        agent = "ocf:pacemaker:nonexistent"
++        message = (
++            f"Agent '{agent}' is not installed or does "
++            "not provide valid metadata: Metadata query for "
++            f"{agent} failed: Input/output error"
++        )
++        self.assert_pcs_success(
++            f"resource create --force D0 {agent}".split(),
++            f"Warning: {message}\n",
++        )
++
++        self.assert_pcs_fail(
++            "resource update D0 test=testA".split(),
++            f"Error: {message}, use --force to override\n",
++        )
++        self.assert_pcs_success(
++            "resource update --force D0 test=testA".split(),
++            f"Warning: {message}\n",
++        )
++
+     def test_update_existing(self):
+         xml = """
+             <resources>
+-- 
+2.31.1
+
diff --git a/SOURCES/bz1990784-01-Multiple-fixes-of-pcs-resource-move-autodelete-comma.patch b/SOURCES/bz1990784-01-Multiple-fixes-of-pcs-resource-move-autodelete-comma.patch
new file mode 100644
index 0000000..ae32aae
--- /dev/null
+++ b/SOURCES/bz1990784-01-Multiple-fixes-of-pcs-resource-move-autodelete-comma.patch
@@ -0,0 +1,1031 @@
+From fe1ad27f32e69e3e7c046b51e5406a0693ea1c35 Mon Sep 17 00:00:00 2001
+From: Ondrej Mular <omular@redhat.com>
+Date: Tue, 11 Jan 2022 08:01:10 +0100
+Subject: [PATCH 3/5] Multiple fixes of `pcs resource move --autodelete`
+ command
+
+---
+ pcs/common/reports/codes.py                   |   1 +
+ pcs/common/reports/messages.py                |  21 ++
+ pcs/lib/cib/node.py                           |  14 +-
+ pcs/lib/commands/resource.py                  | 105 ++++++-
+ pcs/lib/node.py                               |   7 +-
+ .../tier0/common/reports/test_messages.py     |  12 +
+ .../resource/test_resource_move_autoclean.py  | 280 +++++++++++++++++-
+ .../resource/test_resource_move_ban.py        |  45 ++-
+ .../tools/command_env/config_runner_pcmk.py   |   2 +
+ pcs_test/tools/command_env/mock_runner.py     |   2 +-
+ pcs_test/tools/fixture_cib.py                 |   1 +
+ 11 files changed, 456 insertions(+), 34 deletions(-)
+
+diff --git a/pcs/common/reports/codes.py b/pcs/common/reports/codes.py
+index 5bae7170..3e0512d9 100644
+--- a/pcs/common/reports/codes.py
++++ b/pcs/common/reports/codes.py
+@@ -418,6 +418,7 @@ RESOURCE_UNMOVE_UNBAN_PCMK_EXPIRED_NOT_SUPPORTED = M(
+ )
+ RESOURCE_MOVE_CONSTRAINT_CREATED = M("RESOURCE_MOVE_CONSTRAINT_CREATED")
+ RESOURCE_MOVE_CONSTRAINT_REMOVED = M("RESOURCE_MOVE_CONSTRAINT_REMOVED")
++RESOURCE_MOVE_NOT_AFFECTING_RESOURCE = M("RESOURCE_MOVE_NOT_AFFECTING_RESOURCE")
+ RESOURCE_MOVE_AFFECTS_OTRHER_RESOURCES = M(
+     "RESOURCE_MOVE_AFFECTS_OTRHER_RESOURCES"
+ )
+diff --git a/pcs/common/reports/messages.py b/pcs/common/reports/messages.py
+index 43ce38e1..9d665e73 100644
+--- a/pcs/common/reports/messages.py
++++ b/pcs/common/reports/messages.py
+@@ -6110,6 +6110,27 @@ class ResourceMoveConstraintRemoved(ReportItemMessage):
+         )
+ 
+ 
++@dataclass(frozen=True)
++class ResourceMoveNotAffectingResource(ReportItemMessage):
++    """
++    Creating a location constraint to move a resource has no effect on the
++    resource.
++
++    resource_id -- id of the resource to be moved
++    """
++
++    resource_id: str
++    _code = codes.RESOURCE_MOVE_NOT_AFFECTING_RESOURCE
++
++    @property
++    def message(self) -> str:
++        return (
++            f"Unable to move resource '{self.resource_id}' using a location "
++            "constraint. Current location of the resource may be affected by "
++            "some other constraint."
++        )
++
++
+ @dataclass(frozen=True)
+ class ResourceMoveAffectsOtherResources(ReportItemMessage):
+     """
+diff --git a/pcs/lib/cib/node.py b/pcs/lib/cib/node.py
+index 20a41ca0..df2ffbaa 100644
+--- a/pcs/lib/cib/node.py
++++ b/pcs/lib/cib/node.py
+@@ -1,12 +1,17 @@
+ from collections import namedtuple
++from typing import Set
+ from lxml import etree
++from lxml.etree import _Element
+ 
+ from pcs.common import reports
+ from pcs.common.reports.item import ReportItem
+ from pcs.lib.cib.nvpair import update_nvset
+ from pcs.lib.cib.tools import get_nodes
+ from pcs.lib.errors import LibraryError
+-from pcs.lib.xml_tools import append_when_useful
++from pcs.lib.xml_tools import (
++    append_when_useful,
++    get_root,
++)
+ 
+ 
+ class PacemakerNode(namedtuple("PacemakerNode", "name addr")):
+@@ -58,6 +63,13 @@ def update_node_instance_attrs(
+     append_when_useful(cib_nodes, node_el)
+ 
+ 
++def get_node_names(cib: _Element) -> Set[str]:
++    return {
++        str(node.attrib["uname"])
++        for node in get_nodes(get_root(cib)).iterfind("./node")
++    }
++
++
+ def _ensure_node_exists(tree, node_name, state_nodes=None):
+     """
+     Make sure node with specified name exists
+diff --git a/pcs/lib/commands/resource.py b/pcs/lib/commands/resource.py
+index d0e8f4db..82ce73e0 100644
+--- a/pcs/lib/commands/resource.py
++++ b/pcs/lib/commands/resource.py
+@@ -50,12 +50,16 @@ from pcs.lib.cib.tools import (
+ from pcs.lib.env import LibraryEnvironment, WaitType
+ from pcs.lib.errors import LibraryError
+ from pcs.lib.external import CommandRunner
+-from pcs.lib.node import get_existing_nodes_names_addrs
++from pcs.lib.node import (
++    get_existing_nodes_names_addrs,
++    get_pacemaker_node_names,
++)
+ from pcs.lib.pacemaker import simulate as simulate_tools
+ from pcs.lib.pacemaker.live import (
+     diff_cibs_xml,
+     get_cib,
+     get_cib_xml,
++    get_cluster_status_dom,
+     has_resource_unmove_unban_expired_support,
+     push_cib_diff_xml,
+     resource_ban,
+@@ -1589,6 +1593,16 @@ def move(
+     )
+ 
+ 
++def _nodes_exist_reports(
++    cib: _Element, node_names: Iterable[str]
++) -> ReportItemList:
++    existing_node_names = get_pacemaker_node_names(cib)
++    return [
++        reports.ReportItem.error(reports.messages.NodeNotFound(node_name))
++        for node_name in (set(node_names) - existing_node_names)
++    ]
++
++
+ def move_autoclean(
+     env: LibraryEnvironment,
+     resource_id: str,
+@@ -1626,6 +1640,9 @@ def move_autoclean(
+     if resource_el is not None:
+         report_list.extend(resource.common.validate_move(resource_el, master))
+ 
++    if node:
++        report_list.extend(_nodes_exist_reports(cib, [node]))
++
+     if env.report_processor.report_list(report_list).has_errors:
+         raise LibraryError()
+ 
+@@ -1659,8 +1676,32 @@ def move_autoclean(
+     add_constraint_cib_diff = diff_cibs_xml(
+         env.cmd_runner(), env.report_processor, cib_xml, rsc_moved_cib_xml
+     )
++    with get_tmp_cib(
++        env.report_processor, rsc_moved_cib_xml
++    ) as rsc_moved_constraint_cleared_cib_file:
++        stdout, stderr, retval = resource_unmove_unban(
++            env.cmd_runner(
++                dict(CIB_file=rsc_moved_constraint_cleared_cib_file.name)
++            ),
++            resource_id,
++            node,
++            master,
++        )
++        if retval != 0:
++            raise LibraryError(
++                ReportItem.error(
++                    reports.messages.ResourceUnmoveUnbanPcmkError(
++                        resource_id, stdout, stderr
++                    )
++                )
++            )
++        rsc_moved_constraint_cleared_cib_file.seek(0)
++        constraint_removed_cib = rsc_moved_constraint_cleared_cib_file.read()
+     remove_constraint_cib_diff = diff_cibs_xml(
+-        env.cmd_runner(), env.report_processor, rsc_moved_cib_xml, cib_xml
++        env.cmd_runner(),
++        env.report_processor,
++        rsc_moved_cib_xml,
++        constraint_removed_cib,
+     )
+ 
+     if not (add_constraint_cib_diff and remove_constraint_cib_diff):
+@@ -1689,13 +1730,15 @@ def move_autoclean(
+                     )
+                 )
+             )
+-    _ensure_resource_is_not_moved(
++    _ensure_resource_moved_and_not_moved_back(
+         env.cmd_runner,
+         env.report_processor,
+         etree_to_str(after_move_simulated_cib),
+         remove_constraint_cib_diff,
+         resource_id,
+         strict,
++        resource_state_before,
++        node,
+     )
+     push_cib_diff_xml(env.cmd_runner(), add_constraint_cib_diff)
+     env.report_processor.report(
+@@ -1704,13 +1747,15 @@ def move_autoclean(
+         )
+     )
+     env.wait_for_idle(wait_timeout)
+-    _ensure_resource_is_not_moved(
++    _ensure_resource_moved_and_not_moved_back(
+         env.cmd_runner,
+         env.report_processor,
+         get_cib_xml(env.cmd_runner()),
+         remove_constraint_cib_diff,
+         resource_id,
+         strict,
++        resource_state_before,
++        node,
+     )
+     push_cib_diff_xml(env.cmd_runner(), remove_constraint_cib_diff)
+     env.report_processor.report(
+@@ -1730,16 +1775,35 @@ def move_autoclean(
+         raise LibraryError()
+ 
+ 
+-def _ensure_resource_is_not_moved(
++def _ensure_resource_moved_and_not_moved_back(
+     runner_factory: Callable[[Optional[Mapping[str, str]]], CommandRunner],
+     report_processor: reports.ReportProcessor,
+     cib_xml: str,
+     remove_constraint_cib_diff: str,
+     resource_id: str,
+     strict: bool,
++    resource_state_before: Dict[str, List[str]],
++    node: Optional[str],
+ ) -> None:
+     # pylint: disable=too-many-locals
+     with get_tmp_cib(report_processor, cib_xml) as rsc_unmove_cib_file:
++        if not _was_resource_moved(
++            node,
++            resource_state_before,
++            get_resource_state(
++                get_cluster_status_dom(
++                    runner_factory(dict(CIB_file=rsc_unmove_cib_file.name))
++                ),
++                resource_id,
++            ),
++        ):
++            raise LibraryError(
++                reports.ReportItem.error(
++                    reports.messages.ResourceMoveNotAffectingResource(
++                        resource_id
++                    )
++                )
++            )
+         push_cib_diff_xml(
+             runner_factory(dict(CIB_file=rsc_unmove_cib_file.name)),
+             remove_constraint_cib_diff,
+@@ -1809,20 +1873,31 @@ def _resource_running_on_nodes(
+     return frozenset()
+ 
+ 
++def _was_resource_moved(
++    node: Optional[str],
++    resource_state_before: Dict[str, List[str]],
++    resource_state_after: Dict[str, List[str]],
++) -> bool:
++    running_on_nodes = _resource_running_on_nodes(resource_state_after)
++    return not bool(
++        resource_state_before
++        and (  # running resource moved
++            not running_on_nodes
++            or (node and node not in running_on_nodes)
++            or (resource_state_before == resource_state_after)
++        )
++    )
++
++
+ def _move_wait_report(
+     resource_id: str,
+     node: Optional[str],
+     resource_state_before: Dict[str, List[str]],
+     resource_state_after: Dict[str, List[str]],
+ ) -> ReportItem:
+-    allowed_nodes = frozenset([node] if node else [])
+-    running_on_nodes = _resource_running_on_nodes(resource_state_after)
+-
+     severity = reports.item.ReportItemSeverity.info()
+-    if resource_state_before and (  # running resource moved
+-        not running_on_nodes
+-        or (allowed_nodes and allowed_nodes.isdisjoint(running_on_nodes))
+-        or (resource_state_before == resource_state_after)
++    if not _was_resource_moved(
++        node, resource_state_before, resource_state_after
+     ):
+         severity = reports.item.ReportItemSeverity.error()
+     if not resource_state_after:
+@@ -1873,14 +1948,18 @@ class _MoveBanTemplate:
+         lifetime=None,
+         wait: WaitType = False,
+     ):
++        # pylint: disable=too-many-locals
+         # validate
+         wait_timeout = env.ensure_wait_satisfiable(wait)  # raises on error
+ 
++        cib = env.get_cib()
+         resource_el, report_list = resource.common.find_one_resource(
+-            get_resources(env.get_cib()), resource_id
++            get_resources(cib), resource_id
+         )
+         if resource_el is not None:
+             report_list.extend(self._validate(resource_el, master))
++        if node:
++            report_list.extend(_nodes_exist_reports(cib, [node]))
+         if env.report_processor.report_list(report_list).has_errors:
+             raise LibraryError()
+ 
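The core of the resource.py changes above can be stated on its own: the check only flags a problem for a resource that was running before the move and afterwards is stopped, is not on the requested node, or has exactly the same placement as before. _was_resource_moved encodes that, and _ensure_resource_moved_and_not_moved_back raises a ResourceMoveNotAffectingResource error when it fails. A self-contained sketch of the predicate; was_resource_moved is an illustrative name, and the inline set comprehension is only a rough stand-in for pcs' _resource_running_on_nodes helper, whose body is not shown here:

from typing import Dict, List, Optional

def was_resource_moved(
    node: Optional[str],
    state_before: Dict[str, List[str]],
    state_after: Dict[str, List[str]],
) -> bool:
    """Return False only when a resource that was running before the move ended up
    stopped, off the requested node, or with unchanged placement."""
    # Rough stand-in for _resource_running_on_nodes(): every node hosting the
    # resource in any role after the move.
    running_on = {n for nodes in state_after.values() for n in nodes}
    return not bool(
        state_before
        and (
            not running_on  # the resource stopped
            or (node and node not in running_on)  # not on the requested node
            or state_before == state_after  # placement did not change at all
        )
    )

# Resource moved from node1 to the requested node2:
print(was_resource_moved("node2", {"Started": ["node1"]}, {"Started": ["node2"]}))  # True
# Placement unchanged, so the location constraint had no effect:
print(was_resource_moved("node2", {"Started": ["node1"]}, {"Started": ["node1"]}))  # False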
+diff --git a/pcs/lib/node.py b/pcs/lib/node.py
+index ff08f747..3a7f236e 100644
+--- a/pcs/lib/node.py
++++ b/pcs/lib/node.py
+@@ -3,6 +3,7 @@ from typing import (
+     List,
+     Optional,
+     Tuple,
++    Set,
+ )
+ 
+ from lxml.etree import _Element
+@@ -11,7 +12,7 @@ from pcs.common import reports
+ from pcs.common.reports import ReportItemList
+ from pcs.common.reports import ReportItemSeverity
+ from pcs.common.reports.item import ReportItem
+-from pcs.lib.cib.node import PacemakerNode
++from pcs.lib.cib.node import PacemakerNode, get_node_names
+ from pcs.lib.cib.resource import remote_node, guest_node
+ from pcs.lib.corosync.config_facade import ConfigFacade as CorosyncConfigFacade
+ from pcs.lib.corosync.node import CorosyncNode
+@@ -28,6 +29,10 @@ def get_existing_nodes_names(
+     )
+ 
+ 
++def get_pacemaker_node_names(cib: _Element) -> Set[str]:
++    return get_node_names(cib) | set(get_existing_nodes_names(None, cib)[0])
++
++
+ def get_existing_nodes_names_addrs(
+     corosync_conf=None, cib=None, error_on_missing_name=False
+ ):
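Tying the helper above to the validation added in resource.py (again just as an aside, not part of the patch): get_pacemaker_node_names unions the unames from the CIB <nodes> section with the names returned by get_existing_nodes_names for the same CIB, and _nodes_exist_reports then reports every requested node missing from that set as a NODE_NOT_FOUND error. Stripped of the pcs report machinery this is a set difference; missing_node_reports and its message wording below are illustrative, not pcs' actual report text:

from typing import Iterable, List, Set

def missing_node_reports(
    existing_node_names: Set[str], requested_nodes: Iterable[str]
) -> List[str]:
    """One error message per requested node the cluster does not know about."""
    return [
        f"node not found: '{name}'"  # placeholder wording, not the pcs message
        for name in sorted(set(requested_nodes) - existing_node_names)
    ]

print(missing_node_reports({"node1", "node2"}, ["node2", "ghost-node"]))
# ["node not found: 'ghost-node'"]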
+diff --git a/pcs_test/tier0/common/reports/test_messages.py b/pcs_test/tier0/common/reports/test_messages.py
+index c85aaa9c..4a7b4945 100644
+--- a/pcs_test/tier0/common/reports/test_messages.py
++++ b/pcs_test/tier0/common/reports/test_messages.py
+@@ -4515,6 +4515,18 @@ class ResourceMoveConstraintRemoved(NameBuildTest):
+         )
+ 
+ 
++class ResourceMoveNotAffectingResource(NameBuildTest):
++    def test_success(self):
++        self.assert_message_from_report(
++            (
++                "Unable to move resource 'R1' using a location constraint. "
++                "Current location of the resource may be affected by some "
++                "other constraint."
++            ),
++            reports.ResourceMoveNotAffectingResource("R1"),
++        )
++
++
+ class ResourceMoveAffectsOtherResources(NameBuildTest):
+     def test_multiple(self):
+         self.assert_message_from_report(
+diff --git a/pcs_test/tier0/lib/commands/resource/test_resource_move_autoclean.py b/pcs_test/tier0/lib/commands/resource/test_resource_move_autoclean.py
+index 32d758de..1bd4ee82 100644
+--- a/pcs_test/tier0/lib/commands/resource/test_resource_move_autoclean.py
++++ b/pcs_test/tier0/lib/commands/resource/test_resource_move_autoclean.py
+@@ -20,6 +20,25 @@ from pcs_test.tools.command_env import get_env_tools
+ from pcs_test.tools.misc import get_test_resource as rc
+ 
+ 
++def _node_fixture(name, node_id):
++    return f'<node id="{node_id}" uname="{name}"/>'
++
++
++def _node_list_fixture(nodes):
++    return "\n".join(
++        _node_fixture(node_name, node_id)
++        for node_id, node_name in enumerate(nodes)
++    )
++
++
++def _nodes_section_fixture(content):
++    return f"""
++    <nodes>
++    {content}
++    </nodes>
++    """
++
++
+ def _rsc_primitive_fixture(res_id):
+     return f'<primitive id="{res_id}"/>'
+ 
+@@ -145,11 +164,17 @@ class MoveAutocleanSuccess(MoveAutocleanCommonSetup):
+             resources=_resources_tag(
+                 _resource_primitive + _resource_promotable_clone
+             ),
++            nodes=_nodes_section_fixture(
++                _node_list_fixture([self.orig_node, self.new_node])
++            ),
+         )
+         self.orig_cib = etree_to_str(
+             xml_fromstring(self.config.calls.get(config_load_cib_name).stdout)
+         )
+         self.cib_with_constraint = '<updated_cib with_constraint="True"/>'
++        self.cib_without_constraint = (
++            '<cib with_constraint="False" updated="True"/>'
++        )
+         self.cib_simulate_constraint = (
+             '<cib simulate="True" with_constraint="True"/>'
+         )
+@@ -160,6 +185,9 @@ class MoveAutocleanSuccess(MoveAutocleanCommonSetup):
+         self.cib_diff_add_constraint_updated_tmp_file_name = (
+             "cib_diff_add_constraint_updated"
+         )
++        self.cib_constraint_removed_by_unmove_file_name = (
++            "cib_constraint_removed_by_unmove"
++        )
+         self.cib_diff_remove_constraint_orig_tmp_file_name = (
+             "cib_diff_remove_constraint_orig"
+         )
+@@ -220,13 +248,18 @@ class MoveAutocleanSuccess(MoveAutocleanCommonSetup):
+                 self.cib_diff_add_constraint_updated_tmp_file_name,
+                 orig_content=self.cib_with_constraint,
+             ),
++            TmpFileCall(
++                self.cib_constraint_removed_by_unmove_file_name,
++                orig_content=self.cib_with_constraint,
++                new_content=self.cib_without_constraint,
++            ),
+             TmpFileCall(
+                 self.cib_diff_remove_constraint_orig_tmp_file_name,
+                 orig_content=self.cib_with_constraint,
+             ),
+             TmpFileCall(
+                 self.cib_diff_remove_constraint_updated_tmp_file_name,
+-                orig_content=self.orig_cib,
++                orig_content=self.cib_without_constraint,
+             ),
+             TmpFileCall(
+                 self.simulated_cib_add_constraint_tmp_file_name,
+@@ -296,6 +329,12 @@ class MoveAutocleanSuccess(MoveAutocleanCommonSetup):
+             stdout=self.cib_diff_add_constraint,
+             name="runner.cib.diff.add_constraint",
+         )
++        self.config.runner.pcmk.resource_clear(
++            resource=resource_id,
++            master=is_promotable,
++            node=self.new_node if with_node else None,
++            env=dict(CIB_file=self.cib_constraint_removed_by_unmove_file_name),
++        )
+         self.config.runner.cib.diff(
+             self.cib_diff_remove_constraint_orig_tmp_file_name,
+             self.cib_diff_remove_constraint_updated_tmp_file_name,
+@@ -308,6 +347,13 @@ class MoveAutocleanSuccess(MoveAutocleanCommonSetup):
+             cib_xml=self.cib_with_constraint,
+             name="pcmk.simulate.rsc.move",
+         )
++        self.config.runner.pcmk.load_state(
++            resources=status_after,
++            name="runner.pcmk.load_state.mid_simulation",
++            env=dict(
++                CIB_file=self.cib_apply_diff_remove_constraint_from_simulated_cib_tmp_file_name
++            ),
++        )
+         self.config.runner.cib.push_diff(
+             cib_diff=self.cib_diff_remove_constraint,
+             name="pcmk.push_cib_diff.simulation.remove_constraint",
+@@ -335,6 +381,13 @@ class MoveAutocleanSuccess(MoveAutocleanCommonSetup):
+             self.cib_with_constraint,
+             name="load_cib_after_move",
+         )
++        self.config.runner.pcmk.load_state(
++            resources=status_after,
++            name="runner.pcmk.load_state.after_push",
++            env=dict(
++                CIB_file=self.cib_apply_diff_remove_constraint_after_push_tmp_file_name
++            ),
++        )
+         self.config.runner.cib.push_diff(
+             cib_diff=self.cib_diff_remove_constraint,
+             name="pcmk.push_cib_diff.simulation.remove_constraint_after_move",
+@@ -380,6 +433,11 @@ class MoveAutocleanSuccess(MoveAutocleanCommonSetup):
+                 file_path=self.cib_diff_add_constraint_updated_tmp_file_name,
+                 content=self.cib_with_constraint,
+             ),
++            fixture.debug(
++                reports.codes.TMP_FILE_WRITE,
++                file_path=self.cib_constraint_removed_by_unmove_file_name,
++                content=self.cib_with_constraint,
++            ),
+             fixture.debug(
+                 reports.codes.TMP_FILE_WRITE,
+                 file_path=self.cib_diff_remove_constraint_orig_tmp_file_name,
+@@ -388,7 +446,7 @@ class MoveAutocleanSuccess(MoveAutocleanCommonSetup):
+             fixture.debug(
+                 reports.codes.TMP_FILE_WRITE,
+                 file_path=self.cib_diff_remove_constraint_updated_tmp_file_name,
+-                content=self.orig_cib,
++                content=self.cib_without_constraint,
+             ),
+             fixture.debug(
+                 reports.codes.TMP_FILE_WRITE,
+@@ -758,9 +816,7 @@ class MoveAutocleanValidations(MoveAutocleanCommonSetup):
+             resources=_state_resource_fixture(resource_id, "Stopped"),
+         )
+         self.env_assist.assert_raise_library_error(
+-            lambda: move_autoclean(
+-                self.env_assist.get_env(), resource_id, node="node"
+-            ),
++            lambda: move_autoclean(self.env_assist.get_env(), resource_id),
+             [
+                 fixture.error(
+                     reports.codes.CANNOT_MOVE_RESOURCE_NOT_RUNNING,
+@@ -770,11 +826,33 @@ class MoveAutocleanValidations(MoveAutocleanCommonSetup):
+             expected_in_processor=False,
+         )
+ 
++    def test_node_not_found(self):
++        resource_id = "A"
++        node = "non_existing_node"
++        self.config.runner.cib.load(
++            resources=_resources_tag(_rsc_primitive_fixture(resource_id)),
++        )
++        self.env_assist.assert_raise_library_error(
++            lambda: move_autoclean(
++                self.env_assist.get_env(), resource_id, node
++            ),
++        )
++        self.env_assist.assert_reports(
++            [
++                fixture.error(
++                    reports.codes.NODE_NOT_FOUND,
++                    node=node,
++                    searched_types=[],
++                )
++            ],
++        )
++
+     def test_constraint_already_exist(self):
+         resource_id = "A"
+         config_load_cib_name = "load_cib"
+         node = "node1"
+         cib_with_constraint = '<cib with_constraint="True"/>'
++        cib_without_constraint = '<cib with_constraint="False" updated="True"/>'
+         cib_rsc_move_tmp_file_name = "cib_rsc_move_tmp_file"
+         cib_diff_add_constraint_orig_tmp_file_name = (
+             "cib_diff_add_constraint_orig"
+@@ -788,6 +866,9 @@ class MoveAutocleanValidations(MoveAutocleanCommonSetup):
+         cib_diff_remove_constraint_updated_tmp_file_name = (
+             "cib_diff_remove_constraint_updated"
+         )
++        cib_constraint_removed_by_unmove_file_name = (
++            "cib_constraint_removed_by_unmove"
++        )
+         self.config.runner.cib.load(
+             resources=_resources_tag(_rsc_primitive_fixture(resource_id)),
+             constraints=f"""
+@@ -795,6 +876,7 @@ class MoveAutocleanValidations(MoveAutocleanCommonSetup):
+                   <rsc_location id="prefer-{resource_id}" rsc="{resource_id}" role="Started" node="{node}" score="INFINITY"/>
+               </constraints>
+             """,
++            nodes=_nodes_section_fixture(_node_list_fixture([node])),
+             name=config_load_cib_name,
+         )
+         orig_cib = etree_to_str(
+@@ -815,13 +897,18 @@ class MoveAutocleanValidations(MoveAutocleanCommonSetup):
+                     cib_diff_add_constraint_updated_tmp_file_name,
+                     orig_content=cib_with_constraint,
+                 ),
++                TmpFileCall(
++                    cib_constraint_removed_by_unmove_file_name,
++                    orig_content=cib_with_constraint,
++                    new_content=cib_without_constraint,
++                ),
+                 TmpFileCall(
+                     cib_diff_remove_constraint_orig_tmp_file_name,
+                     orig_content=cib_with_constraint,
+                 ),
+                 TmpFileCall(
+                     cib_diff_remove_constraint_updated_tmp_file_name,
+-                    orig_content=orig_cib,
++                    orig_content=cib_without_constraint,
+                 ),
+             ]
+         )
+@@ -839,6 +926,11 @@ class MoveAutocleanValidations(MoveAutocleanCommonSetup):
+             stdout="",
+             name="runner.cib.diff.add_constraint",
+         )
++        self.config.runner.pcmk.resource_clear(
++            resource=resource_id,
++            node=node,
++            env=dict(CIB_file=cib_constraint_removed_by_unmove_file_name),
++        )
+         self.config.runner.cib.diff(
+             cib_diff_remove_constraint_orig_tmp_file_name,
+             cib_diff_remove_constraint_updated_tmp_file_name,
+@@ -863,6 +955,11 @@ class MoveAutocleanValidations(MoveAutocleanCommonSetup):
+                     file_path=cib_diff_add_constraint_updated_tmp_file_name,
+                     content=cib_with_constraint,
+                 ),
++                fixture.debug(
++                    reports.codes.TMP_FILE_WRITE,
++                    file_path=cib_constraint_removed_by_unmove_file_name,
++                    content=cib_with_constraint,
++                ),
+                 fixture.debug(
+                     reports.codes.TMP_FILE_WRITE,
+                     file_path=cib_diff_remove_constraint_orig_tmp_file_name,
+@@ -871,7 +968,7 @@ class MoveAutocleanValidations(MoveAutocleanCommonSetup):
+                 fixture.debug(
+                     reports.codes.TMP_FILE_WRITE,
+                     file_path=cib_diff_remove_constraint_updated_tmp_file_name,
+-                    content=orig_cib,
++                    content=cib_without_constraint,
+                 ),
+                 fixture.info(
+                     reports.codes.NO_ACTION_NECESSARY,
+@@ -896,6 +993,9 @@ class MoveAutocleanFailures(MoveAutocleanCommonSetup):
+         self.cib_diff_add_constraint = "diff_add_constraint"
+         self.cib_diff_remove_constraint = "diff_remove_constraint"
+         self.cib_with_constraint = '<cib with_constraint="True"/>'
++        self.cib_without_constraint = (
++            '<cib with_constraint="False" updated="True"/>'
++        )
+         self.cib_rsc_move_tmp_file_name = "cib_rsc_move_tmp_file"
+         self.cib_diff_add_constraint_orig_tmp_file_name = (
+             "cib_diff_add_constraint_orig"
+@@ -903,6 +1003,9 @@ class MoveAutocleanFailures(MoveAutocleanCommonSetup):
+         self.cib_diff_add_constraint_updated_tmp_file_name = (
+             "cib_diff_add_constraint_updated"
+         )
++        self.cib_constraint_removed_by_unmove_file_name = (
++            "cib_constraint_removed_by_unmove"
++        )
+         self.cib_diff_remove_constraint_orig_tmp_file_name = (
+             "cib_diff_remove_constraint_orig"
+         )
+@@ -951,6 +1054,9 @@ class MoveAutocleanFailures(MoveAutocleanCommonSetup):
+ 
+         self.config.runner.cib.load(
+             resources=_resources_tag(_rsc_primitive_fixture(self.resource_id)),
++            nodes=_nodes_section_fixture(
++                _node_list_fixture(["node1", "node2"])
++            ),
+             name=self.config_load_cib_name,
+         )
+         self.orig_cib = etree_to_str(
+@@ -979,13 +1085,18 @@ class MoveAutocleanFailures(MoveAutocleanCommonSetup):
+                 self.cib_diff_add_constraint_updated_tmp_file_name,
+                 orig_content=self.cib_with_constraint,
+             ),
++            TmpFileCall(
++                self.cib_constraint_removed_by_unmove_file_name,
++                orig_content=self.cib_with_constraint,
++                new_content=self.cib_without_constraint,
++            ),
+             TmpFileCall(
+                 self.cib_diff_remove_constraint_orig_tmp_file_name,
+                 orig_content=self.cib_with_constraint,
+             ),
+             TmpFileCall(
+                 self.cib_diff_remove_constraint_updated_tmp_file_name,
+-                orig_content=self.orig_cib,
++                orig_content=self.cib_without_constraint,
+             ),
+             TmpFileCall(
+                 self.simulated_cib_add_constraint_tmp_file_name,
+@@ -1067,6 +1178,11 @@ class MoveAutocleanFailures(MoveAutocleanCommonSetup):
+             stdout=self.cib_diff_add_constraint,
+             name="runner.cib.diff.add_constraint",
+         )
++        self.config.runner.pcmk.resource_clear(
++            resource=self.resource_id,
++            node=node,
++            env=dict(CIB_file=self.cib_constraint_removed_by_unmove_file_name),
++        )
+         self.config.runner.cib.diff(
+             self.cib_diff_remove_constraint_orig_tmp_file_name,
+             self.cib_diff_remove_constraint_updated_tmp_file_name,
+@@ -1081,6 +1197,15 @@ class MoveAutocleanFailures(MoveAutocleanCommonSetup):
+         )
+         if stage <= 1:
+             return
++        self.config.runner.pcmk.load_state(
++            resources=_state_resource_fixture(
++                self.resource_id, "Started", node if node else "node2"
++            ),
++            name="runner.pcmk.load_state.mid_simulation",
++            env=dict(
++                CIB_file=self.cib_apply_diff_remove_constraint_from_simulated_cib_tmp_file_name
++            ),
++        )
+         self.config.runner.cib.push_diff(
+             cib_diff=self.cib_diff_remove_constraint,
+             name="pcmk.push_cib_diff.simulation.remove_constraint",
+@@ -1110,6 +1235,17 @@ class MoveAutocleanFailures(MoveAutocleanCommonSetup):
+             self.cib_with_constraint,
+             name="load_cib_after_move",
+         )
++        if stage <= 3:
++            return
++        self.config.runner.pcmk.load_state(
++            resources=_state_resource_fixture(
++                self.resource_id, "Started", node if node else "node2"
++            ),
++            name="runner.pcmk.load_state.after_push",
++            env=dict(
++                CIB_file=self.cib_apply_diff_remove_constraint_after_push_tmp_file_name
++            ),
++        )
+         self.config.runner.cib.push_diff(
+             cib_diff=self.cib_diff_remove_constraint,
+             name="pcmk.push_cib_diff.simulation.remove_constraint_after_move",
+@@ -1126,7 +1262,7 @@ class MoveAutocleanFailures(MoveAutocleanCommonSetup):
+             ),
+             name="pcmk.simulate.rsc.unmove.after_push",
+         )
+-        if stage <= 3:
++        if stage <= 4:
+             return
+         self.config.runner.cib.push_diff(
+             cib_diff=self.cib_diff_remove_constraint,
+@@ -1153,6 +1289,11 @@ class MoveAutocleanFailures(MoveAutocleanCommonSetup):
+                 file_path=self.cib_diff_add_constraint_updated_tmp_file_name,
+                 content=self.cib_with_constraint,
+             ),
++            fixture.debug(
++                reports.codes.TMP_FILE_WRITE,
++                file_path=self.cib_constraint_removed_by_unmove_file_name,
++                content=self.cib_with_constraint,
++            ),
+             fixture.debug(
+                 reports.codes.TMP_FILE_WRITE,
+                 file_path=self.cib_diff_remove_constraint_orig_tmp_file_name,
+@@ -1161,7 +1302,7 @@ class MoveAutocleanFailures(MoveAutocleanCommonSetup):
+             fixture.debug(
+                 reports.codes.TMP_FILE_WRITE,
+                 file_path=self.cib_diff_remove_constraint_updated_tmp_file_name,
+-                content=self.orig_cib,
++                content=self.cib_without_constraint,
+             ),
+             fixture.debug(
+                 reports.codes.TMP_FILE_WRITE,
+@@ -1199,7 +1340,7 @@ class MoveAutocleanFailures(MoveAutocleanCommonSetup):
+                 reports.codes.WAIT_FOR_IDLE_STARTED,
+                 timeout=0,
+             ),
+-        ][: {None: None, 3: -2, 2: 7, 1: 5}[stage]]
++        ][: {None: None, 4: -2, 3: 10, 2: 8, 1: 6}[stage]]
+ 
+     def test_move_affects_other_resources_strict(self):
+         self.tmp_file_mock_obj.set_calls(
+@@ -1304,7 +1445,8 @@ class MoveAutocleanFailures(MoveAutocleanCommonSetup):
+                 ),
+             )
+         )
+-        self.set_up_testing_env(stage=3)
++        setup_stage = 4
++        self.set_up_testing_env(stage=setup_stage)
+         self.env_assist.assert_raise_library_error(
+             lambda: move_autoclean(self.env_assist.get_env(), self.resource_id),
+             [
+@@ -1316,7 +1458,7 @@ class MoveAutocleanFailures(MoveAutocleanCommonSetup):
+             ],
+             expected_in_processor=False,
+         )
+-        self.env_assist.assert_reports(self.get_reports(stage=3))
++        self.env_assist.assert_reports(self.get_reports(stage=setup_stage))
+ 
+     def test_unmove_after_push_affects_other_resources_strict(self):
+         self.tmp_file_mock_obj.set_calls(
+@@ -1330,7 +1472,8 @@ class MoveAutocleanFailures(MoveAutocleanCommonSetup):
+                 ),
+             )
+         )
+-        self.set_up_testing_env(stage=3)
++        setup_stage = 4
++        self.set_up_testing_env(stage=setup_stage)
+         self.env_assist.assert_raise_library_error(
+             lambda: move_autoclean(
+                 self.env_assist.get_env(),
+@@ -1346,7 +1489,7 @@ class MoveAutocleanFailures(MoveAutocleanCommonSetup):
+             ],
+             expected_in_processor=False,
+         )
+-        self.env_assist.assert_reports(self.get_reports(stage=3))
++        self.env_assist.assert_reports(self.get_reports(stage=setup_stage))
+ 
+     def test_resource_not_runnig_after_move(self):
+         self.tmp_file_mock_obj.set_calls(
+@@ -1381,8 +1524,113 @@ class MoveAutocleanFailures(MoveAutocleanCommonSetup):
+             ]
+         )
+ 
++    def test_simulation_resource_not_moved(self):
++        node = "node2"
++        different_node = f"different-{node}"
++        setup_stage = 1
++        self.tmp_file_mock_obj.set_calls(
++            self.get_tmp_files_mocks(
++                _simulation_transition_fixture(
++                    _simulation_synapses_fixture(self.resource_id)
++                ),
++            )
++            + [
++                TmpFileCall(
++                    self.cib_apply_diff_remove_constraint_from_simulated_cib_tmp_file_name,
++                    orig_content=self.cib_simulate_constraint,
++                ),
++            ]
++        )
++        self.set_up_testing_env(node=node, stage=setup_stage)
++        self.config.runner.pcmk.load_state(
++            resources=_state_resource_fixture(
++                self.resource_id, "Started", different_node
++            ),
++            name="runner.pcmk.load_state.final",
++            env=dict(
++                CIB_file=self.cib_apply_diff_remove_constraint_from_simulated_cib_tmp_file_name
++            ),
++        )
++        self.env_assist.assert_raise_library_error(
++            lambda: move_autoclean(
++                self.env_assist.get_env(),
++                self.resource_id,
++                node=node,
++            ),
++            [
++                fixture.error(
++                    reports.codes.RESOURCE_MOVE_NOT_AFFECTING_RESOURCE,
++                    resource_id=self.resource_id,
++                )
++            ],
++            expected_in_processor=False,
++        )
++        self.env_assist.assert_reports(
++            self.get_reports(stage=setup_stage)
++            + [
++                fixture.debug(
++                    reports.codes.TMP_FILE_WRITE,
++                    file_path=self.cib_apply_diff_remove_constraint_from_simulated_cib_tmp_file_name,
++                    content=self.cib_simulate_constraint,
++                ),
++            ]
++        )
++
++    def test_after_push_resource_not_moved(self):
++        node = "node2"
++        different_node = f"different-{node}"
++        setup_stage = 3
++        self.tmp_file_mock_obj.set_calls(
++            self.get_tmp_files_mocks(
++                _simulation_transition_fixture(
++                    _simulation_synapses_fixture(self.resource_id)
++                ),
++                _simulation_transition_fixture(),
++            )
++            + [
++                TmpFileCall(
++                    self.cib_apply_diff_remove_constraint_after_push_tmp_file_name,
++                    orig_content=self.cib_with_constraint,
++                ),
++            ]
++        )
++        self.set_up_testing_env(node=node, stage=setup_stage)
++        self.config.runner.pcmk.load_state(
++            resources=_state_resource_fixture(
++                self.resource_id, "Started", different_node
++            ),
++            name="runner.pcmk.load_state.final",
++            env=dict(
++                CIB_file=self.cib_apply_diff_remove_constraint_after_push_tmp_file_name,
++            ),
++        )
++        self.env_assist.assert_raise_library_error(
++            lambda: move_autoclean(
++                self.env_assist.get_env(),
++                self.resource_id,
++                node=node,
++            ),
++            [
++                fixture.error(
++                    reports.codes.RESOURCE_MOVE_NOT_AFFECTING_RESOURCE,
++                    resource_id=self.resource_id,
++                )
++            ],
++            expected_in_processor=False,
++        )
++        self.env_assist.assert_reports(
++            self.get_reports(stage=setup_stage)
++            + [
++                fixture.debug(
++                    reports.codes.TMP_FILE_WRITE,
++                    file_path=self.cib_apply_diff_remove_constraint_after_push_tmp_file_name,
++                    content=self.cib_with_constraint,
++                ),
++            ]
++        )
++
+     def test_resource_running_on_a_different_node(self):
+-        node = "node1"
++        node = "node2"
+         different_node = f"different-{node}"
+         self.tmp_file_mock_obj.set_calls(
+             self.get_tmp_files_mocks(
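A quick side note on the fixtures this test module gained at the top of the file (the same three helpers are added to test_resource_move_ban.py below): they only exist to hand runner.cib.load a <nodes> section, numbering node ids from zero. Reproducing them verbatim shows the XML they emit; the print call is just for illustration:

def _node_fixture(name, node_id):
    return f'<node id="{node_id}" uname="{name}"/>'

def _node_list_fixture(nodes):
    return "\n".join(
        _node_fixture(node_name, node_id)
        for node_id, node_name in enumerate(nodes)
    )

def _nodes_section_fixture(content):
    return f"""
    <nodes>
    {content}
    </nodes>
    """

print(_nodes_section_fixture(_node_list_fixture(["node1", "node2"])))
# (whitespace aside)
# <nodes>
# <node id="0" uname="node1"/>
# <node id="1" uname="node2"/>
# </nodes>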
+diff --git a/pcs_test/tier0/lib/commands/resource/test_resource_move_ban.py b/pcs_test/tier0/lib/commands/resource/test_resource_move_ban.py
+index 5d57fa06..28dd1cd1 100644
+--- a/pcs_test/tier0/lib/commands/resource/test_resource_move_ban.py
++++ b/pcs_test/tier0/lib/commands/resource/test_resource_move_ban.py
+@@ -10,6 +10,29 @@ from pcs.common.reports import ReportItemSeverity as severities
+ from pcs.common.reports import codes as report_codes
+ from pcs.lib.commands import resource
+ 
++
++def _node_fixture(name, node_id):
++    return f'<node id="{node_id}" uname="{name}"/>'
++
++
++def _node_list_fixture(nodes):
++    return "\n".join(
++        _node_fixture(node_name, node_id)
++        for node_id, node_name in enumerate(nodes)
++    )
++
++
++def _nodes_section_fixture(content):
++    return f"""
++    <nodes>
++    {content}
++    </nodes>
++    """
++
++
++nodes_section = _nodes_section_fixture(
++    _node_list_fixture(["node", "node1", "node2"])
++)
+ resources_primitive = """
+     <resources>
+         <primitive id="A" />
+@@ -128,8 +151,24 @@ class MoveBanBaseMixin(MoveBanClearBaseMixin):
+             expected_in_processor=False,
+         )
+ 
++    def test_node_not_found(self):
++        self.config.runner.cib.load(resources=resources_primitive)
++        node = "node"
++        self.env_assist.assert_raise_library_error(
++            lambda: self.lib_action(self.env_assist.get_env(), "A", node)
++        )
++        self.env_assist.assert_reports(
++            [
++                fixture.error(
++                    report_codes.NODE_NOT_FOUND, node=node, searched_types=[]
++                )
++            ]
++        )
++
+     def test_all_options(self):
+-        self.config.runner.cib.load(resources=resources_promotable)
++        self.config.runner.cib.load(
++            resources=resources_promotable, nodes=nodes_section
++        )
+         self.config_pcmk_action(
+             resource="A-clone",
+             master=True,
+@@ -274,7 +313,9 @@ class MoveBanWaitMixin:
+     def setUp(self):
+         self.timeout = 10
+         self.env_assist, self.config = get_env_tools(self)
+-        self.config.runner.cib.load(resources=resources_primitive)
++        self.config.runner.cib.load(
++            resources=resources_primitive, nodes=nodes_section
++        )
+ 
+     @mock.patch.object(
+         settings,
+diff --git a/pcs_test/tools/command_env/config_runner_pcmk.py b/pcs_test/tools/command_env/config_runner_pcmk.py
+index e276e03b..213941b8 100644
+--- a/pcs_test/tools/command_env/config_runner_pcmk.py
++++ b/pcs_test/tools/command_env/config_runner_pcmk.py
+@@ -706,6 +706,7 @@ class PcmkShortcuts:
+         stdout="",
+         stderr="",
+         returncode=0,
++        env=None,
+     ):
+         """
+         Create a call for crm_resource --clear
+@@ -722,6 +723,7 @@ class PcmkShortcuts:
+         string stdout -- crm_resource's stdout
+         string stderr -- crm_resource's stderr
+         int returncode -- crm_resource's returncode
++        dict env -- CommandRunner environment variables
+         """
+         # arguments are used via locals()
+         # pylint: disable=unused-argument
+diff --git a/pcs_test/tools/command_env/mock_runner.py b/pcs_test/tools/command_env/mock_runner.py
+index f7871fc2..8520ce02 100644
+--- a/pcs_test/tools/command_env/mock_runner.py
++++ b/pcs_test/tools/command_env/mock_runner.py
+@@ -143,6 +143,6 @@ class Runner:
+             env.update(env_extend)
+         if env != call.env:
+             raise self.__call_queue.error_with_context(
+-                f"ENV doesn't match. Expected: {call.env}; Real: {env}"
++                f"Command #{i}: ENV doesn't match. Expected: {call.env}; Real: {env}"
+             )
+         return call.stdout, call.stderr, call.returncode
+diff --git a/pcs_test/tools/fixture_cib.py b/pcs_test/tools/fixture_cib.py
+index 602491c8..bf02bacc 100644
+--- a/pcs_test/tools/fixture_cib.py
++++ b/pcs_test/tools/fixture_cib.py
+@@ -310,6 +310,7 @@ MODIFIER_GENERATORS = {
+     "replace": replace_all,
+     "append": append_all,
+     "resources": lambda xml: replace_all({"./configuration/resources": xml}),
++    "nodes": lambda xml: replace_all({"./configuration/nodes": xml}),
+     "constraints": lambda xml: replace_all(
+         {"./configuration/constraints": xml}
+     ),
+-- 
+2.31.1
+
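One mechanism worth spelling out, since the patch above leans on it throughout: the new verification steps run through env.cmd_runner(dict(CIB_file=<tmp file>)), i.e. against a scratch copy of the CIB rather than the live cluster, and only the resulting diffs are pushed. Pacemaker command-line tools honour the CIB_file environment variable for exactly this purpose. A rough standalone sketch of the pattern; run_against_scratch_cib is an illustrative name, and the commented-out crm_resource invocation assumes the tool is installed:

import os
import subprocess
import tempfile

def run_against_scratch_cib(cib_xml: str, command: list) -> subprocess.CompletedProcess:
    """Run a Pacemaker CLI tool against a temporary CIB file instead of the live CIB."""
    with tempfile.NamedTemporaryFile("w", suffix=".xml", delete=False) as tmp:
        tmp.write(cib_xml)
        path = tmp.name
    env = dict(os.environ, CIB_file=path)  # Pacemaker tools read CIB_file
    try:
        return subprocess.run(
            command, env=env, capture_output=True, text=True, check=False
        )
    finally:
        os.unlink(path)

# For example, clearing a move constraint for resource "R1" in the scratch copy only:
# run_against_scratch_cib(cib_xml, ["crm_resource", "--clear", "--resource", "R1"])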
diff --git a/SOURCES/bz1991654-01-fix-unfencing-in-pcs-stonith-update-scsi-devices.patch b/SOURCES/bz1991654-01-fix-unfencing-in-pcs-stonith-update-scsi-devices.patch
deleted file mode 100644
index 60b7502..0000000
--- a/SOURCES/bz1991654-01-fix-unfencing-in-pcs-stonith-update-scsi-devices.patch
+++ /dev/null
@@ -1,787 +0,0 @@
-From cf68ded959ad03244c94de308b79fc1af806a474 Mon Sep 17 00:00:00 2001
-From: Ondrej Mular <omular@redhat.com>
-Date: Wed, 15 Sep 2021 07:55:50 +0200
-Subject: [PATCH 1/2] fix unfencing in `pcs stonith update-scsi-devices`
-
-* do not unfence newly added devices on fenced cluster nodes
----
- pcs/common/reports/codes.py                   |   6 ++
- pcs/common/reports/messages.py                |  41 +++++++
- pcs/lib/commands/scsi.py                      |  55 +++++++++-
- pcs/lib/commands/stonith.py                   |  26 +++--
- pcs/lib/communication/scsi.py                 |  40 ++++---
- .../tier0/common/reports/test_messages.py     |  24 +++++
- pcs_test/tier0/lib/commands/test_scsi.py      | 101 ++++++++++++++++--
- .../test_stonith_update_scsi_devices.py       |  87 ++++++++++++---
- .../tools/command_env/config_http_scsi.py     |  16 ++-
- .../tools/command_env/config_runner_scsi.py   |  36 ++++++-
- pcsd/api_v1.rb                                |   2 +-
- pcsd/capabilities.xml                         |   8 +-
- 12 files changed, 387 insertions(+), 55 deletions(-)
-
-diff --git a/pcs/common/reports/codes.py b/pcs/common/reports/codes.py
-index bbd61500..4bee0bac 100644
---- a/pcs/common/reports/codes.py
-+++ b/pcs/common/reports/codes.py
-@@ -468,6 +468,12 @@ STONITH_RESTARTLESS_UPDATE_UNSUPPORTED_AGENT = M(
-     "STONITH_RESTARTLESS_UPDATE_UNSUPPORTED_AGENT"
- )
- STONITH_UNFENCING_FAILED = M("STONITH_UNFENCING_FAILED")
-+STONITH_UNFENCING_DEVICE_STATUS_FAILED = M(
-+    "STONITH_UNFENCING_DEVICE_STATUS_FAILED"
-+)
-+STONITH_UNFENCING_SKIPPED_DEVICES_FENCED = M(
-+    "STONITH_UNFENCING_SKIPPED_DEVICES_FENCED"
-+)
- STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM = M(
-     "STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM"
- )
-diff --git a/pcs/common/reports/messages.py b/pcs/common/reports/messages.py
-index f9688437..be8dd154 100644
---- a/pcs/common/reports/messages.py
-+++ b/pcs/common/reports/messages.py
-@@ -2782,6 +2782,47 @@ class StonithUnfencingFailed(ReportItemMessage):
-         return f"Unfencing failed:\n{self.reason}"
- 
- 
-+@dataclass(frozen=True)
-+class StonithUnfencingDeviceStatusFailed(ReportItemMessage):
-+    """
-+    Unfencing failed: unable to check the status of a device on a cluster node.
-+    """
-+
-+    device: str
-+    reason: str
-+
-+    _code = codes.STONITH_UNFENCING_DEVICE_STATUS_FAILED
-+
-+    @property
-+    def message(self) -> str:
-+        return (
-+            "Unfencing failed, unable to check status of device "
-+            f"'{self.device}': {self.reason}"
-+        )
-+
-+
-+@dataclass(frozen=True)
-+class StonithUnfencingSkippedDevicesFenced(ReportItemMessage):
-+    """
-+    Unfencing skipped on a cluster node, because fenced devices were found on
-+    the node.
-+    """
-+
-+    devices: List[str]
-+
-+    _code = codes.STONITH_UNFENCING_SKIPPED_DEVICES_FENCED
-+
-+    @property
-+    def message(self) -> str:
-+        return (
-+            "Unfencing skipped, {device_pl} {devices} {is_pl} fenced"
-+        ).format(
-+            device_pl=format_plural(self.devices, "device"),
-+            devices=format_list(self.devices),
-+            is_pl=format_plural(self.devices, "is", "are"),
-+        )
-+
-+
- @dataclass(frozen=True)
- class StonithRestartlessUpdateUnableToPerform(ReportItemMessage):
-     """
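The report message in this hunk of the dropped patch is a small example of pcs' plural handling: StonithUnfencingSkippedDevicesFenced builds its text with format_plural and format_list, which matches what the tier0 test further down expects ("device 'dev1' is fenced" vs. "devices 'dev1', 'dev2', 'dev3' are fenced"). Without the pcs helpers the same formatting looks roughly like this; skipped_devices_fenced_message is an illustrative name:

def skipped_devices_fenced_message(devices):
    """Mimic the wording the report message above produces."""
    device_word = "device" if len(devices) == 1 else "devices"
    is_word = "is" if len(devices) == 1 else "are"
    device_list = ", ".join(f"'{dev}'" for dev in sorted(devices))
    return f"Unfencing skipped, {device_word} {device_list} {is_word} fenced"

print(skipped_devices_fenced_message(["dev1"]))
# Unfencing skipped, device 'dev1' is fenced
print(skipped_devices_fenced_message(["dev2", "dev1", "dev3"]))
# Unfencing skipped, devices 'dev1', 'dev2', 'dev3' are fenced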
-diff --git a/pcs/lib/commands/scsi.py b/pcs/lib/commands/scsi.py
-index 31a3ef2d..ff20a563 100644
---- a/pcs/lib/commands/scsi.py
-+++ b/pcs/lib/commands/scsi.py
-@@ -8,20 +8,65 @@ from pcs.lib.env import LibraryEnvironment
- from pcs.lib.errors import LibraryError
- 
- 
--def unfence_node(env: LibraryEnvironment, node: str, devices: Iterable[str]):
-+def unfence_node(
-+    env: LibraryEnvironment,
-+    node: str,
-+    original_devices: Iterable[str],
-+    updated_devices: Iterable[str],
-+) -> None:
-     """
--    Unfence scsi devices on a node by calling fence_scsi agent script.
-+    Unfence scsi devices on a node by calling fence_scsi agent script. Only
-+    newly added devices will be unfenced (set(updated_devices) -
-+    set(original_devices)). Before unfencing, the original devices are checked
-+    to see whether any of them is fenced. If a fenced device is found, unfencing
-+    is skipped.
- 
-     env -- provides communication with externals
-     node -- node name on which unfencing is performed
--    devices -- scsi devices to be unfenced
-+    original_devices -- list of devices defined before update
-+    updated_devices -- list of devices defined after update
-     """
-+    devices_to_unfence = set(updated_devices) - set(original_devices)
-+    if not devices_to_unfence:
-+        return
-+    fence_scsi_bin = os.path.join(settings.fence_agent_binaries, "fence_scsi")
-+    fenced_devices = []
-+    for device in original_devices:
-+        stdout, stderr, return_code = env.cmd_runner().run(
-+            [
-+                fence_scsi_bin,
-+                "--action=status",
-+                f"--devices={device}",
-+                f"--plug={node}",
-+            ]
-+        )
-+        if return_code == 2:
-+            fenced_devices.append(device)
-+        elif return_code != 0:
-+            raise LibraryError(
-+                reports.ReportItem.error(
-+                    reports.messages.StonithUnfencingDeviceStatusFailed(
-+                        device, join_multilines([stderr, stdout])
-+                    )
-+                )
-+            )
-+    if fenced_devices:
-+        # At least one of existing devices is off, which means the node has
-+        # been fenced and new devices should not be unfenced.
-+        env.report_processor.report(
-+            reports.ReportItem.info(
-+                reports.messages.StonithUnfencingSkippedDevicesFenced(
-+                    fenced_devices
-+                )
-+            )
-+        )
-+        return
-     stdout, stderr, return_code = env.cmd_runner().run(
-         [
--            os.path.join(settings.fence_agent_binaries, "fence_scsi"),
-+            fence_scsi_bin,
-             "--action=on",
-             "--devices",
--            ",".join(sorted(devices)),
-+            ",".join(sorted(devices_to_unfence)),
-             f"--plug={node}",
-         ],
-     )
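The behaviour this patch introduced in pcs/lib/commands/scsi.py is summarised by its commit message: do not unfence newly added devices on cluster nodes that are already fenced. Concretely, unfence_node takes the set difference updated_devices - original_devices, probes each original device with fence_scsi --action=status (the code treats exit 0 as unfenced, 2 as fenced, anything else as an error), and skips unfencing entirely when any original device turns out to be fenced. A simplified sketch of that decision with the agent call stubbed out, error handling and the "skipped, devices fenced" report left aside; devices_to_unfence and device_status are illustrative names:

from typing import Callable, Iterable, Set

def devices_to_unfence(
    original: Iterable[str],
    updated: Iterable[str],
    device_status: Callable[[str], int],
) -> Set[str]:
    """
    Newly added devices that should be unfenced, or an empty set when
    unfencing must be skipped because the node is already fenced.

    device_status stands in for running
    fence_scsi --action=status --devices=<dev> --plug=<node>
    and returning its exit code (0 = unfenced, 2 = fenced).
    """
    added = set(updated) - set(original)
    if not added:
        return set()  # nothing new to unfence
    if any(device_status(dev) == 2 for dev in original):
        # the node has been fenced; unfencing the new devices would undo that
        return set()
    return added

# New device and no original device fenced -> unfence the new one:
print(devices_to_unfence(["/dev/sda"], ["/dev/sda", "/dev/sdb"], lambda dev: 0))
# An original device reports "off" -> skip unfencing entirely:
print(devices_to_unfence(["/dev/sda"], ["/dev/sda", "/dev/sdb"], lambda dev: 2))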
-diff --git a/pcs/lib/commands/stonith.py b/pcs/lib/commands/stonith.py
-index 6f26e7d3..0dcf44f2 100644
---- a/pcs/lib/commands/stonith.py
-+++ b/pcs/lib/commands/stonith.py
-@@ -453,7 +453,8 @@ def _update_scsi_devices_get_element_and_devices(
- 
- def _unfencing_scsi_devices(
-     env: LibraryEnvironment,
--    device_list: Iterable[str],
-+    original_devices: Iterable[str],
-+    updated_devices: Iterable[str],
-     force_flags: Container[reports.types.ForceCode] = (),
- ) -> None:
-     """
-@@ -461,9 +462,13 @@ def _unfencing_scsi_devices(
-     to pcsd and corosync is running.
- 
-     env -- provides all for communication with externals
--    device_list -- devices to be unfenced
-+    original_devices -- devices before update
-+    updated_devices -- devices after update
-     force_flags -- list of flags codes
-     """
-+    devices_to_unfence = set(updated_devices) - set(original_devices)
-+    if not devices_to_unfence:
-+        return
-     cluster_nodes_names, nodes_report_list = get_existing_nodes_names(
-         env.get_corosync_conf(),
-         error_on_missing_name=True,
-@@ -487,7 +492,11 @@ def _unfencing_scsi_devices(
-     online_corosync_target_list = run_and_raise(
-         env.get_node_communicator(), com_cmd
-     )
--    com_cmd = Unfence(env.report_processor, sorted(device_list))
-+    com_cmd = Unfence(
-+        env.report_processor,
-+        original_devices=sorted(original_devices),
-+        updated_devices=sorted(updated_devices),
-+    )
-     com_cmd.set_targets(online_corosync_target_list)
-     run_and_raise(env.get_node_communicator(), com_cmd)
- 
-@@ -531,9 +540,9 @@ def update_scsi_devices(
-         IdProvider(stonith_el),
-         set_device_list,
-     )
--    devices_for_unfencing = set(set_device_list).difference(current_device_list)
--    if devices_for_unfencing:
--        _unfencing_scsi_devices(env, devices_for_unfencing, force_flags)
-+    _unfencing_scsi_devices(
-+        env, current_device_list, set_device_list, force_flags
-+    )
-     env.push_cib()
- 
- 
-@@ -585,6 +594,7 @@ def update_scsi_devices_add_remove(
-         IdProvider(stonith_el),
-         updated_device_set,
-     )
--    if add_device_list:
--        _unfencing_scsi_devices(env, add_device_list, force_flags)
-+    _unfencing_scsi_devices(
-+        env, current_device_list, updated_device_set, force_flags
-+    )
-     env.push_cib()
-diff --git a/pcs/lib/communication/scsi.py b/pcs/lib/communication/scsi.py
-index 7b272017..250d67aa 100644
---- a/pcs/lib/communication/scsi.py
-+++ b/pcs/lib/communication/scsi.py
-@@ -1,4 +1,5 @@
- import json
-+from typing import Iterable
- 
- from dacite import DaciteError
- 
-@@ -26,9 +27,15 @@ class Unfence(
-     MarkSuccessfulMixin,
-     RunRemotelyBase,
- ):
--    def __init__(self, report_processor, devices):
-+    def __init__(
-+        self,
-+        report_processor: reports.ReportProcessor,
-+        original_devices: Iterable[str],
-+        updated_devices: Iterable[str],
-+    ) -> None:
-         super().__init__(report_processor)
--        self._devices = devices
-+        self._original_devices = original_devices
-+        self._updated_devices = updated_devices
- 
-     def _get_request_data(self):
-         return None
-@@ -38,9 +45,13 @@ class Unfence(
-             Request(
-                 target,
-                 RequestData(
--                    "api/v1/scsi-unfence-node/v1",
-+                    "api/v1/scsi-unfence-node/v2",
-                     data=json.dumps(
--                        {"devices": self._devices, "node": target.label}
-+                        dict(
-+                            node=target.label,
-+                            original_devices=self._original_devices,
-+                            updated_devices=self._updated_devices,
-+                        )
-                     ),
-                 ),
-             )
-@@ -48,7 +59,9 @@ class Unfence(
-         ]
- 
-     def _process_response(self, response):
--        report_item = response_to_report_item(response)
-+        report_item = response_to_report_item(
-+            response, report_pcsd_too_old_on_404=True
-+        )
-         if report_item:
-             self._report(report_item)
-             return
-@@ -57,15 +70,14 @@ class Unfence(
-             result = from_dict(
-                 InternalCommunicationResultDto, json.loads(response.data)
-             )
--            if result.status != const.COM_STATUS_SUCCESS:
--                context = reports.ReportItemContext(node_label)
--                self._report_list(
--                    [
--                        reports.report_dto_to_item(report, context)
--                        for report in result.report_list
--                    ]
--                )
--            else:
-+            context = reports.ReportItemContext(node_label)
-+            self._report_list(
-+                [
-+                    reports.report_dto_to_item(report, context)
-+                    for report in result.report_list
-+                ]
-+            )
-+            if result.status == const.COM_STATUS_SUCCESS:
-                 self._on_success()
- 
-         except (json.JSONDecodeError, DaciteError):
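One more orientation note on the communication side of this patch: the Unfence command moved from the api/v1/scsi-unfence-node/v1 endpoint to .../v2 and now ships both device lists to each node, so the node itself can tell whether it has been fenced; a 404 from an older pcsd is mapped to a "pcsd version too old" report. The request body is plain JSON built with json.dumps; the values below are made up, only the key names come from the patch:

import json

payload = json.dumps(
    dict(
        node="node1",
        original_devices=["/dev/sda"],
        updated_devices=["/dev/sda", "/dev/sdb"],
    )
)
print(payload)
# {"node": "node1", "original_devices": ["/dev/sda"], "updated_devices": ["/dev/sda", "/dev/sdb"]}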
-diff --git a/pcs_test/tier0/common/reports/test_messages.py b/pcs_test/tier0/common/reports/test_messages.py
-index b0826cfd..05c3f619 100644
---- a/pcs_test/tier0/common/reports/test_messages.py
-+++ b/pcs_test/tier0/common/reports/test_messages.py
-@@ -1904,6 +1904,30 @@ class StonithUnfencingFailed(NameBuildTest):
-         )
- 
- 
-+class StonithUnfencingDeviceStatusFailed(NameBuildTest):
-+    def test_build_message(self):
-+        self.assert_message_from_report(
-+            "Unfencing failed, unable to check status of device 'dev1': reason",
-+            reports.StonithUnfencingDeviceStatusFailed("dev1", "reason"),
-+        )
-+
-+
-+class StonithUnfencingSkippedDevicesFenced(NameBuildTest):
-+    def test_one_device(self):
-+        self.assert_message_from_report(
-+            "Unfencing skipped, device 'dev1' is fenced",
-+            reports.StonithUnfencingSkippedDevicesFenced(["dev1"]),
-+        )
-+
-+    def test_multiple_devices(self):
-+        self.assert_message_from_report(
-+            "Unfencing skipped, devices 'dev1', 'dev2', 'dev3' are fenced",
-+            reports.StonithUnfencingSkippedDevicesFenced(
-+                ["dev2", "dev1", "dev3"]
-+            ),
-+        )
-+
-+
- class StonithRestartlessUpdateUnableToPerform(NameBuildTest):
-     def test_build_message(self):
-         self.assert_message_from_report(
-diff --git a/pcs_test/tier0/lib/commands/test_scsi.py b/pcs_test/tier0/lib/commands/test_scsi.py
-index de75743f..8ef9836a 100644
---- a/pcs_test/tier0/lib/commands/test_scsi.py
-+++ b/pcs_test/tier0/lib/commands/test_scsi.py
-@@ -10,26 +10,113 @@ from pcs.lib.commands import scsi
- class TestUnfenceNode(TestCase):
-     def setUp(self):
-         self.env_assist, self.config = get_env_tools(self)
-+        self.old_devices = ["device1", "device3"]
-+        self.new_devices = ["device3", "device0", "device2"]
-+        self.added_devices = set(self.new_devices) - set(self.old_devices)
-+        self.node = "node1"
- 
--    def test_success(self):
--        self.config.runner.scsi.unfence_node("node1", ["/dev/sda", "/dev/sdb"])
-+    def test_success_devices_to_unfence(self):
-+        for old_dev in self.old_devices:
-+            self.config.runner.scsi.get_status(
-+                self.node, old_dev, name=f"runner.scsi.is_fenced.{old_dev}"
-+            )
-+        self.config.runner.scsi.unfence_node(self.node, self.added_devices)
-         scsi.unfence_node(
--            self.env_assist.get_env(), "node1", ["/dev/sdb", "/dev/sda"]
-+            self.env_assist.get_env(),
-+            self.node,
-+            self.old_devices,
-+            self.new_devices,
-         )
-         self.env_assist.assert_reports([])
- 
--    def test_failure(self):
-+    def test_success_no_devices_to_unfence(self):
-+        scsi.unfence_node(
-+            self.env_assist.get_env(),
-+            self.node,
-+            {"device1", "device2", "device3"},
-+            {"device3"},
-+        )
-+        self.env_assist.assert_reports([])
-+
-+    def test_unfencing_failure(self):
-+        err_msg = "stderr"
-+        for old_dev in self.old_devices:
-+            self.config.runner.scsi.get_status(
-+                self.node, old_dev, name=f"runner.scsi.is_fenced.{old_dev}"
-+            )
-         self.config.runner.scsi.unfence_node(
--            "node1", ["/dev/sda", "/dev/sdb"], stderr="stderr", return_code=1
-+            self.node, self.added_devices, stderr=err_msg, return_code=1
-         )
-         self.env_assist.assert_raise_library_error(
-             lambda: scsi.unfence_node(
--                self.env_assist.get_env(), "node1", ["/dev/sdb", "/dev/sda"]
-+                self.env_assist.get_env(),
-+                self.node,
-+                self.old_devices,
-+                self.new_devices,
-             ),
-             [
-                 fixture.error(
--                    report_codes.STONITH_UNFENCING_FAILED, reason="stderr"
-+                    report_codes.STONITH_UNFENCING_FAILED, reason=err_msg
-                 )
-             ],
-             expected_in_processor=False,
-         )
-+
-+    def test_device_status_failed(self):
-+        err_msg = "stderr"
-+        new_devices = ["device1", "device2", "device3", "device4"]
-+        old_devices = new_devices[:-1]
-+        ok_devices = new_devices[0:2]
-+        err_device = new_devices[2]
-+        for dev in ok_devices:
-+            self.config.runner.scsi.get_status(
-+                self.node, dev, name=f"runner.scsi.is_fenced.{dev}"
-+            )
-+        self.config.runner.scsi.get_status(
-+            self.node,
-+            err_device,
-+            name=f"runner.scsi.is_fenced.{err_device}",
-+            stderr=err_msg,
-+            return_code=1,
-+        )
-+        self.env_assist.assert_raise_library_error(
-+            lambda: scsi.unfence_node(
-+                self.env_assist.get_env(),
-+                self.node,
-+                old_devices,
-+                new_devices,
-+            ),
-+            [
-+                fixture.error(
-+                    report_codes.STONITH_UNFENCING_DEVICE_STATUS_FAILED,
-+                    device=err_device,
-+                    reason=err_msg,
-+                )
-+            ],
-+            expected_in_processor=False,
-+        )
-+
-+    def test_unfencing_skipped_devices_are_fenced(self):
-+        stdout_off = "Status: OFF"
-+        for old_dev in self.old_devices:
-+            self.config.runner.scsi.get_status(
-+                self.node,
-+                old_dev,
-+                name=f"runner.scsi.is_fenced.{old_dev}",
-+                stdout=stdout_off,
-+                return_code=2,
-+            )
-+        scsi.unfence_node(
-+            self.env_assist.get_env(),
-+            self.node,
-+            self.old_devices,
-+            self.new_devices,
-+        )
-+        self.env_assist.assert_reports(
-+            [
-+                fixture.info(
-+                    report_codes.STONITH_UNFENCING_SKIPPED_DEVICES_FENCED,
-+                    devices=sorted(self.old_devices),
-+                )
-+            ]
-+        )
-diff --git a/pcs_test/tier0/lib/commands/test_stonith_update_scsi_devices.py b/pcs_test/tier0/lib/commands/test_stonith_update_scsi_devices.py
-index 6ff6b99a..ed8f5d4f 100644
---- a/pcs_test/tier0/lib/commands/test_stonith_update_scsi_devices.py
-+++ b/pcs_test/tier0/lib/commands/test_stonith_update_scsi_devices.py
-@@ -1,3 +1,4 @@
-+# pylint: disable=too-many-lines
- import json
- from unittest import mock, TestCase
- 
-@@ -297,7 +298,9 @@ class UpdateScsiDevicesMixin:
-                 node_labels=self.existing_nodes
-             )
-             self.config.http.scsi.unfence_node(
--                unfence, node_labels=self.existing_nodes
-+                original_devices=devices_before,
-+                updated_devices=devices_updated,
-+                node_labels=self.existing_nodes,
-             )
-         self.config.env.push_cib(
-             resources=fixture_scsi(
-@@ -449,14 +452,14 @@ class UpdateScsiDevicesFailuresMixin:
-             node_labels=self.existing_nodes
-         )
-         self.config.http.scsi.unfence_node(
--            DEVICES_2,
-             communication_list=[
-                 dict(
-                     label=self.existing_nodes[0],
-                     raw_data=json.dumps(
-                         dict(
--                            devices=[DEV_2],
-                             node=self.existing_nodes[0],
-+                            original_devices=DEVICES_1,
-+                            updated_devices=DEVICES_2,
-                         )
-                     ),
-                     was_connected=False,
-@@ -466,8 +469,9 @@ class UpdateScsiDevicesFailuresMixin:
-                     label=self.existing_nodes[1],
-                     raw_data=json.dumps(
-                         dict(
--                            devices=[DEV_2],
-                             node=self.existing_nodes[1],
-+                            original_devices=DEVICES_1,
-+                            updated_devices=DEVICES_2,
-                         )
-                     ),
-                     output=json.dumps(
-@@ -491,8 +495,9 @@ class UpdateScsiDevicesFailuresMixin:
-                     label=self.existing_nodes[2],
-                     raw_data=json.dumps(
-                         dict(
--                            devices=[DEV_2],
-                             node=self.existing_nodes[2],
-+                            original_devices=DEVICES_1,
-+                            updated_devices=DEVICES_2,
-                         )
-                     ),
-                 ),
-@@ -504,7 +509,7 @@ class UpdateScsiDevicesFailuresMixin:
-                 fixture.error(
-                     reports.codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
-                     node=self.existing_nodes[0],
--                    command="api/v1/scsi-unfence-node/v1",
-+                    command="api/v1/scsi-unfence-node/v2",
-                     reason="errA",
-                 ),
-                 fixture.error(
-@@ -517,20 +522,76 @@ class UpdateScsiDevicesFailuresMixin:
-             ]
-         )
- 
-+    def test_unfence_failure_unknown_command(self):
-+        self._unfence_failure_common_calls()
-+        self.config.http.corosync.get_corosync_online_targets(
-+            node_labels=self.existing_nodes
-+        )
-+        communication_list = [
-+            dict(
-+                label=node,
-+                raw_data=json.dumps(
-+                    dict(
-+                        node=node,
-+                        original_devices=DEVICES_1,
-+                        updated_devices=DEVICES_2,
-+                    )
-+                ),
-+            )
-+            for node in self.existing_nodes[0:2]
-+        ]
-+        communication_list.append(
-+            dict(
-+                label=self.existing_nodes[2],
-+                response_code=404,
-+                raw_data=json.dumps(
-+                    dict(
-+                        node=self.existing_nodes[2],
-+                        original_devices=DEVICES_1,
-+                        updated_devices=DEVICES_2,
-+                    )
-+                ),
-+                output=json.dumps(
-+                    dto.to_dict(
-+                        communication.dto.InternalCommunicationResultDto(
-+                            status=communication.const.COM_STATUS_UNKNOWN_CMD,
-+                            status_msg=(
-+                                "Unknown command '/api/v1/scsi-unfence-node/v2'"
-+                            ),
-+                            report_list=[],
-+                            data=None,
-+                        )
-+                    )
-+                ),
-+            ),
-+        )
-+        self.config.http.scsi.unfence_node(
-+            communication_list=communication_list
-+        )
-+        self.env_assist.assert_raise_library_error(self.command())
-+        self.env_assist.assert_reports(
-+            [
-+                fixture.error(
-+                    reports.codes.PCSD_VERSION_TOO_OLD,
-+                    node=self.existing_nodes[2],
-+                ),
-+            ]
-+        )
-+
-     def test_unfence_failure_agent_script_failed(self):
-         self._unfence_failure_common_calls()
-         self.config.http.corosync.get_corosync_online_targets(
-             node_labels=self.existing_nodes
-         )
-         self.config.http.scsi.unfence_node(
--            DEVICES_2,
-             communication_list=[
-                 dict(
-                     label=self.existing_nodes[0],
-                     raw_data=json.dumps(
-                         dict(
--                            devices=[DEV_2],
-                             node=self.existing_nodes[0],
-+                            original_devices=DEVICES_1,
-+                            updated_devices=DEVICES_2,
-                         )
-                     ),
-                 ),
-@@ -538,8 +599,9 @@ class UpdateScsiDevicesFailuresMixin:
-                     label=self.existing_nodes[1],
-                     raw_data=json.dumps(
-                         dict(
--                            devices=[DEV_2],
-                             node=self.existing_nodes[1],
-+                            original_devices=DEVICES_1,
-+                            updated_devices=DEVICES_2,
-                         )
-                     ),
-                     output=json.dumps(
-@@ -563,8 +625,9 @@ class UpdateScsiDevicesFailuresMixin:
-                     label=self.existing_nodes[2],
-                     raw_data=json.dumps(
-                         dict(
--                            devices=[DEV_2],
-                             node=self.existing_nodes[2],
-+                            original_devices=DEVICES_1,
-+                            updated_devices=DEVICES_2,
-                         )
-                     ),
-                 ),
-@@ -639,14 +702,14 @@ class UpdateScsiDevicesFailuresMixin:
-             ]
-         )
-         self.config.http.scsi.unfence_node(
--            DEVICES_2,
-             communication_list=[
-                 dict(
-                     label=self.existing_nodes[0],
-                     raw_data=json.dumps(
-                         dict(
--                            devices=[DEV_2],
-                             node=self.existing_nodes[0],
-+                            original_devices=DEVICES_1,
-+                            updated_devices=DEVICES_2,
-                         )
-                     ),
-                 ),
-diff --git a/pcs_test/tools/command_env/config_http_scsi.py b/pcs_test/tools/command_env/config_http_scsi.py
-index 0e9f63af..7150eef9 100644
---- a/pcs_test/tools/command_env/config_http_scsi.py
-+++ b/pcs_test/tools/command_env/config_http_scsi.py
-@@ -14,7 +14,8 @@ class ScsiShortcuts:
- 
-     def unfence_node(
-         self,
--        devices,
-+        original_devices=(),
-+        updated_devices=(),
-         node_labels=None,
-         communication_list=None,
-         name="http.scsi.unfence_node",
-@@ -22,7 +23,8 @@ class ScsiShortcuts:
-         """
-         Create calls for node unfencing
- 
--        list devices -- list of scsi devices
-+        list original_devices -- list of scsi devices before an update
-+        list updated_devices -- list of scsi devices after an update
-         list node_labels -- create success responses from these nodes
-         list communication_list -- use these custom responses
-         string name -- the key of this call
-@@ -39,7 +41,13 @@ class ScsiShortcuts:
-             communication_list = [
-                 dict(
-                     label=node,
--                    raw_data=json.dumps(dict(devices=devices, node=node)),
-+                    raw_data=json.dumps(
-+                        dict(
-+                            node=node,
-+                            original_devices=original_devices,
-+                            updated_devices=updated_devices,
-+                        )
-+                    ),
-                 )
-                 for node in node_labels
-             ]
-@@ -47,7 +55,7 @@ class ScsiShortcuts:
-             self.__calls,
-             name,
-             communication_list,
--            action="api/v1/scsi-unfence-node/v1",
-+            action="api/v1/scsi-unfence-node/v2",
-             output=json.dumps(
-                 to_dict(
-                     communication.dto.InternalCommunicationResultDto(
-diff --git a/pcs_test/tools/command_env/config_runner_scsi.py b/pcs_test/tools/command_env/config_runner_scsi.py
-index 4b671bb7..3cee13d6 100644
---- a/pcs_test/tools/command_env/config_runner_scsi.py
-+++ b/pcs_test/tools/command_env/config_runner_scsi.py
-@@ -35,7 +35,41 @@ class ScsiShortcuts:
-                     os.path.join(settings.fence_agent_binaries, "fence_scsi"),
-                     "--action=on",
-                     "--devices",
--                    ",".join(devices),
-+                    ",".join(sorted(devices)),
-+                    f"--plug={node}",
-+                ],
-+                stdout=stdout,
-+                stderr=stderr,
-+                returncode=return_code,
-+            ),
-+        )
-+
-+    def get_status(
-+        self,
-+        node,
-+        device,
-+        stdout="",
-+        stderr="",
-+        return_code=0,
-+        name="runner.scsi.is_fenced",
-+    ):
-+        """
-+        Create a call for getting scsi status
-+
-+        string node -- a node whose fencing status on the device is checked
-+        str device -- a device to check
-+        string stdout -- stdout from fence_scsi agent script
-+        string stderr -- stderr from fence_scsi agent script
-+        int return_code -- return code of the fence_scsi agent script
-+        string name -- the key of this call
-+        """
-+        self.__calls.place(
-+            name,
-+            RunnerCall(
-+                [
-+                    os.path.join(settings.fence_agent_binaries, "fence_scsi"),
-+                    "--action=status",
-+                    f"--devices={device}",
-                     f"--plug={node}",
-                 ],
-                 stdout=stdout,
-diff --git a/pcsd/api_v1.rb b/pcsd/api_v1.rb
-index 7edeeabf..e55c2be7 100644
---- a/pcsd/api_v1.rb
-+++ b/pcsd/api_v1.rb
-@@ -291,7 +291,7 @@ def route_api_v1(auth_user, params, request)
-       :only_superuser => false,
-       :permissions => Permissions::WRITE,
-     },
--    'scsi-unfence-node/v1' => {
-+    'scsi-unfence-node/v2' => {
-       :cmd => 'scsi.unfence_node',
-       :only_superuser => false,
-       :permissions => Permissions::WRITE,
-diff --git a/pcsd/capabilities.xml b/pcsd/capabilities.xml
-index 58ebcf0f..3954aa5d 100644
---- a/pcsd/capabilities.xml
-+++ b/pcsd/capabilities.xml
-@@ -1892,11 +1892,13 @@
-         pcs commands: stonith update-scsi-devices
-       </description>
-     </capability>
--    <capability id="pcmk.stonith.scsi-unfence-node" in-pcs="0" in-pcsd="1">
-+    <capability id="pcmk.stonith.scsi-unfence-node-v2" in-pcs="0" in-pcsd="1">
-       <description>
--        Unfence scsi devices on a cluster node.
-+        Unfence scsi devices on a cluster node. In comparison with v1, only
-+        newly added devices are unfenced. In case any existing device is
-+        fenced, unfencing will be skipped.
- 
--        daemon urls: /api/v1/scsi-unfence-node/v1
-+        daemon urls: /api/v1/scsi-unfence-node/v2
-       </description>
-     </capability>
-     <capability id="pcmk.stonith.enable-disable" in-pcs="1" in-pcsd="1">
--- 
-2.31.1
-
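The deleted patch above replaces the scsi-unfence-node/v1 endpoint with v2, which unfences only newly added devices and skips unfencing when the node is already fenced on one of the pre-existing devices (see the capability description and the new fence_scsi --action=status runner call). A minimal sketch of that behaviour, assuming the agent path, the helper names and the exit-code interpretation of --action=status purely for illustration:

    import subprocess

    # assumed path; the real code joins settings.fence_agent_binaries with "fence_scsi"
    FENCE_SCSI = "/usr/sbin/fence_scsi"

    def is_fenced(node, device):
        # fence_scsi --action=status checks whether the node's key is registered
        # on the device; a nonzero exit code is taken here to mean "fenced"
        # (exit-code interpretation is an assumption of this sketch)
        result = subprocess.run(
            [FENCE_SCSI, "--action=status", f"--devices={device}", f"--plug={node}"],
            check=False,
        )
        return result.returncode != 0

    def unfence_node_v2(node, original_devices, updated_devices):
        # skip unfencing entirely if the node is fenced on any existing device
        if any(is_fenced(node, dev) for dev in original_devices):
            return
        # unfence only the newly added devices, per the v2 capability text
        new_devices = sorted(set(updated_devices) - set(original_devices))
        if new_devices:
            subprocess.run(
                [
                    FENCE_SCSI,
                    "--action=on",
                    "--devices",
                    ",".join(new_devices),
                    f"--plug={node}",
                ],
                check=True,
            )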
diff --git a/SOURCES/bz1992668-01-add-add-remove-syntax-for-command-pcs-stonith-update.patch b/SOURCES/bz1992668-01-add-add-remove-syntax-for-command-pcs-stonith-update.patch
deleted file mode 100644
index cdad5a1..0000000
--- a/SOURCES/bz1992668-01-add-add-remove-syntax-for-command-pcs-stonith-update.patch
+++ /dev/null
@@ -1,3629 +0,0 @@
-From d20c356298eacec1a71a85c29f7d1f8b63fd8cb7 Mon Sep 17 00:00:00 2001
-From: Miroslav Lisik <mlisik@redhat.com>
-Date: Fri, 6 Aug 2021 17:35:03 +0200
-Subject: [PATCH 1/2] add add/remove syntax for command `pcs stonith
- update-scsi-devices`
-
----
- CHANGELOG.md                                  |    9 -
- pcs/cli/common/lib_wrapper.py                 |    1 +
- pcs/common/reports/codes.py                   |   39 +
- pcs/common/reports/const.py                   |    4 +
- pcs/common/reports/messages.py                |  289 ++++
- pcs/common/reports/types.py                   |    2 +
- pcs/common/str_tools.py                       |   26 +-
- pcs/lib/commands/stonith.py                   |  307 +++-
- pcs/pcs.8.in                                  |    4 +-
- pcs/stonith.py                                |   43 +-
- pcs/usage.py                                  |   13 +-
- pcs_test/Makefile.am                          |    1 +
- pcs_test/tier0/cli/test_stonith.py            |  169 +-
- .../tier0/common/reports/test_messages.py     |  185 +++
- pcs_test/tier0/common/test_str_tools.py       |   63 +-
- pcs_test/tier0/lib/cib/test_stonith.py        |  135 +-
- .../test_stonith_update_scsi_devices.py       | 1439 ++++++++++-------
- pcsd/capabilities.xml                         |    8 +
- 18 files changed, 2041 insertions(+), 696 deletions(-)
-
-diff --git a/CHANGELOG.md b/CHANGELOG.md
-index c15546ba..f768cc36 100644
---- a/CHANGELOG.md
-+++ b/CHANGELOG.md
-@@ -1,14 +1,5 @@
- # Change Log
- 
--## [Unreleased]
--
--### Fixed
--- Fixed an error when creating a resource which defines 'depth' attribute for
--  its operations ([rhbz#1998454])
--
--[rhbz#1998454]: https://bugzilla.redhat.com/show_bug.cgi?id=1998454
--
--
- ## [0.10.10] - 2021-08-19
- 
- ### Added
-diff --git a/pcs/cli/common/lib_wrapper.py b/pcs/cli/common/lib_wrapper.py
-index 06410b41..2bf83485 100644
---- a/pcs/cli/common/lib_wrapper.py
-+++ b/pcs/cli/common/lib_wrapper.py
-@@ -436,6 +436,7 @@ def load_module(env, middleware_factory, name):
-                 "history_cleanup": stonith.history_cleanup,
-                 "history_update": stonith.history_update,
-                 "update_scsi_devices": stonith.update_scsi_devices,
-+                "update_scsi_devices_add_remove": stonith.update_scsi_devices_add_remove,
-             },
-         )
- 
-diff --git a/pcs/common/reports/codes.py b/pcs/common/reports/codes.py
-index 32898154..bbd61500 100644
---- a/pcs/common/reports/codes.py
-+++ b/pcs/common/reports/codes.py
-@@ -12,6 +12,29 @@ SKIP_OFFLINE_NODES = F("SKIP_OFFLINE_NODES")
- # messages
- 
- 
-+ADD_REMOVE_ITEMS_NOT_SPECIFIED = M("ADD_REMOVE_ITEMS_NOT_SPECIFIED")
-+ADD_REMOVE_ITEMS_DUPLICATION = M("ADD_REMOVE_ITEMS_DUPLICATION")
-+ADD_REMOVE_CANNOT_ADD_ITEMS_ALREADY_IN_THE_CONTAINER = M(
-+    "ADD_REMOVE_CANNOT_ADD_ITEMS_ALREADY_IN_THE_CONTAINER"
-+)
-+ADD_REMOVE_CANNOT_REMOVE_ITEMS_NOT_IN_THE_CONTAINER = M(
-+    "ADD_REMOVE_CANNOT_REMOVE_ITEMS_NOT_IN_THE_CONTAINER"
-+)
-+ADD_REMOVE_CANNOT_ADD_AND_REMOVE_ITEMS_AT_THE_SAME_TIME = M(
-+    "ADD_REMOVE_CANNOT_ADD_AND_REMOVE_ITEMS_AT_THE_SAME_TIME"
-+)
-+ADD_REMOVE_CANNOT_REMOVE_ALL_ITEMS_FROM_THE_CONTAINER = M(
-+    "ADD_REMOVE_CANNOT_REMOVE_ALL_ITEMS_FROM_THE_CONTAINER"
-+)
-+ADD_REMOVE_ADJACENT_ITEM_NOT_IN_THE_CONTAINER = M(
-+    "ADD_REMOVE_ADJACENT_ITEM_NOT_IN_THE_CONTAINER"
-+)
-+ADD_REMOVE_CANNOT_PUT_ITEM_NEXT_TO_ITSELF = M(
-+    "ADD_REMOVE_CANNOT_PUT_ITEM_NEXT_TO_ITSELF"
-+)
-+ADD_REMOVE_CANNOT_SPECIFY_ADJACENT_ITEM_WITHOUT_ITEMS_TO_ADD = M(
-+    "ADD_REMOVE_CANNOT_SPECIFY_ADJACENT_ITEM_WITHOUT_ITEMS_TO_ADD"
-+)
- AGENT_NAME_GUESS_FOUND_MORE_THAN_ONE = M("AGENT_NAME_GUESS_FOUND_MORE_THAN_ONE")
- AGENT_NAME_GUESS_FOUND_NONE = M("AGENT_NAME_GUESS_FOUND_NONE")
- AGENT_NAME_GUESSED = M("AGENT_NAME_GUESSED")
-@@ -44,17 +67,23 @@ CANNOT_BAN_RESOURCE_MASTER_RESOURCE_NOT_PROMOTABLE = M(
- CANNOT_BAN_RESOURCE_STOPPED_NO_NODE_SPECIFIED = M(
-     "CANNOT_BAN_RESOURCE_STOPPED_NO_NODE_SPECIFIED"
- )
-+# TODO: remove, use ADD_REMOVE reports
- CANNOT_GROUP_RESOURCE_ADJACENT_RESOURCE_FOR_NEW_GROUP = M(
-     "CANNOT_GROUP_RESOURCE_ADJACENT_RESOURCE_FOR_NEW_GROUP"
- )
-+# TODO: remove, use ADD_REMOVE reports
- CANNOT_GROUP_RESOURCE_ADJACENT_RESOURCE_NOT_IN_GROUP = M(
-     "CANNOT_GROUP_RESOURCE_ADJACENT_RESOURCE_NOT_IN_GROUP"
- )
-+# TODO: remove, use ADD_REMOVE reports
- CANNOT_GROUP_RESOURCE_ALREADY_IN_THE_GROUP = M(
-     "CANNOT_GROUP_RESOURCE_ALREADY_IN_THE_GROUP"
- )
-+# TODO: remove, use ADD_REMOVE reports
- CANNOT_GROUP_RESOURCE_MORE_THAN_ONCE = M("CANNOT_GROUP_RESOURCE_MORE_THAN_ONCE")
-+# TODO: remove, use ADD_REMOVE reports
- CANNOT_GROUP_RESOURCE_NEXT_TO_ITSELF = M("CANNOT_GROUP_RESOURCE_NEXT_TO_ITSELF")
-+# TODO: remove, use ADD_REMOVE reports
- CANNOT_GROUP_RESOURCE_NO_RESOURCES = M("CANNOT_GROUP_RESOURCE_NO_RESOURCES")
- CANNOT_GROUP_RESOURCE_WRONG_TYPE = M("CANNOT_GROUP_RESOURCE_WRONG_TYPE")
- CANNOT_LEAVE_GROUP_EMPTY_AFTER_MOVE = M("CANNOT_LEAVE_GROUP_EMPTY_AFTER_MOVE")
-@@ -448,13 +477,17 @@ SERVICE_COMMAND_ON_NODE_ERROR = M("SERVICE_COMMAND_ON_NODE_ERROR")
- SERVICE_COMMAND_ON_NODE_SUCCESS = M("SERVICE_COMMAND_ON_NODE_SUCCESS")
- SERVICE_UNABLE_TO_DETECT_INIT_SYSTEM = M("SERVICE_UNABLE_TO_DETECT_INIT_SYSTEM")
- SYSTEM_WILL_RESET = M("SYSTEM_WILL_RESET")
-+# TODO: remove, use ADD_REMOVE reports
- TAG_ADD_REMOVE_IDS_DUPLICATION = M("TAG_ADD_REMOVE_IDS_DUPLICATION")
-+# TODO: remove, use ADD_REMOVE reports
- TAG_ADJACENT_REFERENCE_ID_NOT_IN_THE_TAG = M(
-     "TAG_ADJACENT_REFERENCE_ID_NOT_IN_THE_TAG"
- )
-+# TODO: remove, use ADD_REMOVE reports
- TAG_CANNOT_ADD_AND_REMOVE_IDS_AT_THE_SAME_TIME = M(
-     "TAG_CANNOT_ADD_AND_REMOVE_IDS_AT_THE_SAME_TIME"
- )
-+# TODO: remove, use ADD_REMOVE reports
- TAG_CANNOT_ADD_REFERENCE_IDS_ALREADY_IN_THE_TAG = M(
-     "TAG_CANNOT_ADD_REFERENCE_IDS_ALREADY_IN_THE_TAG"
- )
-@@ -462,8 +495,11 @@ TAG_CANNOT_CONTAIN_ITSELF = M("TAG_CANNOT_CONTAIN_ITSELF")
- TAG_CANNOT_CREATE_EMPTY_TAG_NO_IDS_SPECIFIED = M(
-     "TAG_CANNOT_CREATE_EMPTY_TAG_NO_IDS_SPECIFIED"
- )
-+# TODO: remove, use ADD_REMOVE reports
- TAG_CANNOT_PUT_ID_NEXT_TO_ITSELF = M("TAG_CANNOT_PUT_ID_NEXT_TO_ITSELF")
-+# TODO: remove, use ADD_REMOVE reports
- TAG_CANNOT_REMOVE_ADJACENT_ID = M("TAG_CANNOT_REMOVE_ADJACENT_ID")
-+# TODO: remove, use ADD_REMOVE reports
- TAG_CANNOT_REMOVE_REFERENCES_WITHOUT_REMOVING_TAG = M(
-     "TAG_CANNOT_REMOVE_REFERENCES_WITHOUT_REMOVING_TAG"
- )
-@@ -473,12 +509,15 @@ TAG_CANNOT_REMOVE_TAG_REFERENCED_IN_CONSTRAINTS = M(
- TAG_CANNOT_REMOVE_TAGS_NO_TAGS_SPECIFIED = M(
-     "TAG_CANNOT_REMOVE_TAGS_NO_TAGS_SPECIFIED"
- )
-+# TODO: remove, use ADD_REMOVE reports
- TAG_CANNOT_SPECIFY_ADJACENT_ID_WITHOUT_IDS_TO_ADD = M(
-     "TAG_CANNOT_SPECIFY_ADJACENT_ID_WITHOUT_IDS_TO_ADD"
- )
-+# TODO: remove, use ADD_REMOVE reports
- TAG_CANNOT_UPDATE_TAG_NO_IDS_SPECIFIED = M(
-     "TAG_CANNOT_UPDATE_TAG_NO_IDS_SPECIFIED"
- )
-+# TODO: remove, use ADD_REMOVE reports
- TAG_IDS_NOT_IN_THE_TAG = M("TAG_IDS_NOT_IN_THE_TAG")
- TMP_FILE_WRITE = M("TMP_FILE_WRITE")
- UNABLE_TO_CONNECT_TO_ANY_REMAINING_NODE = M(
-diff --git a/pcs/common/reports/const.py b/pcs/common/reports/const.py
-index c551338e..88725eb3 100644
---- a/pcs/common/reports/const.py
-+++ b/pcs/common/reports/const.py
-@@ -1,4 +1,6 @@
- from .types import (
-+    AddRemoveContainerType,
-+    AddRemoveItemType,
-     BoothConfigUsedWhere,
-     DefaultAddressSource,
-     FenceHistoryCommandType,
-@@ -9,6 +11,8 @@ from .types import (
- )
- 
- 
-+ADD_REMOVE_CONTAINER_TYPE_STONITH_RESOURCE = AddRemoveContainerType("stonith")
-+ADD_REMOVE_ITEM_TYPE_DEVICE = AddRemoveItemType("device")
- BOOTH_CONFIG_USED_IN_CLUSTER_RESOURCE = BoothConfigUsedWhere(
-     "in a cluster resource"
- )
-diff --git a/pcs/common/reports/messages.py b/pcs/common/reports/messages.py
-index a1c5db11..f9688437 100644
---- a/pcs/common/reports/messages.py
-+++ b/pcs/common/reports/messages.py
-@@ -24,6 +24,7 @@ from pcs.common.str_tools import (
-     format_list_custom_last_separator,
-     format_optional,
-     format_plural,
-+    get_plural,
-     indent,
-     is_iterable_not_str,
- )
-@@ -95,6 +96,14 @@ def _key_numeric(item: str) -> Tuple[int, str]:
-     return (int(item), item) if item.isdigit() else (-1, item)
- 
- 
-+_add_remove_container_translation = {
-+    const.ADD_REMOVE_CONTAINER_TYPE_STONITH_RESOURCE: "stonith resource",
-+}
-+
-+_add_remove_item_translation = {
-+    const.ADD_REMOVE_ITEM_TYPE_DEVICE: "device",
-+}
-+
- _file_role_translation = {
-     file_type_codes.BOOTH_CONFIG: "Booth configuration",
-     file_type_codes.BOOTH_KEY: "Booth key",
-@@ -129,6 +138,16 @@ _type_articles = {
- }
- 
- 
-+def _add_remove_container_str(
-+    container: types.AddRemoveContainerType,
-+) -> str:
-+    return _add_remove_container_translation.get(container, container)
-+
-+
-+def _add_remove_item_str(item: types.AddRemoveItemType) -> str:
-+    return _add_remove_item_translation.get(item, item)
-+
-+
- def _format_file_role(role: file_type_codes.FileTypeCode) -> str:
-     return _file_role_translation.get(role, role)
- 
-@@ -2528,6 +2547,7 @@ class ResourceBundleAlreadyContainsAResource(ReportItemMessage):
-         )
- 
- 
-+# TODO: remove, use ADD_REMOVE reports
- @dataclass(frozen=True)
- class CannotGroupResourceAdjacentResourceForNewGroup(ReportItemMessage):
-     """
-@@ -2551,6 +2571,7 @@ class CannotGroupResourceAdjacentResourceForNewGroup(ReportItemMessage):
-         )
- 
- 
-+# TODO: remove, use ADD_REMOVE reports
- @dataclass(frozen=True)
- class CannotGroupResourceAdjacentResourceNotInGroup(ReportItemMessage):
-     """
-@@ -2573,6 +2594,7 @@ class CannotGroupResourceAdjacentResourceNotInGroup(ReportItemMessage):
-         )
- 
- 
-+# TODO: remove, use ADD_REMOVE reports
- @dataclass(frozen=True)
- class CannotGroupResourceAlreadyInTheGroup(ReportItemMessage):
-     """
-@@ -2593,6 +2615,7 @@ class CannotGroupResourceAlreadyInTheGroup(ReportItemMessage):
-         return f"{resources} already {exist} in '{self.group_id}'"
- 
- 
-+# TODO: remove, use ADD_REMOVE reports
- @dataclass(frozen=True)
- class CannotGroupResourceMoreThanOnce(ReportItemMessage):
-     """
-@@ -2610,6 +2633,7 @@ class CannotGroupResourceMoreThanOnce(ReportItemMessage):
-         return f"Resources specified more than once: {resources}"
- 
- 
-+# TODO: remove, use ADD_REMOVE reports
- @dataclass(frozen=True)
- class CannotGroupResourceNoResources(ReportItemMessage):
-     """
-@@ -2623,6 +2647,7 @@ class CannotGroupResourceNoResources(ReportItemMessage):
-         return "No resources to add"
- 
- 
-+# TODO: remove, use ADD_REMOVE reports
- @dataclass(frozen=True)
- class CannotGroupResourceNextToItself(ReportItemMessage):
-     """
-@@ -6482,6 +6507,7 @@ class BoothTicketOperationFailed(ReportItemMessage):
-         )
- 
- 
-+# TODO: remove, use ADD_REMOVE reports
- @dataclass(frozen=True)
- class TagAddRemoveIdsDuplication(ReportItemMessage):
-     """
-@@ -6500,6 +6526,7 @@ class TagAddRemoveIdsDuplication(ReportItemMessage):
-         return f"Ids to {action} must be unique, duplicate ids: {duplicate_ids}"
- 
- 
-+# TODO: remove, use ADD_REMOVE reports
- @dataclass(frozen=True)
- class TagAdjacentReferenceIdNotInTheTag(ReportItemMessage):
-     """
-@@ -6522,6 +6549,7 @@ class TagAdjacentReferenceIdNotInTheTag(ReportItemMessage):
-         )
- 
- 
-+# TODO: remove, use ADD_REMOVE reports
- @dataclass(frozen=True)
- class TagCannotAddAndRemoveIdsAtTheSameTime(ReportItemMessage):
-     """
-@@ -6540,6 +6568,7 @@ class TagCannotAddAndRemoveIdsAtTheSameTime(ReportItemMessage):
-         return f"Ids cannot be added and removed at the same time: {idref_list}"
- 
- 
-+# TODO: remove, use ADD_REMOVE reports
- @dataclass(frozen=True)
- class TagCannotAddReferenceIdsAlreadyInTheTag(ReportItemMessage):
-     """
-@@ -6591,6 +6620,7 @@ class TagCannotCreateEmptyTagNoIdsSpecified(ReportItemMessage):
-         return "Cannot create empty tag, no resource ids specified"
- 
- 
-+# TODO: remove, use ADD_REMOVE reports
- @dataclass(frozen=True)
- class TagCannotPutIdNextToItself(ReportItemMessage):
-     """
-@@ -6607,6 +6637,7 @@ class TagCannotPutIdNextToItself(ReportItemMessage):
-         return f"Cannot put id '{self.adjacent_id}' next to itself."
- 
- 
-+# TODO: remove, use ADD_REMOVE reports
- @dataclass(frozen=True)
- class TagCannotRemoveAdjacentId(ReportItemMessage):
-     """
-@@ -6626,6 +6657,7 @@ class TagCannotRemoveAdjacentId(ReportItemMessage):
-         )
- 
- 
-+# TODO: remove, use ADD_REMOVE reports
- @dataclass(frozen=True)
- class TagCannotRemoveReferencesWithoutRemovingTag(ReportItemMessage):
-     """
-@@ -6678,6 +6710,7 @@ class TagCannotRemoveTagsNoTagsSpecified(ReportItemMessage):
-         return "Cannot remove tags, no tags to remove specified"
- 
- 
-+# TODO: remove, use ADD_REMOVE reports
- @dataclass(frozen=True)
- class TagCannotSpecifyAdjacentIdWithoutIdsToAdd(ReportItemMessage):
-     """
-@@ -6697,6 +6730,7 @@ class TagCannotSpecifyAdjacentIdWithoutIdsToAdd(ReportItemMessage):
-         )
- 
- 
-+# TODO: remove, use ADD_REMOVE reports
- @dataclass(frozen=True)
- class TagCannotUpdateTagNoIdsSpecified(ReportItemMessage):
-     """
-@@ -6710,6 +6744,7 @@ class TagCannotUpdateTagNoIdsSpecified(ReportItemMessage):
-         return "Cannot update tag, no ids to be added or removed specified"
- 
- 
-+# TODO: remove, use ADD_REMOVE reports
- @dataclass(frozen=True)
- class TagIdsNotInTheTag(ReportItemMessage):
-     """
-@@ -6850,3 +6885,257 @@ class CibNvsetAmbiguousProvideNvsetId(ReportItemMessage):
-     @property
-     def message(self) -> str:
-         return "Several options sets exist, please specify an option set ID"
-+
-+
-+@dataclass(frozen=True)
-+class AddRemoveItemsNotSpecified(ReportItemMessage):
-+    """
-+    Cannot modify container, no add or remove items specified.
-+
-+    container_type -- type of item container
-+    item_type -- type of item in a container
-+    container_id -- id of a container
-+    """
-+
-+    container_type: types.AddRemoveContainerType
-+    item_type: types.AddRemoveItemType
-+    container_id: str
-+    _code = codes.ADD_REMOVE_ITEMS_NOT_SPECIFIED
-+
-+    @property
-+    def message(self) -> str:
-+        container = _add_remove_container_str(self.container_type)
-+        items = get_plural(_add_remove_item_str(self.item_type))
-+        return (
-+            f"Cannot modify {container} '{self.container_id}', no {items} to "
-+            "add or remove specified"
-+        )
-+
-+
-+@dataclass(frozen=True)
-+class AddRemoveItemsDuplication(ReportItemMessage):
-+    """
-+    Duplicate items were found in add/remove item lists.
-+
-+    container_type -- type of item container
-+    item_type -- type of item in a container
-+    container_id -- id of a container
-+    duplicate_items_list -- list of duplicate items
-+    """
-+
-+    container_type: types.AddRemoveContainerType
-+    item_type: types.AddRemoveItemType
-+    container_id: str
-+    duplicate_items_list: List[str]
-+    _code = codes.ADD_REMOVE_ITEMS_DUPLICATION
-+
-+    @property
-+    def message(self) -> str:
-+        items = get_plural(_add_remove_item_str(self.item_type))
-+        duplicate_items = format_list(self.duplicate_items_list)
-+        return (
-+            f"{items.capitalize()} to add or remove must be unique, duplicate "
-+            f"{items}: {duplicate_items}"
-+        )
-+
-+
-+@dataclass(frozen=True)
-+class AddRemoveCannotAddItemsAlreadyInTheContainer(ReportItemMessage):
-+    """
-+    Cannot add items already existing in the container.
-+
-+    container_type -- type of item container
-+    item_type -- type of item in a container
-+    container_id -- id of a container
-+    item_list -- list of items already in the container
-+    """
-+
-+    container_type: types.AddRemoveContainerType
-+    item_type: types.AddRemoveItemType
-+    container_id: str
-+    item_list: List[str]
-+    _code = codes.ADD_REMOVE_CANNOT_ADD_ITEMS_ALREADY_IN_THE_CONTAINER
-+
-+    @property
-+    def message(self) -> str:
-+        items = format_plural(
-+            self.item_list, _add_remove_item_str(self.item_type)
-+        )
-+        item_list = format_list(self.item_list)
-+        they = format_plural(self.item_list, "it")
-+        are = format_plural(self.item_list, "is")
-+        container = _add_remove_container_str(self.container_type)
-+        return (
-+            f"Cannot add {items} {item_list}, {they} {are} already present in "
-+            f"{container} '{self.container_id}'"
-+        )
-+
-+
-+@dataclass(frozen=True)
-+class AddRemoveCannotRemoveItemsNotInTheContainer(ReportItemMessage):
-+    """
-+    Cannot remove items not existing in the container.
-+
-+    container_type -- type of item container
-+    item_type -- type of item in a container
-+    container_id -- id of a container
-+    item_list -- list of items not in the container
-+    """
-+
-+    container_type: types.AddRemoveContainerType
-+    item_type: types.AddRemoveItemType
-+    container_id: str
-+    item_list: List[str]
-+    _code = codes.ADD_REMOVE_CANNOT_REMOVE_ITEMS_NOT_IN_THE_CONTAINER
-+
-+    @property
-+    def message(self) -> str:
-+        items = format_plural(
-+            self.item_list, _add_remove_item_str(self.item_type)
-+        )
-+        item_list = format_list(self.item_list)
-+        they = format_plural(self.item_list, "it")
-+        are = format_plural(self.item_list, "is")
-+        container = _add_remove_container_str(self.container_type)
-+        return (
-+            f"Cannot remove {items} {item_list}, {they} {are} not present in "
-+            f"{container} '{self.container_id}'"
-+        )
-+
-+
-+@dataclass(frozen=True)
-+class AddRemoveCannotAddAndRemoveItemsAtTheSameTime(ReportItemMessage):
-+    """
-+    Cannot add and remove the same items at the same time, as such an
-+    operation would have no effect.
-+
-+    container_type -- type of item container
-+    item_type -- type of item in a container
-+    container_id -- id of a container
-+    item_list -- common items from add and remove item lists
-+    """
-+
-+    container_type: types.AddRemoveContainerType
-+    item_type: types.AddRemoveItemType
-+    container_id: str
-+    item_list: List[str]
-+    _code = codes.ADD_REMOVE_CANNOT_ADD_AND_REMOVE_ITEMS_AT_THE_SAME_TIME
-+
-+    @property
-+    def message(self) -> str:
-+        items = format_plural(
-+            self.item_list, _add_remove_item_str(self.item_type)
-+        )
-+        item_list = format_list(self.item_list)
-+        return (
-+            f"{items.capitalize()} cannot be added and removed at the same "
-+            f"time: {item_list}"
-+        )
-+
-+
-+@dataclass(frozen=True)
-+class AddRemoveCannotRemoveAllItemsFromTheContainer(ReportItemMessage):
-+    """
-+    Cannot remove all items from a container.
-+
-+    container_type -- type of item container
-+    item_type -- type of item in a container
-+    container_id -- id of a container
-+    item_list -- list of items currently in the container
-+    """
-+
-+    container_type: types.AddRemoveContainerType
-+    item_type: types.AddRemoveItemType
-+    container_id: str
-+    item_list: List[str]
-+    _code = codes.ADD_REMOVE_CANNOT_REMOVE_ALL_ITEMS_FROM_THE_CONTAINER
-+
-+    @property
-+    def message(self) -> str:
-+        container = _add_remove_container_str(self.container_type)
-+        items = get_plural(_add_remove_item_str(self.item_type))
-+        return (
-+            f"Cannot remove all {items} from {container} '{self.container_id}'"
-+        )
-+
-+
-+@dataclass(frozen=True)
-+class AddRemoveAdjacentItemNotInTheContainer(ReportItemMessage):
-+    """
-+    Cannot put items next to an adjacent item in the container, because the
-+    adjacent item does not exist in the container.
-+
-+    container_type -- type of item container
-+    item_type -- type of item in a container
-+    container_id -- id of a container
-+    adjacent_item_id -- id of an adjacent item
-+    """
-+
-+    container_type: types.AddRemoveContainerType
-+    item_type: types.AddRemoveItemType
-+    container_id: str
-+    adjacent_item_id: str
-+    _code = codes.ADD_REMOVE_ADJACENT_ITEM_NOT_IN_THE_CONTAINER
-+
-+    @property
-+    def message(self) -> str:
-+        container = _add_remove_container_str(self.container_type)
-+        item = _add_remove_item_str(self.item_type)
-+        items = get_plural(item)
-+        return (
-+            f"There is no {item} '{self.adjacent_item_id}' in the "
-+            f"{container} '{self.container_id}', cannot add {items} next to it"
-+        )
-+
-+
-+@dataclass(frozen=True)
-+class AddRemoveCannotPutItemNextToItself(ReportItemMessage):
-+    """
-+    Cannot put an item into a container next to itself.
-+
-+    container_type -- type of item container
-+    item_type -- type of item in a container
-+    container_id -- id of a container
-+    adjacent_item_id -- id of an adjacent item
-+    """
-+
-+    container_type: types.AddRemoveContainerType
-+    item_type: types.AddRemoveItemType
-+    container_id: str
-+    adjacent_item_id: str
-+    _code = codes.ADD_REMOVE_CANNOT_PUT_ITEM_NEXT_TO_ITSELF
-+
-+    @property
-+    def message(self) -> str:
-+        item = _add_remove_item_str(self.item_type)
-+        return f"Cannot put {item} '{self.adjacent_item_id}' next to itself"
-+
-+
-+@dataclass(frozen=True)
-+class AddRemoveCannotSpecifyAdjacentItemWithoutItemsToAdd(ReportItemMessage):
-+    """
-+    Cannot specify adjacent item without items to add.
-+
-+    container_type -- type of item container
-+    item_type -- type of item in a container
-+    container_id -- id of a container
-+    adjacent_item_id -- id of an adjacent item
-+    """
-+
-+    container_type: types.AddRemoveContainerType
-+    item_type: types.AddRemoveItemType
-+    container_id: str
-+    adjacent_item_id: str
-+    _code = codes.ADD_REMOVE_CANNOT_SPECIFY_ADJACENT_ITEM_WITHOUT_ITEMS_TO_ADD
-+
-+    @property
-+    def message(self) -> str:
-+        item = _add_remove_item_str(self.item_type)
-+        items = get_plural(item)
-+        return (
-+            f"Cannot specify adjacent {item} '{self.adjacent_item_id}' without "
-+            f"{items} to add"
-+        )
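The report classes added above render their text through the pluralisation helpers in pcs.common.str_tools. As an example of the expected rendering (the device paths are hypothetical; the quoting comes from format_list as used in the hunk, assuming these modules are importable from this source tree):

    from pcs.common.reports import const
    from pcs.common.reports.messages import (
        AddRemoveCannotAddItemsAlreadyInTheContainer,
    )

    msg = AddRemoveCannotAddItemsAlreadyInTheContainer(
        const.ADD_REMOVE_CONTAINER_TYPE_STONITH_RESOURCE,
        const.ADD_REMOVE_ITEM_TYPE_DEVICE,
        "scsi-fence-device",
        ["/dev/sdb", "/dev/sdc"],
    ).message
    # "Cannot add devices '/dev/sdb', '/dev/sdc', they are already present
    #  in stonith resource 'scsi-fence-device'"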
-diff --git a/pcs/common/reports/types.py b/pcs/common/reports/types.py
-index fa7fdf4d..610c16f1 100644
---- a/pcs/common/reports/types.py
-+++ b/pcs/common/reports/types.py
-@@ -1,5 +1,7 @@
- from typing import NewType
- 
-+AddRemoveContainerType = NewType("AddRemoveContainerType", str)
-+AddRemoveItemType = NewType("AddRemoveItemType", str)
- BoothConfigUsedWhere = NewType("BoothConfigUsedWhere", str)
- DefaultAddressSource = NewType("DefaultAddressSource", str)
- FenceHistoryCommandType = NewType("FenceHistoryCommandType", str)
-diff --git a/pcs/common/str_tools.py b/pcs/common/str_tools.py
-index 98fe5f50..b8dccc0c 100644
---- a/pcs/common/str_tools.py
-+++ b/pcs/common/str_tools.py
-@@ -131,6 +131,23 @@ def _add_s(word):
-     return word + "s"
- 
- 
-+def get_plural(singular: str) -> str:
-+    """
-+    Take singular word form and return plural.
-+
-+    singular -- singular word (like: is, do, node)
-+    """
-+    common_plurals = {
-+        "is": "are",
-+        "has": "have",
-+        "does": "do",
-+        "it": "they",
-+    }
-+    if singular in common_plurals:
-+        return common_plurals[singular]
-+    return _add_s(singular)
-+
-+
- def format_plural(
-     depends_on: Union[int, Iterable[Any]],
-     singular: str,
-@@ -145,18 +162,11 @@ def format_plural(
-     singular -- singular word (like: is, do, node)
-     plural -- optional irregular plural form
-     """
--    common_plurals = {
--        "is": "are",
--        "has": "have",
--        "does": "do",
--    }
-     if not _is_multiple(depends_on):
-         return singular
-     if plural:
-         return plural
--    if singular in common_plurals:
--        return common_plurals[singular]
--    return _add_s(singular)
-+    return get_plural(singular)
- 
- 
- T = TypeVar("T")
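The get_plural helper factored out above is what the new ADD_REMOVE messages use to pluralise item and container names; format_plural keeps its previous behaviour and now delegates to it. A few calls, following the code in the hunk (assuming the pcs package from this source tree is importable):

    from pcs.common.str_tools import format_plural, get_plural

    get_plural("device")                   # "devices" (regular: appends "s")
    get_plural("is")                       # "are" (irregular form from common_plurals)
    format_plural(["d1"], "device")        # "device" (single item -> singular)
    format_plural(["d1", "d2"], "device")  # "devices" (delegates to get_plural)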
-diff --git a/pcs/lib/commands/stonith.py b/pcs/lib/commands/stonith.py
-index 036e3fa5..6f26e7d3 100644
---- a/pcs/lib/commands/stonith.py
-+++ b/pcs/lib/commands/stonith.py
-@@ -1,9 +1,15 @@
--from typing import Container, Iterable, Optional
-+from collections import Counter
-+from typing import Container, Iterable, List, Optional, Set, Tuple
-+
-+from lxml.etree import _Element
- 
- from pcs.common import reports
-+from pcs.common.reports import ReportItemList
-+from pcs.common.reports import ReportProcessor
- from pcs.common.reports.item import ReportItem
- from pcs.lib.cib import resource
- from pcs.lib.cib import stonith
-+from pcs.lib.cib.nvpair import INSTANCE_ATTRIBUTES_TAG, get_value
- from pcs.lib.cib.resource.common import are_meta_disabled
- from pcs.lib.cib.tools import IdProvider
- from pcs.lib.commands.resource import (
-@@ -20,6 +26,7 @@ from pcs.lib.communication.tools import (
- )
- from pcs.lib.env import LibraryEnvironment
- from pcs.lib.errors import LibraryError
-+from pcs.lib.external import CommandRunner
- from pcs.lib.node import get_existing_nodes_names
- from pcs.lib.pacemaker.live import (
-     FenceHistoryCommandErrorException,
-@@ -268,55 +275,195 @@ def history_update(env: LibraryEnvironment):
-         ) from e
- 
- 
--def update_scsi_devices(
--    env: LibraryEnvironment,
--    stonith_id: str,
--    set_device_list: Iterable[str],
--    force_flags: Container[reports.types.ForceCode] = (),
--) -> None:
-+def _validate_add_remove_items(
-+    add_item_list: Iterable[str],
-+    remove_item_list: Iterable[str],
-+    current_item_list: Iterable[str],
-+    container_type: reports.types.AddRemoveContainerType,
-+    item_type: reports.types.AddRemoveItemType,
-+    container_id: str,
-+    adjacent_item_id: Optional[str] = None,
-+    container_can_be_empty: bool = False,
-+) -> ReportItemList:
-     """
--    Update scsi fencing devices without restart and affecting other resources.
-+    Validate if items can be added or removed to or from a container.
- 
--    env -- provides all for communication with externals
--    stonith_id -- id of stonith resource
--    set_device_list -- paths to the scsi devices that would be set for stonith
--        resource
--    force_flags -- list of flags codes
-+    add_item_list -- items to be added
-+    remove_item_list -- items to be removed
-+    current_item_list -- items currently in the container
-+    container_type -- container type
-+    item_type -- item type
-+    container_id -- id of the container
-+    adjacent_item_id -- an adjacent item in the container
-+    container_can_be_empty -- flag to decide if container can be left empty
-     """
--    if not is_getting_resource_digest_supported(env.cmd_runner()):
--        raise LibraryError(
-+    # pylint: disable=too-many-locals
-+    report_list: ReportItemList = []
-+    if not add_item_list and not remove_item_list:
-+        report_list.append(
-             ReportItem.error(
--                reports.messages.StonithRestartlessUpdateOfScsiDevicesNotSupported()
-+                reports.messages.AddRemoveItemsNotSpecified(
-+                    container_type, item_type, container_id
-+                )
-             )
-         )
--    cib = env.get_cib()
--    if not set_device_list:
--        env.report_processor.report(
-+
-+    def _get_duplicate_items(item_list: Iterable[str]) -> Set[str]:
-+        return {item for item, count in Counter(item_list).items() if count > 1}
-+
-+    duplicate_items_list = _get_duplicate_items(
-+        add_item_list
-+    ) | _get_duplicate_items(remove_item_list)
-+    if duplicate_items_list:
-+        report_list.append(
-             ReportItem.error(
--                reports.messages.InvalidOptionValue(
--                    "devices", "", None, cannot_be_empty=True
-+                reports.messages.AddRemoveItemsDuplication(
-+                    container_type,
-+                    item_type,
-+                    container_id,
-+                    sorted(duplicate_items_list),
-+                )
-+            )
-+        )
-+    already_present = set(add_item_list).intersection(current_item_list)
-+    # report only if an adjacent id is not defined, because we want to allow
-+    # moving items when adjacent_item_id is specified
-+    if adjacent_item_id is None and already_present:
-+        report_list.append(
-+            ReportItem.error(
-+                reports.messages.AddRemoveCannotAddItemsAlreadyInTheContainer(
-+                    container_type,
-+                    item_type,
-+                    container_id,
-+                    sorted(already_present),
-+                )
-+            )
-+        )
-+    missing_items = set(remove_item_list).difference(current_item_list)
-+    if missing_items:
-+        report_list.append(
-+            ReportItem.error(
-+                reports.messages.AddRemoveCannotRemoveItemsNotInTheContainer(
-+                    container_type,
-+                    item_type,
-+                    container_id,
-+                    sorted(missing_items),
-                 )
-             )
-         )
-+    common_items = set(add_item_list) & set(remove_item_list)
-+    if common_items:
-+        report_list.append(
-+            ReportItem.error(
-+                reports.messages.AddRemoveCannotAddAndRemoveItemsAtTheSameTime(
-+                    container_type,
-+                    item_type,
-+                    container_id,
-+                    sorted(common_items),
-+                )
-+            )
-+        )
-+    if not container_can_be_empty and not add_item_list:
-+        remaining_items = set(current_item_list).difference(remove_item_list)
-+        if not remaining_items:
-+            report_list.append(
-+                ReportItem.error(
-+                    reports.messages.AddRemoveCannotRemoveAllItemsFromTheContainer(
-+                        container_type,
-+                        item_type,
-+                        container_id,
-+                        list(current_item_list),
-+                    )
-+                )
-+            )
-+    if adjacent_item_id:
-+        if adjacent_item_id not in current_item_list:
-+            report_list.append(
-+                ReportItem.error(
-+                    reports.messages.AddRemoveAdjacentItemNotInTheContainer(
-+                        container_type,
-+                        item_type,
-+                        container_id,
-+                        adjacent_item_id,
-+                    )
-+                )
-+            )
-+        if adjacent_item_id in add_item_list:
-+            report_list.append(
-+                ReportItem.error(
-+                    reports.messages.AddRemoveCannotPutItemNextToItself(
-+                        container_type,
-+                        item_type,
-+                        container_id,
-+                        adjacent_item_id,
-+                    )
-+                )
-+            )
-+        if not add_item_list:
-+            report_list.append(
-+                ReportItem.error(
-+                    reports.messages.AddRemoveCannotSpecifyAdjacentItemWithoutItemsToAdd(
-+                        container_type,
-+                        item_type,
-+                        container_id,
-+                        adjacent_item_id,
-+                    )
-+                )
-+            )
-+    return report_list
-+
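The validator above collects one report per violated rule instead of stopping at the first problem. A hypothetical call illustrating two of those rules, using the constants added in pcs/common/reports/const.py earlier in this patch:

    report_list = _validate_add_remove_items(
        add_item_list=["/dev/sdb", "/dev/sdb"],  # duplicate -> ADD_REMOVE_ITEMS_DUPLICATION
        remove_item_list=["/dev/sdx"],           # not present -> CANNOT_REMOVE_ITEMS_NOT_IN_THE_CONTAINER
        current_item_list=["/dev/sda"],
        container_type=reports.const.ADD_REMOVE_CONTAINER_TYPE_STONITH_RESOURCE,
        item_type=reports.const.ADD_REMOVE_ITEM_TYPE_DEVICE,
        container_id="scsi-fence-device",
    )
    # report_list now holds two error ReportItems; the caller passes them to
    # the report processor and raises LibraryError if any errors are present.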
-+
-+def _update_scsi_devices_get_element_and_devices(
-+    runner: CommandRunner,
-+    report_processor: ReportProcessor,
-+    cib: _Element,
-+    stonith_id: str,
-+) -> Tuple[_Element, List[str]]:
-+    """
-+    Do checks and return stonith element and list of current scsi devices.
-+    Raise LibraryError if checks fail.
-+
-+    runner -- command runner instance
-+    report_processor -- tool for warning/info/error reporting
-+    cib -- cib element
-+    stonith_id -- id of stonith resource
-+    """
-+    if not is_getting_resource_digest_supported(runner):
-+        raise LibraryError(
-+            ReportItem.error(
-+                reports.messages.StonithRestartlessUpdateOfScsiDevicesNotSupported()
-+            )
-+        )
-     (
-         stonith_el,
-         report_list,
-     ) = stonith.validate_stonith_restartless_update(cib, stonith_id)
--    if env.report_processor.report_list(report_list).has_errors:
-+    if report_processor.report_list(report_list).has_errors:
-         raise LibraryError()
--    # for mypy, this should not happen because exeption would be raised
-+    # for mypy, this should not happen because exception would be raised
-     if stonith_el is None:
-         raise AssertionError("stonith element is None")
--
--    stonith.update_scsi_devices_without_restart(
--        env.cmd_runner(),
--        env.get_cluster_state(),
--        stonith_el,
--        IdProvider(cib),
--        set_device_list,
-+    current_device_list = get_value(
-+        INSTANCE_ATTRIBUTES_TAG, stonith_el, "devices"
-     )
-+    if current_device_list is None:
-+        raise AssertionError("current_device_list is None")
-+    return stonith_el, current_device_list.split(",")
-+
-+
-+def _unfencing_scsi_devices(
-+    env: LibraryEnvironment,
-+    device_list: Iterable[str],
-+    force_flags: Container[reports.types.ForceCode] = (),
-+) -> None:
-+    """
-+    Unfence scsi devices provided in device_list if it is possible to connect
-+    to pcsd and corosync is running.
- 
--    # Unfencing
-+    env -- provides all for communication with externals
-+    device_list -- devices to be unfenced
-+    force_flags -- list of flags codes
-+    """
-     cluster_nodes_names, nodes_report_list = get_existing_nodes_names(
-         env.get_corosync_conf(),
-         error_on_missing_name=True,
-@@ -340,8 +487,104 @@ def update_scsi_devices(
-     online_corosync_target_list = run_and_raise(
-         env.get_node_communicator(), com_cmd
-     )
--    com_cmd = Unfence(env.report_processor, sorted(set_device_list))
-+    com_cmd = Unfence(env.report_processor, sorted(device_list))
-     com_cmd.set_targets(online_corosync_target_list)
-     run_and_raise(env.get_node_communicator(), com_cmd)
- 
-+
-+def update_scsi_devices(
-+    env: LibraryEnvironment,
-+    stonith_id: str,
-+    set_device_list: Iterable[str],
-+    force_flags: Container[reports.types.ForceCode] = (),
-+) -> None:
-+    """
-+    Update scsi fencing devices without a restart and without affecting other resources.
-+
-+    env -- provides all for communication with externals
-+    stonith_id -- id of stonith resource
-+    set_device_list -- paths to the scsi devices that would be set for stonith
-+        resource
-+    force_flags -- list of flags codes
-+    """
-+    if not set_device_list:
-+        env.report_processor.report(
-+            ReportItem.error(
-+                reports.messages.InvalidOptionValue(
-+                    "devices", "", None, cannot_be_empty=True
-+                )
-+            )
-+        )
-+    runner = env.cmd_runner()
-+    (
-+        stonith_el,
-+        current_device_list,
-+    ) = _update_scsi_devices_get_element_and_devices(
-+        runner, env.report_processor, env.get_cib(), stonith_id
-+    )
-+    if env.report_processor.has_errors:
-+        raise LibraryError()
-+    stonith.update_scsi_devices_without_restart(
-+        runner,
-+        env.get_cluster_state(),
-+        stonith_el,
-+        IdProvider(stonith_el),
-+        set_device_list,
-+    )
-+    devices_for_unfencing = set(set_device_list).difference(current_device_list)
-+    if devices_for_unfencing:
-+        _unfencing_scsi_devices(env, devices_for_unfencing, force_flags)
-+    env.push_cib()
-+
-+
-+def update_scsi_devices_add_remove(
-+    env: LibraryEnvironment,
-+    stonith_id: str,
-+    add_device_list: Iterable[str],
-+    remove_device_list: Iterable[str],
-+    force_flags: Container[reports.types.ForceCode] = (),
-+) -> None:
-+    """
-+    Update scsi fencing devices without a restart and without affecting other resources.
-+
-+    env -- provides all for communication with externals
-+    stonith_id -- id of stonith resource
-+    add_device_list -- paths to the scsi devices that would be added to the
-+        stonith resource
-+    remove_device_list -- paths to the scsi devices that would be removed from
-+        the stonith resource
-+    force_flags -- list of flags codes
-+    """
-+    runner = env.cmd_runner()
-+    (
-+        stonith_el,
-+        current_device_list,
-+    ) = _update_scsi_devices_get_element_and_devices(
-+        runner, env.report_processor, env.get_cib(), stonith_id
-+    )
-+    if env.report_processor.report_list(
-+        _validate_add_remove_items(
-+            add_device_list,
-+            remove_device_list,
-+            current_device_list,
-+            reports.const.ADD_REMOVE_CONTAINER_TYPE_STONITH_RESOURCE,
-+            reports.const.ADD_REMOVE_ITEM_TYPE_DEVICE,
-+            stonith_el.get("id", ""),
-+        )
-+    ).has_errors:
-+        raise LibraryError()
-+    updated_device_set = (
-+        set(current_device_list)
-+        .union(add_device_list)
-+        .difference(remove_device_list)
-+    )
-+    stonith.update_scsi_devices_without_restart(
-+        env.cmd_runner(),
-+        env.get_cluster_state(),
-+        stonith_el,
-+        IdProvider(stonith_el),
-+        updated_device_set,
-+    )
-+    if add_device_list:
-+        _unfencing_scsi_devices(env, add_device_list, force_flags)
-     env.push_cib()
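Once validation passes, update_scsi_devices_add_remove above reduces to simple set arithmetic: the new device list is the union of the current and added devices minus the removed ones, and only the added devices are unfenced. In plain Python, with hypothetical values:

    current = {"/dev/sda", "/dev/sdb"}
    add = {"/dev/sdc"}
    remove = {"/dev/sdb"}

    updated_device_set = current.union(add).difference(remove)
    # {"/dev/sda", "/dev/sdc"} -> written to the CIB without a resource restart
    devices_to_unfence = add
    # only newly added devices are unfenced; removals need no unfencing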
-diff --git a/pcs/pcs.8.in b/pcs/pcs.8.in
-index ac093d69..1695d75c 100644
---- a/pcs/pcs.8.in
-+++ b/pcs/pcs.8.in
-@@ -664,8 +664,8 @@ pcs stonith create MyFence fence_virt 'pcmk_host_map=n1:p1;n2:p2,p3'
- update <stonith id> [stonith device options]
- Add/Change options to specified stonith id.
- .TP
--update\-scsi\-devices <stonith id> set <device\-path> [<device\-path>...]
--Update scsi fencing devices without affecting other resources. Stonith resource must be running on one cluster node. Each device will be unfenced on each cluster node running cluster. Supported fence agents: fence_scsi.
-+update\-scsi\-devices <stonith id> (set <device\-path> [<device\-path>...]) | (add <device\-path> [<device\-path>...] delete|remove <device\-path> [<device\-path>...] )
-+Update scsi fencing devices without affecting other resources. You must specify either a list of devices to set, or at least one device to add or delete/remove. The stonith resource must be running on one cluster node. Each device will be unfenced on each cluster node where the cluster is running. Supported fence agents: fence_scsi.
- .TP
- delete <stonith id>
- Remove stonith id from configuration.
-diff --git a/pcs/stonith.py b/pcs/stonith.py
-index c7eb14de..6ed8b751 100644
---- a/pcs/stonith.py
-+++ b/pcs/stonith.py
-@@ -894,24 +894,43 @@ def stonith_update_scsi_devices(lib, argv, modifiers):
-       * --skip-offline - skip unreachable nodes
-     """
-     modifiers.ensure_only_supported("--request-timeout", "--skip-offline")
-+    force_flags = []
-+    if modifiers.get("--skip-offline"):
-+        force_flags.append(reports.codes.SKIP_OFFLINE_NODES)
-+
-     if len(argv) < 2:
-         raise CmdLineInputError()
-     stonith_id = argv[0]
-     parsed_args = parse_args.group_by_keywords(
-         argv[1:],
--        ["set"],
-+        ["set", "add", "remove", "delete"],
-         keyword_repeat_allowed=False,
-         only_found_keywords=True,
-     )
--    set_args = parsed_args["set"] if "set" in parsed_args else []
--    if not set_args:
--        raise CmdLineInputError(
--            show_both_usage_and_message=True,
--            hint="You must specify set devices to be updated",
--        )
--    force_flags = []
--    if modifiers.get("--skip-offline"):
--        force_flags.append(reports.codes.SKIP_OFFLINE_NODES)
--    lib.stonith.update_scsi_devices(
--        stonith_id, set_args, force_flags=force_flags
-+    cmd_exception = CmdLineInputError(
-+        show_both_usage_and_message=True,
-+        hint=(
-+            "You must specify either list of set devices or at least one device"
-+            " for add or delete/remove devices"
-+        ),
-     )
-+    if "set" in parsed_args and {"add", "remove", "delete"} & set(
-+        parsed_args.keys()
-+    ):
-+        raise cmd_exception
-+    if "set" in parsed_args:
-+        if not parsed_args["set"]:
-+            raise cmd_exception
-+        lib.stonith.update_scsi_devices(
-+            stonith_id, parsed_args["set"], force_flags=force_flags
-+        )
-+    else:
-+        for key in ("add", "remove", "delete"):
-+            if key in parsed_args and not parsed_args[key]:
-+                raise cmd_exception
-+        lib.stonith.update_scsi_devices_add_remove(
-+            stonith_id,
-+            parsed_args.get("add", []),
-+            parsed_args.get("delete", []) + parsed_args.get("remove", []),
-+            force_flags=force_flags,
-+        )
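The CLI change above keeps the original "set" form and adds "add"/"remove"/"delete" groups parsed with group_by_keywords. A sketch of what the parser returns for the new form (the argv value is hypothetical; the call signature is the one used in the hunk):

    from pcs.cli.common import parse_args

    argv = ["add", "/dev/sdc", "remove", "/dev/sda", "/dev/sdb"]
    parsed_args = parse_args.group_by_keywords(
        argv,
        ["set", "add", "remove", "delete"],
        keyword_repeat_allowed=False,
        only_found_keywords=True,
    )
    # parsed_args == {"add": ["/dev/sdc"], "remove": ["/dev/sda", "/dev/sdb"]}
    # Combining "set" with add/remove/delete, or leaving any keyword without
    # devices, raises CmdLineInputError with the usage hint shown above.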
-diff --git a/pcs/usage.py b/pcs/usage.py
-index 38e21ed9..66e097f1 100644
---- a/pcs/usage.py
-+++ b/pcs/usage.py
-@@ -1289,11 +1289,14 @@ Commands:
-     update <stonith id> [stonith device options]
-         Add/Change options to specified stonith id.
- 
--    update-scsi-devices <stonith id> set <device-path> [<device-path>...]
--        Update scsi fencing devices without affecting other resources. Stonith
--        resource must be running on one cluster node. Each device will be
--        unfenced on each cluster node running cluster. Supported fence agents:
--        fence_scsi.
-+    update-scsi-devices <stonith id> (set <device-path> [<device-path>...])
-+            | (add <device-path> [<device-path>...] delete|remove <device-path>
-+            [device-path>...])
-+        Update scsi fencing devices without affecting other resources. You
-+        must specify either a list of devices to set, or at least one device
-+        to add or delete/remove. The stonith resource must be running on one
-+        cluster node. Each device will be unfenced on each cluster node where
-+        the cluster is running. Supported fence agents: fence_scsi.
- 
-     delete <stonith id>
-         Remove stonith id from configuration.
-diff --git a/pcs_test/Makefile.am b/pcs_test/Makefile.am
-index b4df00e2..c7346f96 100644
---- a/pcs_test/Makefile.am
-+++ b/pcs_test/Makefile.am
-@@ -236,6 +236,7 @@ EXTRA_DIST		= \
- 			  tier0/lib/commands/test_stonith_agent.py \
- 			  tier0/lib/commands/test_stonith_history.py \
- 			  tier0/lib/commands/test_stonith.py \
-+			  tier0/lib/commands/test_stonith_update_scsi_devices.py \
- 			  tier0/lib/commands/test_ticket.py \
- 			  tier0/lib/communication/__init__.py \
- 			  tier0/lib/communication/test_booth.py \
-diff --git a/pcs_test/tier0/cli/test_stonith.py b/pcs_test/tier0/cli/test_stonith.py
-index 5bc18f3c..a54b442e 100644
---- a/pcs_test/tier0/cli/test_stonith.py
-+++ b/pcs_test/tier0/cli/test_stonith.py
-@@ -149,15 +149,41 @@ class SbdDeviceSetup(TestCase):
- 
- 
- class StonithUpdateScsiDevices(TestCase):
-+    # pylint: disable=too-many-public-methods
-     def setUp(self):
-         self.lib = mock.Mock(spec_set=["stonith"])
--        self.stonith = mock.Mock(spec_set=["update_scsi_devices"])
-+        self.stonith = mock.Mock(
-+            spec_set=["update_scsi_devices", "update_scsi_devices_add_remove"]
-+        )
-         self.lib.stonith = self.stonith
- 
-     def assert_called_with(self, stonith_id, set_devices, force_flags):
-         self.stonith.update_scsi_devices.assert_called_once_with(
-             stonith_id, set_devices, force_flags=force_flags
-         )
-+        self.stonith.update_scsi_devices_add_remove.assert_not_called()
-+
-+    def assert_add_remove_called_with(
-+        self, stonith_id, add_devices, remove_devices, force_flags
-+    ):
-+        self.stonith.update_scsi_devices_add_remove.assert_called_once_with(
-+            stonith_id, add_devices, remove_devices, force_flags=force_flags
-+        )
-+        self.stonith.update_scsi_devices.assert_not_called()
-+
-+    def assert_bad_syntax_cli_exception(self, args):
-+        with self.assertRaises(CmdLineInputError) as cm:
-+            self.call_cmd(args)
-+        self.assertEqual(cm.exception.message, None)
-+        self.assertEqual(
-+            cm.exception.hint,
-+            (
-+                "You must specify either list of set devices or at least one "
-+                "device for add or delete/remove devices"
-+            ),
-+        )
-+        self.stonith.update_scsi_devices.assert_not_called()
-+        self.stonith.update_scsi_devices_add_remove.assert_not_called()
- 
-     def call_cmd(self, argv, modifiers=None):
-         stonith.stonith_update_scsi_devices(
-@@ -174,44 +200,141 @@ class StonithUpdateScsiDevices(TestCase):
-             self.call_cmd(["stonith-id"])
-         self.assertEqual(cm.exception.message, None)
- 
--    def test_not_set_keyword(self):
-+    def test_unknown_keyword(self):
-         with self.assertRaises(CmdLineInputError) as cm:
-             self.call_cmd(["stonith-id", "unset"])
-         self.assertEqual(cm.exception.message, None)
- 
--    def test_only_set_keyword(self):
--        with self.assertRaises(CmdLineInputError) as cm:
--            self.call_cmd(["stonith-id", "set"])
--        self.assertEqual(cm.exception.message, None)
--        self.assertEqual(
--            cm.exception.hint, "You must specify set devices to be updated"
--        )
--
--    def test_one_device(self):
--        self.call_cmd(["stonith-id", "set", "device1"])
--        self.assert_called_with("stonith-id", ["device1"], [])
--
--    def test_more_devices(self):
--        self.call_cmd(["stonith-id", "set", "device1", "device2"])
--        self.assert_called_with("stonith-id", ["device1", "device2"], [])
--
-     def test_supported_options(self):
-         self.call_cmd(
--            ["stonith-id", "set", "device1", "device2"],
-+            ["stonith-id", "set", "d1", "d2"],
-             {"skip-offline": True, "request-timeout": 60},
-         )
-         self.assert_called_with(
-             "stonith-id",
--            ["device1", "device2"],
-+            ["d1", "d2"],
-             [reports.codes.SKIP_OFFLINE_NODES],
-         )
- 
-     def test_unsupported_options(self):
-         with self.assertRaises(CmdLineInputError) as cm:
--            self.call_cmd(
--                ["stonith-id", "set", "device1", "device2"], {"force": True}
--            )
-+            self.call_cmd(["stonith-id", "set", "d1", "d2"], {"force": True})
-         self.assertEqual(
-             cm.exception.message,
-             "Specified option '--force' is not supported in this command",
-         )
-+
-+    def test_only_set_keyword(self):
-+        self.assert_bad_syntax_cli_exception(["stonith-id", "set"])
-+
-+    def test_only_add_keyword(self):
-+        self.assert_bad_syntax_cli_exception(["stonith-id", "add"])
-+
-+    def test_only_remove_keyword(self):
-+        self.assert_bad_syntax_cli_exception(["stonith-id", "remove"])
-+
-+    def test_only_delete_keyword(self):
-+        self.assert_bad_syntax_cli_exception(["stonith-id", "delete"])
-+
-+    def test_add_and_empty_remove(self):
-+        self.assert_bad_syntax_cli_exception(
-+            ["stonith-id", "add", "d1", "remove"]
-+        )
-+
-+    def test_add_and_empty_delete(self):
-+        self.assert_bad_syntax_cli_exception(
-+            ["stonith-id", "add", "d1", "delete"]
-+        )
-+
-+    def test_empty_add_and_remove(self):
-+        self.assert_bad_syntax_cli_exception(
-+            ["stonith-id", "add", "remove", "d1"]
-+        )
-+
-+    def test_empty_add_and_delete(self):
-+        self.assert_bad_syntax_cli_exception(
-+            ["stonith-id", "add", "delete", "d1"]
-+        )
-+
-+    def test_empty_remove_and_delete(self):
-+        self.assert_bad_syntax_cli_exception(
-+            ["stonith-id", "remove", "delete", "d1"]
-+        )
-+
-+    def test_empty_delete_and_remove(self):
-+        self.assert_bad_syntax_cli_exception(
-+            ["stonith-id", "delete", "remove", "d1"]
-+        )
-+
-+    def test_empty_add_empty_remove_empty_delete(self):
-+        self.assert_bad_syntax_cli_exception(
-+            ["stonith-id", "add", "delete", "remove"]
-+        )
-+
-+    def test_set_add_remove_delete_devices(self):
-+        self.assert_bad_syntax_cli_exception(
-+            [
-+                "stonith-id",
-+                "set",
-+                "add",
-+                "d2",
-+                "remove",
-+                "d3",
-+                "delete",
-+                "d4",
-+            ]
-+        )
-+
-+    def test_set_devices(self):
-+        self.call_cmd(["stonith-id", "set", "d1", "d2"])
-+        self.assert_called_with("stonith-id", ["d1", "d2"], [])
-+
-+    def test_add_devices(self):
-+        self.call_cmd(["stonith-id", "add", "d1", "d2"])
-+        self.assert_add_remove_called_with("stonith-id", ["d1", "d2"], [], [])
-+
-+    def test_remove_devices(self):
-+        self.call_cmd(["stonith-id", "remove", "d1", "d2"])
-+        self.assert_add_remove_called_with("stonith-id", [], ["d1", "d2"], [])
-+
-+    def test_delete_devices(self):
-+        self.call_cmd(["stonith-id", "delete", "d1", "d2"])
-+        self.assert_add_remove_called_with("stonith-id", [], ["d1", "d2"], [])
-+
-+    def test_add_remove_devices(self):
-+        self.call_cmd(["stonith-id", "add", "d1", "d2", "remove", "d3", "d4"])
-+        self.assert_add_remove_called_with(
-+            "stonith-id", ["d1", "d2"], ["d3", "d4"], []
-+        )
-+
-+    def test_add_delete_devices(self):
-+        self.call_cmd(["stonith-id", "add", "d1", "d2", "delete", "d3", "d4"])
-+        self.assert_add_remove_called_with(
-+            "stonith-id", ["d1", "d2"], ["d3", "d4"], []
-+        )
-+
-+    def test_add_delete_remove_devices(self):
-+        self.call_cmd(
-+            [
-+                "stonith-id",
-+                "add",
-+                "d1",
-+                "d2",
-+                "delete",
-+                "d3",
-+                "d4",
-+                "remove",
-+                "d5",
-+            ]
-+        )
-+        self.assert_add_remove_called_with(
-+            "stonith-id", ["d1", "d2"], ["d3", "d4", "d5"], []
-+        )
-+
-+    def test_remove_delete_devices(self):
-+        self.call_cmd(
-+            ["stonith-id", "remove", "d2", "d1", "delete", "d4", "d3"]
-+        )
-+        self.assert_add_remove_called_with(
-+            "stonith-id", [], ["d4", "d3", "d2", "d1"], []
-+        )
-diff --git a/pcs_test/tier0/common/reports/test_messages.py b/pcs_test/tier0/common/reports/test_messages.py
-index 0cb97138..b0826cfd 100644
---- a/pcs_test/tier0/common/reports/test_messages.py
-+++ b/pcs_test/tier0/common/reports/test_messages.py
-@@ -1761,6 +1761,7 @@ class ResourceBundleAlreadyContainsAResource(NameBuildTest):
-         )
- 
- 
-+# TODO: remove, use ADD_REMOVE reports
- class CannotGroupResourceAdjacentResourceForNewGroup(NameBuildTest):
-     def test_success(self):
-         self.assert_message_from_report(
-@@ -1772,6 +1773,7 @@ class CannotGroupResourceAdjacentResourceForNewGroup(NameBuildTest):
-         )
- 
- 
-+# TODO: remove, use ADD_REMOVE reports
- class CannotGroupResourceAdjacentResourceNotInGroup(NameBuildTest):
-     def test_success(self):
-         self.assert_message_from_report(
-@@ -1783,6 +1785,7 @@ class CannotGroupResourceAdjacentResourceNotInGroup(NameBuildTest):
-         )
- 
- 
-+# TODO: remove, use ADD_REMOVE reports
- class CannotGroupResourceAlreadyInTheGroup(NameBuildTest):
-     def test_single_resource(self):
-         self.assert_message_from_report(
-@@ -1797,6 +1800,7 @@ class CannotGroupResourceAlreadyInTheGroup(NameBuildTest):
-         )
- 
- 
-+# TODO: remove, use ADD_REMOVE reports
- class CannotGroupResourceMoreThanOnce(NameBuildTest):
-     def test_single_resource(self):
-         self.assert_message_from_report(
-@@ -1811,6 +1815,7 @@ class CannotGroupResourceMoreThanOnce(NameBuildTest):
-         )
- 
- 
-+# TODO: remove, use ADD_REMOVE reports
- class CannotGroupResourceNoResources(NameBuildTest):
-     def test_success(self):
-         self.assert_message_from_report(
-@@ -1818,6 +1823,7 @@ class CannotGroupResourceNoResources(NameBuildTest):
-         )
- 
- 
-+# TODO: remove, use ADD_REMOVE reports
- class CannotGroupResourceNextToItself(NameBuildTest):
-     def test_success(self):
-         self.assert_message_from_report(
-@@ -4836,6 +4842,7 @@ class BoothTicketOperationFailed(NameBuildTest):
-         )
- 
- 
-+# TODO: remove, use ADD_REMOVE reports
- class TagAddRemoveIdsDuplication(NameBuildTest):
-     def test_message_add(self):
-         self.assert_message_from_report(
-@@ -4855,6 +4862,7 @@ class TagAddRemoveIdsDuplication(NameBuildTest):
-         )
- 
- 
-+# TODO: remove, use ADD_REMOVE reports
- class TagAdjacentReferenceIdNotInTheTag(NameBuildTest):
-     def test_messag(self):
-         self.assert_message_from_report(
-@@ -4866,6 +4874,7 @@ class TagAdjacentReferenceIdNotInTheTag(NameBuildTest):
-         )
- 
- 
-+# TODO: remove, use ADD_REMOVE reports
- class TagCannotAddAndRemoveIdsAtTheSameTime(NameBuildTest):
-     def test_message_one_item(self):
-         self.assert_message_from_report(
-@@ -4885,6 +4894,7 @@ class TagCannotAddAndRemoveIdsAtTheSameTime(NameBuildTest):
-         )
- 
- 
-+# TODO: remove, use ADD_REMOVE reports
- class TagCannotAddReferenceIdsAlreadyInTheTag(NameBuildTest):
-     def test_message_singular(self):
-         self.assert_message_from_report(
-@@ -4920,6 +4930,7 @@ class TagCannotCreateEmptyTagNoIdsSpecified(NameBuildTest):
-         )
- 
- 
-+# TODO: remove, use ADD_REMOVE reports
- class TagCannotPutIdNextToItself(NameBuildTest):
-     def test_message(self):
-         self.assert_message_from_report(
-@@ -4928,6 +4939,7 @@ class TagCannotPutIdNextToItself(NameBuildTest):
-         )
- 
- 
-+# TODO: remove, use ADD_REMOVE reports
- class TagCannotRemoveAdjacentId(NameBuildTest):
-     def test_message(self):
-         self.assert_message_from_report(
-@@ -4936,6 +4948,7 @@ class TagCannotRemoveAdjacentId(NameBuildTest):
-         )
- 
- 
-+# TODO: remove, use ADD_REMOVE reports
- class TagCannotRemoveReferencesWithoutRemovingTag(NameBuildTest):
-     def test_message(self):
-         self.assert_message_from_report(
-@@ -4974,6 +4987,7 @@ class TagCannotRemoveTagsNoTagsSpecified(NameBuildTest):
-         )
- 
- 
-+# TODO: remove, use ADD_REMOVE reports
- class TagCannotSpecifyAdjacentIdWithoutIdsToAdd(NameBuildTest):
-     def test_message(self):
-         self.assert_message_from_report(
-@@ -4982,6 +4996,7 @@ class TagCannotSpecifyAdjacentIdWithoutIdsToAdd(NameBuildTest):
-         )
- 
- 
-+# TODO: remove, use ADD_REMOVE reports
- class TagCannotUpdateTagNoIdsSpecified(NameBuildTest):
-     def test_message(self):
-         self.assert_message_from_report(
-@@ -4990,6 +5005,7 @@ class TagCannotUpdateTagNoIdsSpecified(NameBuildTest):
-         )
- 
- 
-+# TODO: remove, use ADD_REMOVE reports
- class TagIdsNotInTheTag(NameBuildTest):
-     def test_message_singular(self):
-         self.assert_message_from_report(
-@@ -5080,3 +5096,172 @@ class CibNvsetAmbiguousProvideNvsetId(NameBuildTest):
-                 const.PCS_COMMAND_RESOURCE_DEFAULTS_UPDATE
-             ),
-         )
-+
-+
-+class AddRemoveItemsNotSpecified(NameBuildTest):
-+    def test_message(self):
-+        self.assert_message_from_report(
-+            (
-+                "Cannot modify stonith resource 'container-id', no devices to "
-+                "add or remove specified"
-+            ),
-+            reports.AddRemoveItemsNotSpecified(
-+                const.ADD_REMOVE_CONTAINER_TYPE_STONITH_RESOURCE,
-+                const.ADD_REMOVE_ITEM_TYPE_DEVICE,
-+                "container-id",
-+            ),
-+        )
-+
-+
-+class AddRemoveItemsDuplication(NameBuildTest):
-+    def test_message(self):
-+        self.assert_message_from_report(
-+            (
-+                "Devices to add or remove must be unique, duplicate devices: "
-+                "'dup1', 'dup2'"
-+            ),
-+            reports.AddRemoveItemsDuplication(
-+                const.ADD_REMOVE_CONTAINER_TYPE_STONITH_RESOURCE,
-+                const.ADD_REMOVE_ITEM_TYPE_DEVICE,
-+                "container-id",
-+                ["dup2", "dup1"],
-+            ),
-+        )
-+
-+
-+class AddRemoveCannotAddItemsAlreadyInTheContainer(NameBuildTest):
-+    def test_message_plural(self):
-+        self.assert_message_from_report(
-+            "Cannot add devices 'i1', 'i2', they are already present in stonith"
-+            " resource 'container-id'",
-+            reports.AddRemoveCannotAddItemsAlreadyInTheContainer(
-+                const.ADD_REMOVE_CONTAINER_TYPE_STONITH_RESOURCE,
-+                const.ADD_REMOVE_ITEM_TYPE_DEVICE,
-+                "container-id",
-+                ["i2", "i1"],
-+            ),
-+        )
-+
-+    def test_message_singular(self):
-+        self.assert_message_from_report(
-+            "Cannot add device 'i1', it is already present in stonith resource "
-+            "'container-id'",
-+            reports.AddRemoveCannotAddItemsAlreadyInTheContainer(
-+                const.ADD_REMOVE_CONTAINER_TYPE_STONITH_RESOURCE,
-+                const.ADD_REMOVE_ITEM_TYPE_DEVICE,
-+                "container-id",
-+                ["i1"],
-+            ),
-+        )
-+
-+
-+class AddRemoveCannotRemoveItemsNotInTheContainer(NameBuildTest):
-+    def test_message_plural(self):
-+        self.assert_message_from_report(
-+            (
-+                "Cannot remove devices 'i1', 'i2', they are not present in "
-+                "stonith resource 'container-id'"
-+            ),
-+            reports.AddRemoveCannotRemoveItemsNotInTheContainer(
-+                const.ADD_REMOVE_CONTAINER_TYPE_STONITH_RESOURCE,
-+                const.ADD_REMOVE_ITEM_TYPE_DEVICE,
-+                "container-id",
-+                ["i2", "i1"],
-+            ),
-+        )
-+
-+    def test_message_singular(self):
-+        self.assert_message_from_report(
-+            (
-+                "Cannot remove device 'i1', it is not present in "
-+                "stonith resource 'container-id'"
-+            ),
-+            reports.AddRemoveCannotRemoveItemsNotInTheContainer(
-+                const.ADD_REMOVE_CONTAINER_TYPE_STONITH_RESOURCE,
-+                const.ADD_REMOVE_ITEM_TYPE_DEVICE,
-+                "container-id",
-+                ["i1"],
-+            ),
-+        )
-+
-+
-+class AddRemoveCannotAddAndRemoveItemsAtTheSameTime(NameBuildTest):
-+    def test_message_plural(self):
-+        self.assert_message_from_report(
-+            "Devices cannot be added and removed at the same time: 'i1', 'i2'",
-+            reports.AddRemoveCannotAddAndRemoveItemsAtTheSameTime(
-+                const.ADD_REMOVE_CONTAINER_TYPE_STONITH_RESOURCE,
-+                const.ADD_REMOVE_ITEM_TYPE_DEVICE,
-+                "container-id",
-+                ["i2", "i1"],
-+            ),
-+        )
-+
-+    def test_message_singular(self):
-+        self.assert_message_from_report(
-+            "Device cannot be added and removed at the same time: 'i1'",
-+            reports.AddRemoveCannotAddAndRemoveItemsAtTheSameTime(
-+                const.ADD_REMOVE_CONTAINER_TYPE_STONITH_RESOURCE,
-+                const.ADD_REMOVE_ITEM_TYPE_DEVICE,
-+                "container-id",
-+                ["i1"],
-+            ),
-+        )
-+
-+
-+class AddRemoveCannotRemoveAllItemsFromTheContainer(NameBuildTest):
-+    def test_message(self):
-+        self.assert_message_from_report(
-+            "Cannot remove all devices from stonith resource 'container-id'",
-+            reports.AddRemoveCannotRemoveAllItemsFromTheContainer(
-+                const.ADD_REMOVE_CONTAINER_TYPE_STONITH_RESOURCE,
-+                const.ADD_REMOVE_ITEM_TYPE_DEVICE,
-+                "container-id",
-+                ["i1", "i2"],
-+            ),
-+        )
-+
-+
-+class AddRemoveAdjacentItemNotInTheContainer(NameBuildTest):
-+    def test_message(self):
-+        self.assert_message_from_report(
-+            (
-+                "There is no device 'adjacent-item-id' in the stonith resource "
-+                "'container-id', cannot add devices next to it"
-+            ),
-+            reports.AddRemoveAdjacentItemNotInTheContainer(
-+                const.ADD_REMOVE_CONTAINER_TYPE_STONITH_RESOURCE,
-+                const.ADD_REMOVE_ITEM_TYPE_DEVICE,
-+                "container-id",
-+                "adjacent-item-id",
-+            ),
-+        )
-+
-+
-+class AddRemoveCannotPutItemNextToItself(NameBuildTest):
-+    def test_message(self):
-+        self.assert_message_from_report(
-+            "Cannot put device 'adjacent-item-id' next to itself",
-+            reports.AddRemoveCannotPutItemNextToItself(
-+                const.ADD_REMOVE_CONTAINER_TYPE_STONITH_RESOURCE,
-+                const.ADD_REMOVE_ITEM_TYPE_DEVICE,
-+                "container-id",
-+                "adjacent-item-id",
-+            ),
-+        )
-+
-+
-+class AddRemoveCannotSpecifyAdjacentItemWithoutItemsToAdd(NameBuildTest):
-+    def test_message(self):
-+        self.assert_message_from_report(
-+            (
-+                "Cannot specify adjacent device 'adjacent-item-id' without "
-+                "devices to add"
-+            ),
-+            reports.AddRemoveCannotSpecifyAdjacentItemWithoutItemsToAdd(
-+                const.ADD_REMOVE_CONTAINER_TYPE_STONITH_RESOURCE,
-+                const.ADD_REMOVE_ITEM_TYPE_DEVICE,
-+                "container-id",
-+                "adjacent-item-id",
-+            ),
-+        )
-diff --git a/pcs_test/tier0/common/test_str_tools.py b/pcs_test/tier0/common/test_str_tools.py
-index 97c1d223..b0028a88 100644
---- a/pcs_test/tier0/common/test_str_tools.py
-+++ b/pcs_test/tier0/common/test_str_tools.py
-@@ -1,5 +1,5 @@
- # pylint: disable=protected-access
--from unittest import TestCase, mock
-+from unittest import TestCase
- 
- from pcs.common import str_tools as tools
- 
-@@ -124,73 +124,48 @@ class AddSTest(TestCase):
-         self.assertEqual(tools._add_s("church"), "churches")
- 
- 
--@mock.patch("pcs.common.str_tools._add_s")
--@mock.patch("pcs.common.str_tools._is_multiple")
-+class GetPluralTest(TestCase):
-+    def test_common_plural(self):
-+        self.assertEqual("are", tools.get_plural("is"))
-+
-+    def test_add_s(self):
-+        self.assertEqual("pieces", tools.get_plural("piece"))
-+
-+
- class FormatPluralTest(TestCase):
--    def test_is_sg(self, mock_is_multiple, mock_add_s):
--        mock_is_multiple.return_value = False
-+    def test_is_sg(self):
-         self.assertEqual("is", tools.format_plural(1, "is"))
--        mock_add_s.assert_not_called()
--        mock_is_multiple.assert_called_once_with(1)
- 
--    def test_is_pl(self, mock_is_multiple, mock_add_s):
--        mock_is_multiple.return_value = True
-+    def test_is_pl(self):
-         self.assertEqual("are", tools.format_plural(2, "is"))
--        mock_add_s.assert_not_called()
--        mock_is_multiple.assert_called_once_with(2)
- 
--    def test_do_sg(self, mock_is_multiple, mock_add_s):
--        mock_is_multiple.return_value = False
-+    def test_do_sg(self):
-         self.assertEqual("does", tools.format_plural("he", "does"))
--        mock_add_s.assert_not_called()
--        mock_is_multiple.assert_called_once_with("he")
- 
--    def test_do_pl(self, mock_is_multiple, mock_add_s):
--        mock_is_multiple.return_value = True
-+    def test_do_pl(self):
-         self.assertEqual("do", tools.format_plural(["he", "she"], "does"))
--        mock_add_s.assert_not_called()
--        mock_is_multiple.assert_called_once_with(["he", "she"])
- 
--    def test_have_sg(self, mock_is_multiple, mock_add_s):
--        mock_is_multiple.return_value = False
-+    def test_have_sg(self):
-         self.assertEqual("has", tools.format_plural("he", "has"))
--        mock_add_s.assert_not_called()
--        mock_is_multiple.assert_called_once_with("he")
- 
--    def test_have_pl(self, mock_is_multiple, mock_add_s):
--        mock_is_multiple.return_value = True
-+    def test_have_pl(self):
-         self.assertEqual("have", tools.format_plural(["he", "she"], "has"))
--        mock_add_s.assert_not_called()
--        mock_is_multiple.assert_called_once_with(["he", "she"])
- 
--    def test_plural_sg(self, mock_is_multiple, mock_add_s):
--        mock_is_multiple.return_value = False
-+    def test_plural_sg(self):
-         self.assertEqual(
-             "singular", tools.format_plural(1, "singular", "plural")
-         )
--        mock_add_s.assert_not_called()
--        mock_is_multiple.assert_called_once_with(1)
- 
--    def test_plural_pl(self, mock_is_multiple, mock_add_s):
--        mock_is_multiple.return_value = True
-+    def test_plural_pl(self):
-         self.assertEqual(
-             "plural", tools.format_plural(10, "singular", "plural")
-         )
--        mock_add_s.assert_not_called()
--        mock_is_multiple.assert_called_once_with(10)
- 
--    def test_regular_sg(self, mock_is_multiple, mock_add_s):
--        mock_is_multiple.return_value = False
-+    def test_regular_sg(self):
-         self.assertEqual("greeting", tools.format_plural(1, "greeting"))
--        mock_add_s.assert_not_called()
--        mock_is_multiple.assert_called_once_with(1)
- 
--    def test_regular_pl(self, mock_is_multiple, mock_add_s):
--        mock_add_s.return_value = "greetings"
--        mock_is_multiple.return_value = True
-+    def test_regular_pl(self):
-         self.assertEqual("greetings", tools.format_plural(10, "greeting"))
--        mock_add_s.assert_called_once_with("greeting")
--        mock_is_multiple.assert_called_once_with(10)
- 
- 
- class FormatList(TestCase):
-diff --git a/pcs_test/tier0/lib/cib/test_stonith.py b/pcs_test/tier0/lib/cib/test_stonith.py
-index ef7571ce..df059121 100644
---- a/pcs_test/tier0/lib/cib/test_stonith.py
-+++ b/pcs_test/tier0/lib/cib/test_stonith.py
-@@ -2,8 +2,12 @@ from unittest import TestCase
- 
- from lxml import etree
- 
-+from pcs.common import reports
- from pcs.lib.cib import stonith
- 
-+from pcs_test.tools import fixture
-+from pcs_test.tools.assertions import assert_report_item_list_equal
-+
- 
- class IsStonithEnabled(TestCase):
-     def test_not_set(self):
-@@ -149,8 +153,129 @@ class GetMisconfiguredResources(TestCase):
-         )
- 
- 
--class ValidateStonithDeviceExistsAndSupported(TestCase):
--    """
--    tested in:
--      pcs_test.tier0.lib.commands.test_stonith_update_scsi_devices.TestUpdateScsiDevicesFailures
--    """
-+class ValidateStonithRestartlessUpdate(TestCase):
-+    RESOURCES = etree.fromstring(
-+        """
-+        <resources>
-+            <primitive id="supported" class="stonith" type="fence_scsi">
-+                <instance_attributes>
-+                    <nvpair name="devices" value="/dev/sda" />
-+                </instance_attributes>
-+            </primitive>
-+            <primitive id="empty" class="stonith" type="fence_scsi">
-+                <instance_attributes>
-+                    <nvpair id="empty-instance_attributes-devices"
-+                        name="devices" value="" />
-+                </instance_attributes>
-+            </primitive>
-+            <primitive id="no-devices" class="stonith" type="fence_scsi"/>
-+            <primitive id="unsupported_provider"
-+                class="stonith" provider="provider" type="fence_scsi"/>
-+            <primitive id="unsupported_type" class="stonith" type="fence_xvm"/>
-+            <primitive class="ocf" id="cp-01" provider="pacemaker" type="Dummy"/>
-+        </resources>
-+        """
-+    )
-+
-+    def assert_unsupported_stonith_agent(self, resource_id, resource_type):
-+        stonith_el, report_list = stonith.validate_stonith_restartless_update(
-+            self.RESOURCES, resource_id
-+        )
-+        self.assertEqual(
-+            stonith_el,
-+            self.RESOURCES.find(f".//primitive[@id='{resource_id}']"),
-+        )
-+        assert_report_item_list_equal(
-+            report_list,
-+            [
-+                fixture.error(
-+                    reports.codes.STONITH_RESTARTLESS_UPDATE_UNSUPPORTED_AGENT,
-+                    resource_id=resource_id,
-+                    resource_type=resource_type,
-+                    supported_stonith_types=["fence_scsi"],
-+                )
-+            ],
-+        )
-+
-+    def assert_no_devices(self, resource_id):
-+        stonith_el, report_list = stonith.validate_stonith_restartless_update(
-+            self.RESOURCES, resource_id
-+        )
-+        self.assertEqual(
-+            stonith_el,
-+            self.RESOURCES.find(f".//primitive[@id='{resource_id}']"),
-+        )
-+        assert_report_item_list_equal(
-+            report_list,
-+            [
-+                fixture.error(
-+                    reports.codes.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM,
-+                    reason=(
-+                        "no devices option configured for stonith device "
-+                        f"'{resource_id}'"
-+                    ),
-+                    reason_type="other",
-+                )
-+            ],
-+        )
-+
-+    def test_supported(self):
-+        stonith_el, report_list = stonith.validate_stonith_restartless_update(
-+            self.RESOURCES, "supported"
-+        )
-+        self.assertEqual(
-+            stonith_el, self.RESOURCES.find(".//primitive[@id='supported']")
-+        )
-+        assert_report_item_list_equal(report_list, [])
-+
-+    def test_nonexistent_id(self):
-+        stonith_el, report_list = stonith.validate_stonith_restartless_update(
-+            self.RESOURCES, "non-existent"
-+        )
-+        self.assertEqual(stonith_el, None)
-+        assert_report_item_list_equal(
-+            report_list,
-+            [
-+                fixture.error(
-+                    reports.codes.ID_NOT_FOUND,
-+                    id="non-existent",
-+                    expected_types=["primitive"],
-+                    context_type="resources",
-+                    context_id="",
-+                )
-+            ],
-+        )
-+
-+    def test_not_a_resource_id(self):
-+        stonith_el, report_list = stonith.validate_stonith_restartless_update(
-+            self.RESOURCES, "empty-instance_attributes-devices"
-+        )
-+        self.assertEqual(stonith_el, None)
-+        assert_report_item_list_equal(
-+            report_list,
-+            [
-+                fixture.error(
-+                    reports.codes.ID_BELONGS_TO_UNEXPECTED_TYPE,
-+                    id="empty-instance_attributes-devices",
-+                    expected_types=["primitive"],
-+                    current_type="nvpair",
-+                )
-+            ],
-+        )
-+
-+    def test_devices_empty(self):
-+        self.assert_no_devices("empty")
-+
-+    def test_missing_devices_attr(self):
-+        self.assert_no_devices("no-devices")
-+
-+    def test_unsupported_class(self):
-+        self.assert_unsupported_stonith_agent("cp-01", "Dummy")
-+
-+    def test_unsupported_provider(self):
-+        self.assert_unsupported_stonith_agent(
-+            "unsupported_provider", "fence_scsi"
-+        )
-+
-+    def test_unsupported_type(self):
-+        self.assert_unsupported_stonith_agent("unsupported_type", "fence_xvm")
-diff --git a/pcs_test/tier0/lib/commands/test_stonith_update_scsi_devices.py b/pcs_test/tier0/lib/commands/test_stonith_update_scsi_devices.py
-index 3bc51325..6ff6b99a 100644
---- a/pcs_test/tier0/lib/commands/test_stonith_update_scsi_devices.py
-+++ b/pcs_test/tier0/lib/commands/test_stonith_update_scsi_devices.py
-@@ -3,6 +3,7 @@ from unittest import mock, TestCase
- 
- 
- from pcs_test.tools import fixture
-+from pcs_test.tools.assertions import assert_report_item_list_equal
- from pcs_test.tools.command_env import get_env_tools
- from pcs_test.tools.misc import get_test_resource as rc
- 
-@@ -13,6 +14,10 @@ from pcs.common import (
-     reports,
- )
- from pcs.common.interface import dto
-+from pcs.common.reports.const import (
-+    ADD_REMOVE_CONTAINER_TYPE_STONITH_RESOURCE,
-+    ADD_REMOVE_ITEM_TYPE_DEVICE,
-+)
- from pcs.common.tools import timeout_to_seconds
- 
- from .cluster.common import (
-@@ -28,6 +33,10 @@ DEFAULT_DIGEST = _DIGEST + "0"
- ALL_DIGEST = _DIGEST + "1"
- NONPRIVATE_DIGEST = _DIGEST + "2"
- NONRELOADABLE_DIGEST = _DIGEST + "3"
-+DEV_1 = "/dev/sda"
-+DEV_2 = "/dev/sdb"
-+DEV_3 = "/dev/sdc"
-+DEV_4 = "/dev/sdd"
- DEVICES_1 = ("/dev/sda",)
- DEVICES_2 = ("/dev/sda", "/dev/sdb")
- DEVICES_3 = ("/dev/sda", "/dev/sdb", "/dev/sdc")
-@@ -197,13 +206,9 @@ FIXTURE_CRM_MON_RES_STOPPED = f"""
- """
- 
- 
--@mock.patch.object(
--    settings,
--    "pacemaker_api_result_schema",
--    rc("pcmk_api_rng/api-result.rng"),
--)
--class UpdateScsiDevices(TestCase):
-+class UpdateScsiDevicesMixin:
-     def setUp(self):
-+        # pylint: disable=invalid-name
-         self.env_assist, self.config = get_env_tools(self)
- 
-         self.existing_nodes = ["node1", "node2", "node3"]
-@@ -217,14 +222,18 @@ class UpdateScsiDevices(TestCase):
-         self,
-         devices_before=DEVICES_1,
-         devices_updated=DEVICES_2,
-+        devices_add=(),
-+        devices_remove=(),
-+        unfence=None,
-         resource_ops=DEFAULT_OPS,
-         lrm_monitor_ops=DEFAULT_LRM_MONITOR_OPS,
-         lrm_start_ops=DEFAULT_LRM_START_OPS,
-         lrm_monitor_ops_updated=DEFAULT_LRM_MONITOR_OPS_UPDATED,
-         lrm_start_ops_updated=DEFAULT_LRM_START_OPS_UPDATED,
-     ):
-+        # pylint: disable=too-many-arguments
-         # pylint: disable=too-many-locals
--        self.config.runner.pcmk.is_resource_digests_supported()
-+        devices_value = ",".join(sorted(devices_updated))
-         self.config.runner.cib.load(
-             resources=fixture_scsi(
-                 devices=devices_before, resource_ops=resource_ops
-@@ -235,16 +244,17 @@ class UpdateScsiDevices(TestCase):
-                 lrm_monitor_ops=lrm_monitor_ops,
-             ),
-         )
-+        self.config.runner.pcmk.is_resource_digests_supported()
-         self.config.runner.pcmk.load_state(
-             resources=FIXTURE_CRM_MON_RES_RUNNING_1, nodes=FIXTURE_CRM_MON_NODES
-         )
--        devices_opt = "devices={}".format(",".join(devices_updated))
-+        devices_opt = "devices={}".format(devices_value)
-         self.config.runner.pcmk.resource_digests(
-             SCSI_STONITH_ID,
-             SCSI_NODE,
-             name="start.op.digests",
-             stdout=fixture_digests_xml(
--                SCSI_STONITH_ID, SCSI_NODE, devices=",".join(devices_updated)
-+                SCSI_STONITH_ID, SCSI_NODE, devices=devices_value
-             ),
-             args=[devices_opt],
-         )
-@@ -272,22 +282,23 @@ class UpdateScsiDevices(TestCase):
-                 stdout=fixture_digests_xml(
-                     SCSI_STONITH_ID,
-                     SCSI_NODE,
--                    devices=",".join(devices_updated),
-+                    devices=devices_value,
-                 ),
-                 args=args,
-             )
--        self.config.corosync_conf.load_content(
--            corosync_conf_fixture(
--                self.existing_corosync_nodes,
--                get_two_node(len(self.existing_corosync_nodes)),
-+        if unfence:
-+            self.config.corosync_conf.load_content(
-+                corosync_conf_fixture(
-+                    self.existing_corosync_nodes,
-+                    get_two_node(len(self.existing_corosync_nodes)),
-+                )
-+            )
-+            self.config.http.corosync.get_corosync_online_targets(
-+                node_labels=self.existing_nodes
-+            )
-+            self.config.http.scsi.unfence_node(
-+                unfence, node_labels=self.existing_nodes
-             )
--        )
--        self.config.http.corosync.get_corosync_online_targets(
--            node_labels=self.existing_nodes
--        )
--        self.config.http.scsi.unfence_node(
--            devices_updated, node_labels=self.existing_nodes
--        )
-         self.config.env.push_cib(
-             resources=fixture_scsi(
-                 devices=devices_updated, resource_ops=resource_ops
-@@ -298,113 +309,25 @@ class UpdateScsiDevices(TestCase):
-                 lrm_monitor_ops=lrm_monitor_ops_updated,
-             ),
-         )
--        stonith.update_scsi_devices(
--            self.env_assist.get_env(), SCSI_STONITH_ID, devices_updated
--        )
-+        if devices_add or devices_remove:
-+            stonith.update_scsi_devices_add_remove(
-+                self.env_assist.get_env(),
-+                SCSI_STONITH_ID,
-+                devices_add,
-+                devices_remove,
-+            )
-+        else:
-+            stonith.update_scsi_devices(
-+                self.env_assist.get_env(), SCSI_STONITH_ID, devices_updated
-+            )
-         self.env_assist.assert_reports([])
- 
--    def test_update_1_to_1_devices(self):
--        self.assert_command_success(
--            devices_before=DEVICES_1, devices_updated=DEVICES_1
--        )
--
--    def test_update_2_to_2_devices(self):
--        self.assert_command_success(
--            devices_before=DEVICES_1, devices_updated=DEVICES_1
--        )
--
--    def test_update_1_to_2_devices(self):
--        self.assert_command_success()
--
--    def test_update_1_to_3_devices(self):
--        self.assert_command_success(
--            devices_before=DEVICES_1, devices_updated=DEVICES_3
--        )
--
--    def test_update_3_to_1_devices(self):
--        self.assert_command_success(
--            devices_before=DEVICES_3, devices_updated=DEVICES_1
--        )
--
--    def test_update_3_to_2_devices(self):
--        self.assert_command_success(
--            devices_before=DEVICES_3, devices_updated=DEVICES_2
--        )
--
--    def test_default_monitor(self):
--        self.assert_command_success()
--
--    def test_no_monitor_ops(self):
--        self.assert_command_success(
--            resource_ops=(), lrm_monitor_ops=(), lrm_monitor_ops_updated=()
--        )
--
--    def test_1_monitor_with_timeout(self):
--        self.assert_command_success(
--            resource_ops=(("monitor", "30s", "10s", None),),
--            lrm_monitor_ops=(("30000", DEFAULT_DIGEST, None, None),),
--            lrm_monitor_ops_updated=(("30000", ALL_DIGEST, None, None),),
--        )
--
--    def test_2_monitor_ops_with_timeouts(self):
--        self.assert_command_success(
--            resource_ops=(
--                ("monitor", "30s", "10s", None),
--                ("monitor", "40s", "20s", None),
--            ),
--            lrm_monitor_ops=(
--                ("30000", DEFAULT_DIGEST, None, None),
--                ("40000", DEFAULT_DIGEST, None, None),
--            ),
--            lrm_monitor_ops_updated=(
--                ("30000", ALL_DIGEST, None, None),
--                ("40000", ALL_DIGEST, None, None),
--            ),
--        )
--
--    def test_2_monitor_ops_with_one_timeout(self):
--        self.assert_command_success(
--            resource_ops=(
--                ("monitor", "30s", "10s", None),
--                ("monitor", "60s", None, None),
--            ),
--            lrm_monitor_ops=(
--                ("30000", DEFAULT_DIGEST, None, None),
--                ("60000", DEFAULT_DIGEST, None, None),
--            ),
--            lrm_monitor_ops_updated=(
--                ("30000", ALL_DIGEST, None, None),
--                ("60000", ALL_DIGEST, None, None),
--            ),
--        )
--
--    def test_various_start_ops_one_lrm_start_op(self):
--        self.assert_command_success(
--            resource_ops=(
--                ("monitor", "60s", None, None),
--                ("start", "0s", "40s", None),
--                ("start", "0s", "30s", "1"),
--                ("start", "10s", "5s", None),
--                ("start", "20s", None, None),
--            ),
--        )
--
--    def test_1_nonrecurring_start_op_with_timeout(self):
--        self.assert_command_success(
--            resource_ops=(
--                ("monitor", "60s", None, None),
--                ("start", "0s", "40s", None),
--            ),
--        )
- 
-+class UpdateScsiDevicesFailuresMixin:
-+    def command(self, force_flags=()):
-+        raise NotImplementedError
- 
--@mock.patch.object(
--    settings,
--    "pacemaker_api_result_schema",
--    rc("pcmk_api_rng/api-result.rng"),
--)
--class TestUpdateScsiDevicesFailures(TestCase):
--    # pylint: disable=too-many-public-methods
-+    # pylint: disable=invalid-name
-     def setUp(self):
-         self.env_assist, self.config = get_env_tools(self)
- 
-@@ -416,13 +339,12 @@ class TestUpdateScsiDevicesFailures(TestCase):
-         self.config.env.set_known_nodes(self.existing_nodes)
- 
-     def test_pcmk_doesnt_support_digests(self):
-+        self.config.runner.cib.load(resources=fixture_scsi())
-         self.config.runner.pcmk.is_resource_digests_supported(
-             is_supported=False
-         )
-         self.env_assist.assert_raise_library_error(
--            lambda: stonith.update_scsi_devices(
--                self.env_assist.get_env(), SCSI_STONITH_ID, ()
--            ),
-+            self.command(),
-             [
-                 fixture.error(
-                     reports.codes.STONITH_RESTARTLESS_UPDATE_OF_SCSI_DEVICES_NOT_SUPPORTED,
-@@ -431,134 +353,557 @@ class TestUpdateScsiDevicesFailures(TestCase):
-             expected_in_processor=False,
-         )
- 
--    def test_devices_cannot_be_empty(self):
--        self.config.runner.pcmk.is_resource_digests_supported()
--        self.config.runner.cib.load(resources=fixture_scsi())
--        self.env_assist.assert_raise_library_error(
--            lambda: stonith.update_scsi_devices(
--                self.env_assist.get_env(), SCSI_STONITH_ID, ()
--            )
--        )
--        self.env_assist.assert_reports(
--            [
--                fixture.error(
--                    reports.codes.INVALID_OPTION_VALUE,
--                    option_name="devices",
--                    option_value="",
--                    allowed_values=None,
--                    cannot_be_empty=True,
--                    forbidden_characters=None,
--                )
--            ]
-+    def test_node_missing_name_and_missing_auth_token(self):
-+        self.config.runner.cib.load(
-+            resources=fixture_scsi(),
-+            status=_fixture_status_lrm_ops(SCSI_STONITH_ID),
-         )
--
--    def test_nonexistant_id(self):
-         self.config.runner.pcmk.is_resource_digests_supported()
--        self.config.runner.cib.load(resources=fixture_scsi())
--        self.env_assist.assert_raise_library_error(
--            lambda: stonith.update_scsi_devices(
--                self.env_assist.get_env(), "non-existent-id", DEVICES_2
--            )
--        )
--        self.env_assist.assert_reports(
--            [
--                fixture.error(
--                    reports.codes.ID_NOT_FOUND,
--                    id="non-existent-id",
--                    expected_types=["primitive"],
--                    context_type="cib",
--                    context_id="",
--                )
--            ]
-+        self.config.runner.pcmk.load_state(
-+            resources=FIXTURE_CRM_MON_RES_RUNNING_1, nodes=FIXTURE_CRM_MON_NODES
-         )
--
--    def test_not_a_resource_id(self):
--        self.config.runner.pcmk.is_resource_digests_supported()
--        self.config.runner.cib.load(resources=fixture_scsi())
--        self.env_assist.assert_raise_library_error(
--            lambda: stonith.update_scsi_devices(
--                self.env_assist.get_env(),
--                f"{SCSI_STONITH_ID}-instance_attributes-devices",
--                DEVICES_2,
--            )
-+        self.config.runner.pcmk.resource_digests(
-+            SCSI_STONITH_ID,
-+            SCSI_NODE,
-+            name="start.op.digests",
-+            stdout=fixture_digests_xml(
-+                SCSI_STONITH_ID, SCSI_NODE, devices=",".join(DEVICES_2)
-+            ),
-+            args=["devices={}".format(",".join(DEVICES_2))],
-         )
--        self.env_assist.assert_reports(
--            [
--                fixture.error(
--                    reports.codes.ID_BELONGS_TO_UNEXPECTED_TYPE,
--                    id=f"{SCSI_STONITH_ID}-instance_attributes-devices",
--                    expected_types=["primitive"],
--                    current_type="nvpair",
--                )
--            ]
-+        self.config.runner.pcmk.resource_digests(
-+            SCSI_STONITH_ID,
-+            SCSI_NODE,
-+            name="monitor.op.digests",
-+            stdout=fixture_digests_xml(
-+                SCSI_STONITH_ID, SCSI_NODE, devices=",".join(DEVICES_2)
-+            ),
-+            args=[
-+                "devices={}".format(",".join(DEVICES_2)),
-+                "CRM_meta_interval=60000",
-+            ],
-         )
--
--    def test_not_supported_resource_type(self):
--        self.config.runner.pcmk.is_resource_digests_supported()
--        self.config.runner.cib.load(resources=fixture_scsi())
--        self.env_assist.assert_raise_library_error(
--            lambda: stonith.update_scsi_devices(
--                self.env_assist.get_env(), "dummy", DEVICES_2
-+        self.config.corosync_conf.load_content(
-+            corosync_conf_fixture(
-+                self.existing_corosync_nodes
-+                + [[("ring0_addr", "custom_node"), ("nodeid", "5")]],
-             )
-         )
-+        self.config.env.set_known_nodes(self.existing_nodes[:-1])
-+        self.env_assist.assert_raise_library_error(self.command())
-         self.env_assist.assert_reports(
-             [
-                 fixture.error(
--                    reports.codes.STONITH_RESTARTLESS_UPDATE_UNSUPPORTED_AGENT,
--                    resource_id="dummy",
--                    resource_type="Dummy",
--                    supported_stonith_types=["fence_scsi"],
--                )
--            ]
--        )
--
--    def test_devices_option_missing(self):
--        self.config.runner.pcmk.is_resource_digests_supported()
--        self.config.runner.cib.load(resources=fixture_scsi(devices=None))
--        self.env_assist.assert_raise_library_error(
--            lambda: stonith.update_scsi_devices(
--                self.env_assist.get_env(), SCSI_STONITH_ID, DEVICES_2
--            )
--        )
--        self.env_assist.assert_reports(
--            [
-+                    reports.codes.COROSYNC_CONFIG_MISSING_NAMES_OF_NODES,
-+                    fatal=True,
-+                ),
-                 fixture.error(
--                    reports.codes.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM,
--                    reason=(
--                        "no devices option configured for stonith device "
--                        f"'{SCSI_STONITH_ID}'"
--                    ),
--                    reason_type=reports.const.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM_REASON_OTHER,
--                )
-+                    reports.codes.HOST_NOT_FOUND,
-+                    host_list=[self.existing_nodes[-1]],
-+                ),
-             ]
-         )
- 
--    def test_devices_option_empty(self):
--        self.config.runner.pcmk.is_resource_digests_supported()
--        self.config.runner.cib.load(resources=fixture_scsi(devices=""))
--        self.env_assist.assert_raise_library_error(
--            lambda: stonith.update_scsi_devices(
--                self.env_assist.get_env(), SCSI_STONITH_ID, DEVICES_2
--            )
--        )
--        self.env_assist.assert_reports(
--            [
--                fixture.error(
--                    reports.codes.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM,
--                    reason=(
--                        "no devices option configured for stonith device "
--                        f"'{SCSI_STONITH_ID}'"
--                    ),
--                    reason_type=reports.const.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM_REASON_OTHER,
--                )
--            ]
-+    def _unfence_failure_common_calls(self):
-+        devices = ",".join(DEVICES_2)
-+        self.config.runner.cib.load(
-+            resources=fixture_scsi(),
-+            status=_fixture_status_lrm_ops(SCSI_STONITH_ID),
-         )
--
--    def test_stonith_resource_is_not_running(self):
-         self.config.runner.pcmk.is_resource_digests_supported()
--        self.config.runner.cib.load(resources=fixture_scsi())
-         self.config.runner.pcmk.load_state(
--            resources=FIXTURE_CRM_MON_RES_STOPPED, nodes=FIXTURE_CRM_MON_NODES
-+            resources=FIXTURE_CRM_MON_RES_RUNNING_1, nodes=FIXTURE_CRM_MON_NODES
-+        )
-+        self.config.runner.pcmk.resource_digests(
-+            SCSI_STONITH_ID,
-+            SCSI_NODE,
-+            name="start.op.digests",
-+            stdout=fixture_digests_xml(
-+                SCSI_STONITH_ID,
-+                SCSI_NODE,
-+                devices=devices,
-+            ),
-+            args=[f"devices={devices}"],
-+        )
-+        self.config.runner.pcmk.resource_digests(
-+            SCSI_STONITH_ID,
-+            SCSI_NODE,
-+            name="monitor.op.digests",
-+            stdout=fixture_digests_xml(
-+                SCSI_STONITH_ID,
-+                SCSI_NODE,
-+                devices=devices,
-+            ),
-+            args=[
-+                f"devices={devices}",
-+                "CRM_meta_interval=60000",
-+            ],
-+        )
-+        self.config.corosync_conf.load_content(
-+            corosync_conf_fixture(self.existing_corosync_nodes)
-+        )
-+
-+    def test_unfence_failure_unable_to_connect(self):
-+        self._unfence_failure_common_calls()
-+        self.config.http.corosync.get_corosync_online_targets(
-+            node_labels=self.existing_nodes
-+        )
-+        self.config.http.scsi.unfence_node(
-+            DEVICES_2,
-+            communication_list=[
-+                dict(
-+                    label=self.existing_nodes[0],
-+                    raw_data=json.dumps(
-+                        dict(
-+                            devices=[DEV_2],
-+                            node=self.existing_nodes[0],
-+                        )
-+                    ),
-+                    was_connected=False,
-+                    error_msg="errA",
-+                ),
-+                dict(
-+                    label=self.existing_nodes[1],
-+                    raw_data=json.dumps(
-+                        dict(
-+                            devices=[DEV_2],
-+                            node=self.existing_nodes[1],
-+                        )
-+                    ),
-+                    output=json.dumps(
-+                        dto.to_dict(
-+                            communication.dto.InternalCommunicationResultDto(
-+                                status=communication.const.COM_STATUS_ERROR,
-+                                status_msg="error",
-+                                report_list=[
-+                                    reports.ReportItem.error(
-+                                        reports.messages.StonithUnfencingFailed(
-+                                            "errB"
-+                                        )
-+                                    ).to_dto()
-+                                ],
-+                                data=None,
-+                            )
-+                        )
-+                    ),
-+                ),
-+                dict(
-+                    label=self.existing_nodes[2],
-+                    raw_data=json.dumps(
-+                        dict(
-+                            devices=[DEV_2],
-+                            node=self.existing_nodes[2],
-+                        )
-+                    ),
-+                ),
-+            ],
-+        )
-+        self.env_assist.assert_raise_library_error(self.command())
-+        self.env_assist.assert_reports(
-+            [
-+                fixture.error(
-+                    reports.codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
-+                    node=self.existing_nodes[0],
-+                    command="api/v1/scsi-unfence-node/v1",
-+                    reason="errA",
-+                ),
-+                fixture.error(
-+                    reports.codes.STONITH_UNFENCING_FAILED,
-+                    reason="errB",
-+                    context=reports.dto.ReportItemContextDto(
-+                        node=self.existing_nodes[1],
-+                    ),
-+                ),
-+            ]
-+        )
-+
-+    def test_unfence_failure_agent_script_failed(self):
-+        self._unfence_failure_common_calls()
-+        self.config.http.corosync.get_corosync_online_targets(
-+            node_labels=self.existing_nodes
-+        )
-+        self.config.http.scsi.unfence_node(
-+            DEVICES_2,
-+            communication_list=[
-+                dict(
-+                    label=self.existing_nodes[0],
-+                    raw_data=json.dumps(
-+                        dict(
-+                            devices=[DEV_2],
-+                            node=self.existing_nodes[0],
-+                        )
-+                    ),
-+                ),
-+                dict(
-+                    label=self.existing_nodes[1],
-+                    raw_data=json.dumps(
-+                        dict(
-+                            devices=[DEV_2],
-+                            node=self.existing_nodes[1],
-+                        )
-+                    ),
-+                    output=json.dumps(
-+                        dto.to_dict(
-+                            communication.dto.InternalCommunicationResultDto(
-+                                status=communication.const.COM_STATUS_ERROR,
-+                                status_msg="error",
-+                                report_list=[
-+                                    reports.ReportItem.error(
-+                                        reports.messages.StonithUnfencingFailed(
-+                                            "errB"
-+                                        )
-+                                    ).to_dto()
-+                                ],
-+                                data=None,
-+                            )
-+                        )
-+                    ),
-+                ),
-+                dict(
-+                    label=self.existing_nodes[2],
-+                    raw_data=json.dumps(
-+                        dict(
-+                            devices=[DEV_2],
-+                            node=self.existing_nodes[2],
-+                        )
-+                    ),
-+                ),
-+            ],
-+        )
-+        self.env_assist.assert_raise_library_error(self.command())
-+        self.env_assist.assert_reports(
-+            [
-+                fixture.error(
-+                    reports.codes.STONITH_UNFENCING_FAILED,
-+                    reason="errB",
-+                    context=reports.dto.ReportItemContextDto(
-+                        node=self.existing_nodes[1],
-+                    ),
-+                ),
-+            ]
-+        )
-+
-+    def test_corosync_targets_unable_to_connect(self):
-+        self._unfence_failure_common_calls()
-+        self.config.http.corosync.get_corosync_online_targets(
-+            communication_list=[
-+                dict(
-+                    label=self.existing_nodes[0],
-+                    output='{"corosync":true}',
-+                ),
-+            ]
-+            + [
-+                dict(
-+                    label=node,
-+                    was_connected=False,
-+                    errno=7,
-+                    error_msg="an error",
-+                )
-+                for node in self.existing_nodes[1:]
-+            ]
-+        )
-+        self.env_assist.assert_raise_library_error(self.command())
-+        self.env_assist.assert_reports(
-+            [
-+                fixture.error(
-+                    reports.codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
-+                    force_code=reports.codes.SKIP_OFFLINE_NODES,
-+                    node=node,
-+                    command="remote/status",
-+                    reason="an error",
-+                )
-+                for node in self.existing_nodes[1:]
-+            ]
-+        )
-+
-+    def test_corosync_targets_skip_offline_unfence_node_running_corosync(
-+        self,
-+    ):
-+        self._unfence_failure_common_calls()
-+        self.config.http.corosync.get_corosync_online_targets(
-+            communication_list=[
-+                dict(
-+                    label=self.existing_nodes[0],
-+                    output='{"corosync":true}',
-+                ),
-+                dict(
-+                    label=self.existing_nodes[1],
-+                    output='{"corosync":false}',
-+                ),
-+                dict(
-+                    label=self.existing_nodes[2],
-+                    was_connected=False,
-+                    errno=7,
-+                    error_msg="an error",
-+                ),
-+            ]
-+        )
-+        self.config.http.scsi.unfence_node(
-+            DEVICES_2,
-+            communication_list=[
-+                dict(
-+                    label=self.existing_nodes[0],
-+                    raw_data=json.dumps(
-+                        dict(
-+                            devices=[DEV_2],
-+                            node=self.existing_nodes[0],
-+                        )
-+                    ),
-+                ),
-+            ],
-+        )
-+        self.config.env.push_cib(
-+            resources=fixture_scsi(devices=DEVICES_2),
-+            status=_fixture_status_lrm_ops(
-+                SCSI_STONITH_ID,
-+                lrm_start_ops=DEFAULT_LRM_START_OPS_UPDATED,
-+                lrm_monitor_ops=DEFAULT_LRM_MONITOR_OPS_UPDATED,
-+            ),
-+        )
-+        self.command(force_flags=[reports.codes.SKIP_OFFLINE_NODES])()
-+        self.env_assist.assert_reports(
-+            [
-+                fixture.warn(
-+                    reports.codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
-+                    node=self.existing_nodes[2],
-+                    command="remote/status",
-+                    reason="an error",
-+                ),
-+            ]
-+        )
-+
-+    def test_corosync_targets_unable_to_perform_unfencing_operation(
-+        self,
-+    ):
-+        self._unfence_failure_common_calls()
-+        self.config.http.corosync.get_corosync_online_targets(
-+            communication_list=[
-+                dict(
-+                    label=self.existing_nodes[0],
-+                    was_connected=False,
-+                    errno=7,
-+                    error_msg="an error",
-+                ),
-+                dict(
-+                    label=self.existing_nodes[1],
-+                    was_connected=False,
-+                    errno=7,
-+                    error_msg="an error",
-+                ),
-+                dict(
-+                    label=self.existing_nodes[2],
-+                    output='{"corosync":false}',
-+                ),
-+            ]
-+        )
-+        self.config.http.scsi.unfence_node([DEV_2], communication_list=[])
-+        self.env_assist.assert_raise_library_error(
-+            self.command(force_flags=[reports.codes.SKIP_OFFLINE_NODES])
-+        )
-+        self.env_assist.assert_reports(
-+            [
-+                fixture.warn(
-+                    reports.codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
-+                    node=node,
-+                    command="remote/status",
-+                    reason="an error",
-+                )
-+                for node in self.existing_nodes[0:2]
-+            ]
-+            + [
-+                fixture.error(
-+                    reports.codes.UNABLE_TO_PERFORM_OPERATION_ON_ANY_NODE,
-+                ),
-+            ]
-+        )
-+
-+
-+@mock.patch.object(
-+    settings,
-+    "pacemaker_api_result_schema",
-+    rc("pcmk_api_rng/api-result.rng"),
-+)
-+class UpdateScsiDevices(UpdateScsiDevicesMixin, TestCase):
-+    def test_update_1_to_1_devices(self):
-+        self.assert_command_success(
-+            devices_before=DEVICES_1,
-+            devices_updated=DEVICES_1,
-+        )
-+
-+    def test_update_2_to_2_devices(self):
-+        self.assert_command_success(
-+            devices_before=DEVICES_2,
-+            devices_updated=DEVICES_2,
-+        )
-+
-+    def test_update_1_to_2_devices(self):
-+        self.assert_command_success(unfence=[DEV_2])
-+
-+    def test_update_1_to_3_devices(self):
-+        self.assert_command_success(
-+            devices_before=DEVICES_1,
-+            devices_updated=DEVICES_3,
-+            unfence=[DEV_2, DEV_3],
-+        )
-+
-+    def test_update_3_to_1_devices(self):
-+        self.assert_command_success(
-+            devices_before=DEVICES_3,
-+            devices_updated=DEVICES_1,
-+        )
-+
-+    def test_update_3_to_2_devices(self):
-+        self.assert_command_success(
-+            devices_before=DEVICES_3,
-+            devices_updated=DEVICES_2,
-+        )
-+
-+    def test_update_add_2_to_2_remove_1(self):
-+        self.assert_command_success(
-+            devices_before=[DEV_1, DEV_2],
-+            devices_updated=[DEV_2, DEV_3, DEV_4],
-+            unfence=[DEV_3, DEV_4],
-+        )
-+
-+    def test_default_monitor(self):
-+        self.assert_command_success(unfence=[DEV_2])
-+
-+    def test_no_monitor_ops(self):
-+        self.assert_command_success(
-+            unfence=[DEV_2],
-+            resource_ops=(),
-+            lrm_monitor_ops=(),
-+            lrm_monitor_ops_updated=(),
-+        )
-+
-+    def test_1_monitor_with_timeout(self):
-+        self.assert_command_success(
-+            unfence=[DEV_2],
-+            resource_ops=(("monitor", "30s", "10s", None),),
-+            lrm_monitor_ops=(("30000", DEFAULT_DIGEST, None, None),),
-+            lrm_monitor_ops_updated=(("30000", ALL_DIGEST, None, None),),
-+        )
-+
-+    def test_2_monitor_ops_with_timeouts(self):
-+        self.assert_command_success(
-+            unfence=[DEV_2],
-+            resource_ops=(
-+                ("monitor", "30s", "10s", None),
-+                ("monitor", "40s", "20s", None),
-+            ),
-+            lrm_monitor_ops=(
-+                ("30000", DEFAULT_DIGEST, None, None),
-+                ("40000", DEFAULT_DIGEST, None, None),
-+            ),
-+            lrm_monitor_ops_updated=(
-+                ("30000", ALL_DIGEST, None, None),
-+                ("40000", ALL_DIGEST, None, None),
-+            ),
-+        )
-+
-+    def test_2_monitor_ops_with_one_timeout(self):
-+        self.assert_command_success(
-+            unfence=[DEV_2],
-+            resource_ops=(
-+                ("monitor", "30s", "10s", None),
-+                ("monitor", "60s", None, None),
-+            ),
-+            lrm_monitor_ops=(
-+                ("30000", DEFAULT_DIGEST, None, None),
-+                ("60000", DEFAULT_DIGEST, None, None),
-+            ),
-+            lrm_monitor_ops_updated=(
-+                ("30000", ALL_DIGEST, None, None),
-+                ("60000", ALL_DIGEST, None, None),
-+            ),
-+        )
-+
-+    def test_various_start_ops_one_lrm_start_op(self):
-+        self.assert_command_success(
-+            unfence=[DEV_2],
-+            resource_ops=(
-+                ("monitor", "60s", None, None),
-+                ("start", "0s", "40s", None),
-+                ("start", "0s", "30s", "1"),
-+                ("start", "10s", "5s", None),
-+                ("start", "20s", None, None),
-+            ),
-+        )
-+
-+    def test_1_nonrecurring_start_op_with_timeout(self):
-+        self.assert_command_success(
-+            unfence=[DEV_2],
-+            resource_ops=(
-+                ("monitor", "60s", None, None),
-+                ("start", "0s", "40s", None),
-+            ),
-+        )
-+
-+
-+@mock.patch.object(
-+    settings,
-+    "pacemaker_api_result_schema",
-+    rc("pcmk_api_rng/api-result.rng"),
-+)
-+class TestUpdateScsiDevicesFailures(UpdateScsiDevicesFailuresMixin, TestCase):
-+    def command(self, force_flags=()):
-+        return lambda: stonith.update_scsi_devices(
-+            self.env_assist.get_env(),
-+            SCSI_STONITH_ID,
-+            DEVICES_2,
-+            force_flags=force_flags,
-+        )
-+
-+    def test_devices_cannot_be_empty(self):
-+        self.config.runner.cib.load(resources=fixture_scsi())
-+        self.config.runner.pcmk.is_resource_digests_supported()
-+        self.env_assist.assert_raise_library_error(
-+            lambda: stonith.update_scsi_devices(
-+                self.env_assist.get_env(), SCSI_STONITH_ID, ()
-+            )
-+        )
-+        self.env_assist.assert_reports(
-+            [
-+                fixture.error(
-+                    reports.codes.INVALID_OPTION_VALUE,
-+                    option_name="devices",
-+                    option_value="",
-+                    allowed_values=None,
-+                    cannot_be_empty=True,
-+                    forbidden_characters=None,
-+                )
-+            ]
-+        )
-+
-+    def test_nonexistant_id(self):
-+        """
-+        lower level tested in
-+        pcs_test.tier0.lib.cib.test_stonith.ValidateStonithRestartlessUpdate
-+        """
-+        self.config.runner.cib.load(resources=fixture_scsi())
-+        self.config.runner.pcmk.is_resource_digests_supported()
-+        self.env_assist.assert_raise_library_error(
-+            lambda: stonith.update_scsi_devices(
-+                self.env_assist.get_env(), "non-existent-id", DEVICES_2
-+            )
-+        )
-+        self.env_assist.assert_reports(
-+            [
-+                fixture.error(
-+                    reports.codes.ID_NOT_FOUND,
-+                    id="non-existent-id",
-+                    expected_types=["primitive"],
-+                    context_type="cib",
-+                    context_id="",
-+                )
-+            ]
-+        )
-+
-+    def test_stonith_resource_is_not_running(self):
-+        self.config.runner.cib.load(resources=fixture_scsi())
-+        self.config.runner.pcmk.is_resource_digests_supported()
-+        self.config.runner.pcmk.load_state(
-+            resources=FIXTURE_CRM_MON_RES_STOPPED, nodes=FIXTURE_CRM_MON_NODES
-         )
-         self.env_assist.assert_raise_library_error(
-             lambda: stonith.update_scsi_devices(
-@@ -575,8 +920,8 @@ class TestUpdateScsiDevicesFailures(TestCase):
-         )
- 
-     def test_stonith_resource_is_running_on_more_than_one_node(self):
--        self.config.runner.pcmk.is_resource_digests_supported()
-         self.config.runner.cib.load(resources=fixture_scsi())
-+        self.config.runner.pcmk.is_resource_digests_supported()
-         self.config.runner.pcmk.load_state(
-             resources=FIXTURE_CRM_MON_RES_RUNNING_2, nodes=FIXTURE_CRM_MON_NODES
-         )
-@@ -599,7 +944,6 @@ class TestUpdateScsiDevicesFailures(TestCase):
- 
-     def test_lrm_op_missing_digest_attributes(self):
-         devices = ",".join(DEVICES_2)
--        self.config.runner.pcmk.is_resource_digests_supported()
-         self.config.runner.cib.load(
-             resources=fixture_scsi(),
-             status=_fixture_status_lrm_ops_base(
-@@ -607,6 +951,7 @@ class TestUpdateScsiDevicesFailures(TestCase):
-                 f'<lrm_rsc_op id="{SCSI_STONITH_ID}_last" operation="start"/>',
-             ),
-         )
-+        self.config.runner.pcmk.is_resource_digests_supported()
-         self.config.runner.pcmk.load_state(
-             resources=FIXTURE_CRM_MON_RES_RUNNING_1, nodes=FIXTURE_CRM_MON_NODES
-         )
-@@ -637,7 +982,6 @@ class TestUpdateScsiDevicesFailures(TestCase):
- 
-     def test_crm_resource_digests_missing(self):
-         devices = ",".join(DEVICES_2)
--        self.config.runner.pcmk.is_resource_digests_supported()
-         self.config.runner.cib.load(
-             resources=fixture_scsi(),
-             status=_fixture_status_lrm_ops_base(
-@@ -648,6 +992,7 @@ class TestUpdateScsiDevicesFailures(TestCase):
-                 ),
-             ),
-         )
-+        self.config.runner.pcmk.is_resource_digests_supported()
-         self.config.runner.pcmk.load_state(
-             resources=FIXTURE_CRM_MON_RES_RUNNING_1, nodes=FIXTURE_CRM_MON_NODES
-         )
-@@ -680,11 +1025,11 @@ class TestUpdateScsiDevicesFailures(TestCase):
-         )
- 
-     def test_no_lrm_start_op(self):
--        self.config.runner.pcmk.is_resource_digests_supported()
-         self.config.runner.cib.load(
-             resources=fixture_scsi(),
-             status=_fixture_status_lrm_ops(SCSI_STONITH_ID, lrm_start_ops=()),
-         )
-+        self.config.runner.pcmk.is_resource_digests_supported()
-         self.config.runner.pcmk.load_state(
-             resources=FIXTURE_CRM_MON_RES_RUNNING_1, nodes=FIXTURE_CRM_MON_NODES
-         )
-@@ -705,7 +1050,6 @@ class TestUpdateScsiDevicesFailures(TestCase):
-         )
- 
-     def test_monitor_ops_and_lrm_monitor_ops_do_not_match(self):
--        self.config.runner.pcmk.is_resource_digests_supported()
-         self.config.runner.cib.load(
-             resources=fixture_scsi(
-                 resource_ops=(
-@@ -716,6 +1060,7 @@ class TestUpdateScsiDevicesFailures(TestCase):
-             ),
-             status=_fixture_status_lrm_ops(SCSI_STONITH_ID),
-         )
-+        self.config.runner.pcmk.is_resource_digests_supported()
-         self.config.runner.pcmk.load_state(
-             resources=FIXTURE_CRM_MON_RES_RUNNING_1, nodes=FIXTURE_CRM_MON_NODES
-         )
-@@ -746,13 +1091,13 @@ class TestUpdateScsiDevicesFailures(TestCase):
-         )
- 
-     def test_lrm_monitor_ops_not_found(self):
--        self.config.runner.pcmk.is_resource_digests_supported()
-         self.config.runner.cib.load(
-             resources=fixture_scsi(
-                 resource_ops=(("monitor", "30s", None, None),)
-             ),
-             status=_fixture_status_lrm_ops(SCSI_STONITH_ID),
-         )
-+        self.config.runner.pcmk.is_resource_digests_supported()
-         self.config.runner.pcmk.load_state(
-             resources=FIXTURE_CRM_MON_RES_RUNNING_1, nodes=FIXTURE_CRM_MON_NODES
-         )
-@@ -783,371 +1128,353 @@ class TestUpdateScsiDevicesFailures(TestCase):
-             expected_in_processor=False,
-         )
- 
--    def test_node_missing_name_and_missing_auth_token(self):
--        self.config.runner.pcmk.is_resource_digests_supported()
--        self.config.runner.cib.load(
--            resources=fixture_scsi(),
--            status=_fixture_status_lrm_ops(SCSI_STONITH_ID),
--        )
--        self.config.runner.pcmk.load_state(
--            resources=FIXTURE_CRM_MON_RES_RUNNING_1, nodes=FIXTURE_CRM_MON_NODES
--        )
--        self.config.runner.pcmk.resource_digests(
--            SCSI_STONITH_ID,
--            SCSI_NODE,
--            name="start.op.digests",
--            stdout=fixture_digests_xml(
--                SCSI_STONITH_ID, SCSI_NODE, devices=",".join(DEVICES_2)
--            ),
--            args=["devices={}".format(",".join(DEVICES_2))],
--        )
--        self.config.runner.pcmk.resource_digests(
--            SCSI_STONITH_ID,
--            SCSI_NODE,
--            name="monitor.op.digests",
--            stdout=fixture_digests_xml(
--                SCSI_STONITH_ID, SCSI_NODE, devices=",".join(DEVICES_2)
--            ),
--            args=[
--                "devices={}".format(",".join(DEVICES_2)),
--                "CRM_meta_interval=60000",
--            ],
--        )
--        self.config.corosync_conf.load_content(
--            corosync_conf_fixture(
--                self.existing_corosync_nodes
--                + [[("ring0_addr", "custom_node"), ("nodeid", "5")]],
--            )
--        )
--        self.config.env.set_known_nodes(self.existing_nodes[:-1])
--        self.env_assist.assert_raise_library_error(
--            lambda: stonith.update_scsi_devices(
--                self.env_assist.get_env(), SCSI_STONITH_ID, DEVICES_2
--            ),
-+
-+@mock.patch.object(
-+    settings,
-+    "pacemaker_api_result_schema",
-+    rc("pcmk_api_rng/api-result.rng"),
-+)
-+class UpdateScsiDevicesAddRemove(UpdateScsiDevicesMixin, TestCase):
-+    def test_add_1_to_1(self):
-+        self.assert_command_success(
-+            devices_before=[DEV_1],
-+            devices_updated=[DEV_1, DEV_2],
-+            devices_add=[DEV_2],
-+            devices_remove=[],
-+            unfence=[DEV_2],
-         )
--        self.env_assist.assert_reports(
--            [
--                fixture.error(
--                    reports.codes.COROSYNC_CONFIG_MISSING_NAMES_OF_NODES,
--                    fatal=True,
--                ),
--                fixture.error(
--                    reports.codes.HOST_NOT_FOUND,
--                    host_list=[self.existing_nodes[-1]],
--                ),
--            ]
-+
-+    def test_add_2_to_1(self):
-+        self.assert_command_success(
-+            devices_before=[DEV_1],
-+            devices_updated=[DEV_1, DEV_2, DEV_3],
-+            devices_add=[DEV_2, DEV_3],
-+            devices_remove=[],
-+            unfence=[DEV_2, DEV_3],
-         )
- 
--    def _unfence_failure_common_calls(self):
--        devices = ",".join(DEVICES_2)
--        self.config.runner.pcmk.is_resource_digests_supported()
--        self.config.runner.cib.load(
--            resources=fixture_scsi(),
--            status=_fixture_status_lrm_ops(SCSI_STONITH_ID),
-+    def test_add_2_to_2_and_remove_1(self):
-+        self.assert_command_success(
-+            devices_before=[DEV_1, DEV_2],
-+            devices_updated=[DEV_2, DEV_3, DEV_4],
-+            devices_add=[DEV_3, DEV_4],
-+            devices_remove=[DEV_1],
-+            unfence=[DEV_3, DEV_4],
-         )
--        self.config.runner.pcmk.load_state(
--            resources=FIXTURE_CRM_MON_RES_RUNNING_1, nodes=FIXTURE_CRM_MON_NODES
-+
-+    def test_remove_1_from_2(self):
-+        self.assert_command_success(
-+            devices_before=[DEV_1, DEV_2],
-+            devices_updated=[DEV_2],
-+            devices_add=[],
-+            devices_remove=[DEV_1],
-         )
--        self.config.runner.pcmk.resource_digests(
--            SCSI_STONITH_ID,
--            SCSI_NODE,
--            name="start.op.digests",
--            stdout=fixture_digests_xml(
--                SCSI_STONITH_ID,
--                SCSI_NODE,
--                devices=devices,
--            ),
--            args=[f"devices={devices}"],
-+
-+    def test_remove_2_from_3(self):
-+        self.assert_command_success(
-+            devices_before=[DEV_1, DEV_2, DEV_3],
-+            devices_updated=[DEV_3],
-+            devices_add=[],
-+            devices_remove=[DEV_2, DEV_1],
-         )
--        self.config.runner.pcmk.resource_digests(
--            SCSI_STONITH_ID,
--            SCSI_NODE,
--            name="monitor.op.digests",
--            stdout=fixture_digests_xml(
--                SCSI_STONITH_ID,
--                SCSI_NODE,
--                devices=devices,
--            ),
--            args=[
--                f"devices={devices}",
--                "CRM_meta_interval=60000",
--            ],
-+
-+    def test_remove_2_from_3_add_1(self):
-+        self.assert_command_success(
-+            devices_before=[DEV_1, DEV_2, DEV_3],
-+            devices_updated=[DEV_3, DEV_4],
-+            devices_add=[DEV_4],
-+            devices_remove=[DEV_2, DEV_1],
-+            unfence=[DEV_4],
-         )
--        self.config.corosync_conf.load_content(
--            corosync_conf_fixture(self.existing_corosync_nodes)
-+
-+    def test_add_1_remove_1(self):
-+        self.assert_command_success(
-+            devices_before=[DEV_1, DEV_2],
-+            devices_updated=[DEV_2, DEV_3],
-+            devices_add=[DEV_3],
-+            devices_remove=[DEV_1],
-+            unfence=[DEV_3],
-         )
- 
--    def test_unfence_failure_unable_to_connect(self):
--        self._unfence_failure_common_calls()
--        self.config.http.corosync.get_corosync_online_targets(
--            node_labels=self.existing_nodes
-+    def test_add_2_remove_2(self):
-+        self.assert_command_success(
-+            devices_before=[DEV_1, DEV_2],
-+            devices_updated=[DEV_3, DEV_4],
-+            devices_add=[DEV_3, DEV_4],
-+            devices_remove=[DEV_1, DEV_2],
-+            unfence=[DEV_3, DEV_4],
-         )
--        self.config.http.scsi.unfence_node(
--            DEVICES_2,
--            communication_list=[
--                dict(
--                    label=self.existing_nodes[0],
--                    raw_data=json.dumps(
--                        dict(devices=DEVICES_2, node=self.existing_nodes[0])
--                    ),
--                    was_connected=False,
--                    error_msg="errA",
--                ),
--                dict(
--                    label=self.existing_nodes[1],
--                    raw_data=json.dumps(
--                        dict(devices=DEVICES_2, node=self.existing_nodes[1])
--                    ),
--                    output=json.dumps(
--                        dto.to_dict(
--                            communication.dto.InternalCommunicationResultDto(
--                                status=communication.const.COM_STATUS_ERROR,
--                                status_msg="error",
--                                report_list=[
--                                    reports.ReportItem.error(
--                                        reports.messages.StonithUnfencingFailed(
--                                            "errB"
--                                        )
--                                    ).to_dto()
--                                ],
--                                data=None,
--                            )
--                        )
--                    ),
--                ),
--                dict(
--                    label=self.existing_nodes[2],
--                    raw_data=json.dumps(
--                        dict(devices=DEVICES_2, node=self.existing_nodes[2])
--                    ),
--                ),
--            ],
-+
-+
-+@mock.patch.object(
-+    settings,
-+    "pacemaker_api_result_schema",
-+    rc("pcmk_api_rng/api-result.rng"),
-+)
-+class TestUpdateScsiDevicesAddRemoveFailures(
-+    UpdateScsiDevicesFailuresMixin, TestCase
-+):
-+    def command(self, force_flags=()):
-+        return lambda: stonith.update_scsi_devices_add_remove(
-+            self.env_assist.get_env(),
-+            SCSI_STONITH_ID,
-+            [DEV_2],
-+            [],
-+            force_flags=force_flags,
-         )
-+
-+    def test_add_remove_are_empty(self):
-+        """
-+        lower level tested in
-+        pcs_test/tier0/lib/test_validate.ValidateAddRemoveItems
-+        """
-+        self.config.runner.cib.load(resources=fixture_scsi())
-+        self.config.runner.pcmk.is_resource_digests_supported()
-         self.env_assist.assert_raise_library_error(
--            lambda: stonith.update_scsi_devices(
--                self.env_assist.get_env(), SCSI_STONITH_ID, DEVICES_2
--            ),
-+            lambda: stonith.update_scsi_devices_add_remove(
-+                self.env_assist.get_env(), SCSI_STONITH_ID, (), ()
-+            )
-         )
-         self.env_assist.assert_reports(
-             [
-                 fixture.error(
--                    reports.codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
--                    node=self.existing_nodes[0],
--                    command="api/v1/scsi-unfence-node/v1",
--                    reason="errA",
--                ),
-+                    reports.codes.ADD_REMOVE_ITEMS_NOT_SPECIFIED,
-+                    container_type=reports.const.ADD_REMOVE_CONTAINER_TYPE_STONITH_RESOURCE,
-+                    item_type="device",
-+                    container_id=SCSI_STONITH_ID,
-+                )
-+            ]
-+        )
-+
-+    def test_not_supported_resource_type(self):
-+        """
-+        lower level tested in
-+        pcs_test.tier0.lib.cib.test_stonith.ValidateStonithRestartlessUpdate
-+        """
-+        self.config.runner.cib.load(resources=fixture_scsi())
-+        self.config.runner.pcmk.is_resource_digests_supported()
-+        self.env_assist.assert_raise_library_error(
-+            lambda: stonith.update_scsi_devices_add_remove(
-+                self.env_assist.get_env(), "dummy", [DEV_2], [DEV_1]
-+            )
-+        )
-+        self.env_assist.assert_reports(
-+            [
-                 fixture.error(
--                    reports.codes.STONITH_UNFENCING_FAILED,
--                    reason="errB",
--                    context=reports.dto.ReportItemContextDto(
--                        node=self.existing_nodes[1],
--                    ),
--                ),
-+                    reports.codes.STONITH_RESTARTLESS_UPDATE_UNSUPPORTED_AGENT,
-+                    resource_id="dummy",
-+                    resource_type="Dummy",
-+                    supported_stonith_types=["fence_scsi"],
-+                )
-             ]
-         )
- 
--    def test_unfence_failure_agent_script_failed(self):
--        self._unfence_failure_common_calls()
--        self.config.http.corosync.get_corosync_online_targets(
--            node_labels=self.existing_nodes
-+    def test_stonith_resource_is_running_on_more_than_one_node(self):
-+        self.config.runner.cib.load(resources=fixture_scsi())
-+        self.config.runner.pcmk.is_resource_digests_supported()
-+        self.config.runner.pcmk.load_state(
-+            resources=FIXTURE_CRM_MON_RES_RUNNING_2, nodes=FIXTURE_CRM_MON_NODES
-         )
--        self.config.http.scsi.unfence_node(
--            DEVICES_2,
--            communication_list=[
--                dict(
--                    label=self.existing_nodes[0],
--                    raw_data=json.dumps(
--                        dict(devices=DEVICES_2, node=self.existing_nodes[0])
--                    ),
--                ),
--                dict(
--                    label=self.existing_nodes[1],
--                    raw_data=json.dumps(
--                        dict(devices=DEVICES_2, node=self.existing_nodes[1])
--                    ),
--                    output=json.dumps(
--                        dto.to_dict(
--                            communication.dto.InternalCommunicationResultDto(
--                                status=communication.const.COM_STATUS_ERROR,
--                                status_msg="error",
--                                report_list=[
--                                    reports.ReportItem.error(
--                                        reports.messages.StonithUnfencingFailed(
--                                            "errB"
--                                        )
--                                    ).to_dto()
--                                ],
--                                data=None,
--                            )
--                        )
--                    ),
--                ),
--                dict(
--                    label=self.existing_nodes[2],
--                    raw_data=json.dumps(
--                        dict(devices=DEVICES_2, node=self.existing_nodes[2])
-+        self.env_assist.assert_raise_library_error(
-+            lambda: stonith.update_scsi_devices_add_remove(
-+                self.env_assist.get_env(), SCSI_STONITH_ID, [DEV_2], []
-+            ),
-+            [
-+                fixture.error(
-+                    reports.codes.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM,
-+                    reason=(
-+                        f"resource '{SCSI_STONITH_ID}' is running on more than "
-+                        "1 node"
-                     ),
--                ),
-+                    reason_type=reports.const.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM_REASON_OTHER,
-+                )
-             ],
-+            expected_in_processor=False,
-         )
--        self.env_assist.assert_raise_library_error(
--            lambda: stonith.update_scsi_devices(
--                self.env_assist.get_env(), SCSI_STONITH_ID, DEVICES_2
--            ),
-+
-+
-+class ValidateAddRemoveItems(TestCase):
-+    CONTAINER_TYPE = ADD_REMOVE_CONTAINER_TYPE_STONITH_RESOURCE
-+    ITEM_TYPE = ADD_REMOVE_ITEM_TYPE_DEVICE
-+    CONTAINER_ID = "container_id"
-+
-+    def _validate(
-+        self, add, remove, current=None, adjacent=None, can_be_empty=False
-+    ):
-+        # pylint: disable=protected-access
-+        return stonith._validate_add_remove_items(
-+            add,
-+            remove,
-+            current,
-+            self.CONTAINER_TYPE,
-+            self.ITEM_TYPE,
-+            self.CONTAINER_ID,
-+            adjacent,
-+            can_be_empty,
-         )
--        self.env_assist.assert_reports(
-+
-+    def test_success_add_and_remove(self):
-+        assert_report_item_list_equal(
-+            self._validate(["a1"], ["c3"], ["b2", "c3"]), []
-+        )
-+
-+    def test_success_add_only(self):
-+        assert_report_item_list_equal(self._validate(["b2"], [], ["a1"]), [])
-+
-+    def test_success_remove_only(self):
-+        assert_report_item_list_equal(
-+            self._validate([], ["b2"], ["a1", "b2"]), []
-+        )
-+
-+    def test_add_remove_items_not_specified(self):
-+        assert_report_item_list_equal(
-+            self._validate([], [], ["a1", "b2", "c3"]),
-             [
-                 fixture.error(
--                    reports.codes.STONITH_UNFENCING_FAILED,
--                    reason="errB",
--                    context=reports.dto.ReportItemContextDto(
--                        node=self.existing_nodes[1],
--                    ),
--                ),
--            ]
-+                    reports.codes.ADD_REMOVE_ITEMS_NOT_SPECIFIED,
-+                    container_type=self.CONTAINER_TYPE,
-+                    item_type=self.ITEM_TYPE,
-+                    container_id=self.CONTAINER_ID,
-+                )
-+            ],
-         )
- 
--    def test_corosync_targets_unable_to_connect(self):
--        self._unfence_failure_common_calls()
--        self.config.http.corosync.get_corosync_online_targets(
--            communication_list=[
--                dict(
--                    label=self.existing_nodes[0],
--                    output='{"corosync":true}',
--                ),
--            ]
--            + [
--                dict(
--                    label=node,
--                    was_connected=False,
--                    errno=7,
--                    error_msg="an error",
-+    def test_add_remove_items_duplications(self):
-+        assert_report_item_list_equal(
-+            self._validate(["b2", "b2"], ["a1", "a1"], ["a1", "c3"]),
-+            [
-+                fixture.error(
-+                    reports.codes.ADD_REMOVE_ITEMS_DUPLICATION,
-+                    container_type=self.CONTAINER_TYPE,
-+                    item_type=self.ITEM_TYPE,
-+                    container_id=self.CONTAINER_ID,
-+                    duplicate_items_list=["a1", "b2"],
-                 )
--                for node in self.existing_nodes[1:]
--            ]
-+            ],
-         )
--        self.env_assist.assert_raise_library_error(
--            lambda: stonith.update_scsi_devices(
--                self.env_assist.get_env(), SCSI_STONITH_ID, DEVICES_2
--            ),
-+
-+    def test_add_items_already_in_container(self):
-+        assert_report_item_list_equal(
-+            self._validate(["a1", "b2"], [], ["a1", "b2", "c3"]),
-+            [
-+                fixture.error(
-+                    reports.codes.ADD_REMOVE_CANNOT_ADD_ITEMS_ALREADY_IN_THE_CONTAINER,
-+                    container_type=self.CONTAINER_TYPE,
-+                    item_type=self.ITEM_TYPE,
-+                    container_id=self.CONTAINER_ID,
-+                    item_list=["a1", "b2"],
-+                ),
-+            ],
-         )
--        self.env_assist.assert_reports(
-+
-+    def test_remove_items_not_in_container(self):
-+        assert_report_item_list_equal(
-+            self._validate([], ["a1", "b2"], ["c3"]),
-             [
-                 fixture.error(
--                    reports.codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
--                    force_code=reports.codes.SKIP_OFFLINE_NODES,
--                    node=node,
--                    command="remote/status",
--                    reason="an error",
-+                    reports.codes.ADD_REMOVE_CANNOT_REMOVE_ITEMS_NOT_IN_THE_CONTAINER,
-+                    container_type=self.CONTAINER_TYPE,
-+                    item_type=self.ITEM_TYPE,
-+                    container_id=self.CONTAINER_ID,
-+                    item_list=["a1", "b2"],
-                 )
--                for node in self.existing_nodes[1:]
--            ]
-+            ],
-         )
- 
--    def test_corosync_targets_skip_offline_unfence_node_running_corosync(
--        self,
--    ):
--        self._unfence_failure_common_calls()
--        self.config.http.corosync.get_corosync_online_targets(
--            communication_list=[
--                dict(
--                    label=self.existing_nodes[0],
--                    output='{"corosync":true}',
-+    def test_add_remove_items_at_the_same_time(self):
-+        assert_report_item_list_equal(
-+            self._validate(
-+                ["a1", "a1", "b2", "b2"], ["b2", "b2", "a1", "a1"], ["c3"]
-+            ),
-+            [
-+                fixture.error(
-+                    reports.codes.ADD_REMOVE_ITEMS_DUPLICATION,
-+                    container_type=self.CONTAINER_TYPE,
-+                    item_type=self.ITEM_TYPE,
-+                    container_id=self.CONTAINER_ID,
-+                    duplicate_items_list=["a1", "b2"],
-                 ),
--                dict(
--                    label=self.existing_nodes[1],
--                    output='{"corosync":false}',
-+                fixture.error(
-+                    reports.codes.ADD_REMOVE_CANNOT_REMOVE_ITEMS_NOT_IN_THE_CONTAINER,
-+                    container_type=self.CONTAINER_TYPE,
-+                    item_type=self.ITEM_TYPE,
-+                    container_id=self.CONTAINER_ID,
-+                    item_list=["a1", "b2"],
-                 ),
--                dict(
--                    label=self.existing_nodes[2],
--                    was_connected=False,
--                    errno=7,
--                    error_msg="an error",
-+                fixture.error(
-+                    reports.codes.ADD_REMOVE_CANNOT_ADD_AND_REMOVE_ITEMS_AT_THE_SAME_TIME,
-+                    container_type=self.CONTAINER_TYPE,
-+                    item_type=self.ITEM_TYPE,
-+                    container_id=self.CONTAINER_ID,
-+                    item_list=["a1", "b2"],
-                 ),
--            ]
-+            ],
-         )
--        self.config.http.scsi.unfence_node(
--            DEVICES_2,
--            communication_list=[
--                dict(
--                    label=self.existing_nodes[0],
--                    raw_data=json.dumps(
--                        dict(devices=DEVICES_2, node=self.existing_nodes[0])
--                    ),
-+
-+    def test_remove_all_items(self):
-+        assert_report_item_list_equal(
-+            self._validate([], ["a1", "b2"], ["a1", "b2"]),
-+            [
-+                fixture.error(
-+                    reports.codes.ADD_REMOVE_CANNOT_REMOVE_ALL_ITEMS_FROM_THE_CONTAINER,
-+                    container_type=self.CONTAINER_TYPE,
-+                    item_type=self.ITEM_TYPE,
-+                    container_id=self.CONTAINER_ID,
-+                    item_list=["a1", "b2"],
-                 ),
-             ],
-         )
--        self.config.env.push_cib(
--            resources=fixture_scsi(devices=DEVICES_2),
--            status=_fixture_status_lrm_ops(
--                SCSI_STONITH_ID,
--                lrm_start_ops=DEFAULT_LRM_START_OPS_UPDATED,
--                lrm_monitor_ops=DEFAULT_LRM_MONITOR_OPS_UPDATED,
--            ),
-+
-+    def test_remove_all_items_can_be_empty(self):
-+        assert_report_item_list_equal(
-+            self._validate([], ["a1", "b2"], ["a1", "b2"], can_be_empty=True),
-+            [],
-         )
--        stonith.update_scsi_devices(
--            self.env_assist.get_env(),
--            SCSI_STONITH_ID,
--            DEVICES_2,
--            force_flags=[reports.codes.SKIP_OFFLINE_NODES],
-+
-+    def test_remove_all_items_and_add_new_one(self):
-+        assert_report_item_list_equal(
-+            self._validate(["c3"], ["a1", "b2"], ["a1", "b2"]),
-+            [],
-         )
--        self.env_assist.assert_reports(
-+
-+    def test_missing_adjacent_item(self):
-+        assert_report_item_list_equal(
-+            self._validate(["a1", "b2"], [], ["c3"], adjacent="d4"),
-             [
--                fixture.warn(
--                    reports.codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
--                    node=self.existing_nodes[2],
--                    command="remote/status",
--                    reason="an error",
-+                fixture.error(
-+                    reports.codes.ADD_REMOVE_ADJACENT_ITEM_NOT_IN_THE_CONTAINER,
-+                    container_type=self.CONTAINER_TYPE,
-+                    item_type=self.ITEM_TYPE,
-+                    container_id=self.CONTAINER_ID,
-+                    adjacent_item_id="d4",
-                 ),
--            ]
-+            ],
-         )
- 
--    def test_corosync_targets_unable_to_perform_unfencing_operation(
--        self,
--    ):
--        self._unfence_failure_common_calls()
--        self.config.http.corosync.get_corosync_online_targets(
--            communication_list=[
--                dict(
--                    label=self.existing_nodes[0],
--                    was_connected=False,
--                    errno=7,
--                    error_msg="an error",
--                ),
--                dict(
--                    label=self.existing_nodes[1],
--                    was_connected=False,
--                    errno=7,
--                    error_msg="an error",
--                ),
--                dict(
--                    label=self.existing_nodes[2],
--                    output='{"corosync":false}',
-+    def test_adjacent_item_in_add_list(self):
-+        assert_report_item_list_equal(
-+            self._validate(["a1", "b2"], [], ["a1"], adjacent="a1"),
-+            [
-+                fixture.error(
-+                    reports.codes.ADD_REMOVE_CANNOT_PUT_ITEM_NEXT_TO_ITSELF,
-+                    container_type=self.CONTAINER_TYPE,
-+                    item_type=self.ITEM_TYPE,
-+                    container_id=self.CONTAINER_ID,
-+                    adjacent_item_id="a1",
-                 ),
--            ]
--        )
--        self.config.http.scsi.unfence_node(DEVICES_2, communication_list=[])
--        self.env_assist.assert_raise_library_error(
--            lambda: stonith.update_scsi_devices(
--                self.env_assist.get_env(),
--                SCSI_STONITH_ID,
--                DEVICES_2,
--                force_flags=[reports.codes.SKIP_OFFLINE_NODES],
--            ),
-+            ],
-         )
--        self.env_assist.assert_reports(
-+
-+    def test_adjacent_item_without_add_list(self):
-+        assert_report_item_list_equal(
-+            self._validate([], ["b2"], ["a1", "b2"], adjacent="a1"),
-             [
--                fixture.warn(
--                    reports.codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
--                    node=node,
--                    command="remote/status",
--                    reason="an error",
--                )
--                for node in self.existing_nodes[0:2]
--            ]
--            + [
-                 fixture.error(
--                    reports.codes.UNABLE_TO_PERFORM_OPERATION_ON_ANY_NODE,
-+                    reports.codes.ADD_REMOVE_CANNOT_SPECIFY_ADJACENT_ITEM_WITHOUT_ITEMS_TO_ADD,
-+                    container_type=self.CONTAINER_TYPE,
-+                    item_type=self.ITEM_TYPE,
-+                    container_id=self.CONTAINER_ID,
-+                    adjacent_item_id="a1",
-                 ),
--            ]
-+            ],
-         )
-diff --git a/pcsd/capabilities.xml b/pcsd/capabilities.xml
-index 745b05ad..58ebcf0f 100644
---- a/pcsd/capabilities.xml
-+++ b/pcsd/capabilities.xml
-@@ -1884,6 +1884,14 @@
-         pcs commands: stonith update-scsi-devices
-       </description>
-     </capability>
-+    <capability id="pcmk.stonith.update.scsi-devices.add-remove" in-pcs="1" in-pcsd="0">
-+      <description>
-+        Update scsi fencing devices without affecting other resources using
-+        add/remove cli syntax.
-+
-+        pcs commands: stonith update-scsi-devices
-+      </description>
-+    </capability>
-     <capability id="pcmk.stonith.scsi-unfence-node" in-pcs="0" in-pcsd="1">
-       <description>
-         Unfence scsi devices on a cluster node.
--- 
-2.31.1
-
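The add/remove test cases in the patch removed above all encode one invariant: the updated device list equals the previous list with the removed devices dropped and the added devices appended, and only devices that are newly added get unfenced. A minimal standalone sketch of that invariant, using placeholder device paths in place of the DEV_* fixtures (the paths are assumptions for illustration, not the fixture values):

    # Illustrative sketch only; device paths stand in for the DEV_* test fixtures.
    DEV_1, DEV_2, DEV_3, DEV_4 = "/dev/sda", "/dev/sdb", "/dev/sdc", "/dev/sdd"

    devices_before = [DEV_1, DEV_2]
    devices_add = [DEV_3, DEV_4]
    devices_remove = [DEV_1]

    # updated list: keep what is not removed, then append the additions
    devices_updated = [d for d in devices_before if d not in devices_remove] + devices_add
    # only devices not previously configured need unfencing
    unfence = [d for d in devices_add if d not in devices_before]

    assert devices_updated == [DEV_2, DEV_3, DEV_4]
    assert unfence == [DEV_3, DEV_4]

This mirrors test_add_2_to_2_and_remove_1 above; the same relation holds for the other add/remove cases.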
diff --git a/SOURCES/bz1998454-01-fix-creating-resources-with-depth-operation-attribut.patch b/SOURCES/bz1998454-01-fix-creating-resources-with-depth-operation-attribut.patch
deleted file mode 100644
index 4616131..0000000
--- a/SOURCES/bz1998454-01-fix-creating-resources-with-depth-operation-attribut.patch
+++ /dev/null
@@ -1,45 +0,0 @@
-From 189c73e31f5033413fc4483e40d0bfc78d77f962 Mon Sep 17 00:00:00 2001
-From: Tomas Jelinek <tojeline@redhat.com>
-Date: Fri, 27 Aug 2021 12:05:18 +0200
-Subject: [PATCH 1/2] fix creating resources with depth operation attribute
-
----
- CHANGELOG.md                       | 9 +++++++++
- pcs/lib/cib/resource/operations.py | 2 +-
- 2 files changed, 10 insertions(+), 1 deletion(-)
-
-diff --git a/CHANGELOG.md b/CHANGELOG.md
-index f768cc36..c15546ba 100644
---- a/CHANGELOG.md
-+++ b/CHANGELOG.md
-@@ -1,5 +1,14 @@
- # Change Log
- 
-+## [Unreleased]
-+
-+### Fixed
-+- Fixed an error when creating a resource which defines 'depth' attribute for
-+  its operations ([rhbz#1998454])
-+
-+[rhbz#1998454]: https://bugzilla.redhat.com/show_bug.cgi?id=1998454
-+
-+
- ## [0.10.10] - 2021-08-19
- 
- ### Added
-diff --git a/pcs/lib/cib/resource/operations.py b/pcs/lib/cib/resource/operations.py
-index 390db71a..44b2e7dd 100644
---- a/pcs/lib/cib/resource/operations.py
-+++ b/pcs/lib/cib/resource/operations.py
-@@ -197,7 +197,7 @@ def _action_dto_to_dict(
- ) -> Dict[str, str]:
-     result = dict(
-         filter(
--            lambda item: item[0] != "deph" and item[1] not in (None, ""),
-+            lambda item: item[0] != "depth" and item[1] not in (None, ""),
-             to_dict(dto).items(),
-         )
-     )
--- 
-2.31.1
-
diff --git a/SOURCES/bz2022463-01-fix-creating-empty-cib.patch b/SOURCES/bz2022463-01-fix-creating-empty-cib.patch
new file mode 100644
index 0000000..1437dd1
--- /dev/null
+++ b/SOURCES/bz2022463-01-fix-creating-empty-cib.patch
@@ -0,0 +1,94 @@
+From f0342f110bdb4a7421532b85ca0f49070c7e5c1e Mon Sep 17 00:00:00 2001
+From: Tomas Jelinek <tojeline@redhat.com>
+Date: Thu, 13 Jan 2022 17:32:38 +0100
+Subject: [PATCH 4/5] fix creating empty cib
+
+---
+ pcs/utils.py                | 21 +++++++++++----------
+ pcs_test/tier1/test_misc.py | 25 ++++++++++++++++++++++++-
+ 2 files changed, 35 insertions(+), 11 deletions(-)
+
+diff --git a/pcs/utils.py b/pcs/utils.py
+index ad2d4452..423ffc43 100644
+--- a/pcs/utils.py
++++ b/pcs/utils.py
+@@ -2067,16 +2067,17 @@ def write_empty_cib(cibfile):
+     """
+     Commandline options: no options
+     """
+-    empty_xml = """<?xml version="1.0" encoding="UTF-8"?>
+-<cib admin_epoch="0" epoch="1" num_updates="1" validate-with="pacemaker-1.2">
+-  <configuration>
+-    <crm_config/>
+-    <nodes/>
+-    <resources/>
+-    <constraints/>
+-  </configuration>
+-  <status/>
+-</cib>"""
++    empty_xml = """
++        <cib admin_epoch="0" epoch="1" num_updates="1" validate-with="pacemaker-3.1">
++          <configuration>
++            <crm_config/>
++            <nodes/>
++            <resources/>
++            <constraints/>
++          </configuration>
++          <status/>
++        </cib>
++    """
+     with open(cibfile, "w") as f:
+         f.write(empty_xml)
+ 
+diff --git a/pcs_test/tier1/test_misc.py b/pcs_test/tier1/test_misc.py
+index 29ca6a71..6e6f72fb 100644
+--- a/pcs_test/tier1/test_misc.py
++++ b/pcs_test/tier1/test_misc.py
+@@ -1,8 +1,10 @@
++import os
+ from unittest import TestCase
+ 
+ from pcs_test.tools.assertions import AssertPcsMixin
+ from pcs_test.tools.misc import (
+     get_test_resource as rc,
++    get_tmp_dir,
+     get_tmp_file,
+     outdent,
+     write_file_to_tmpfile,
+@@ -19,7 +21,7 @@ class ParseArgvDashDash(TestCase, AssertPcsMixin):
+     cmd = "constraint colocation add R1 with R2".split()
+ 
+     def setUp(self):
+-        self.temp_cib = get_tmp_file("tier1_misc")
++        self.temp_cib = get_tmp_file("tier1_misc_dashdash")
+         write_file_to_tmpfile(rc("cib-empty.xml"), self.temp_cib)
+         self.pcs_runner = PcsRunner(self.temp_cib.name)
+         self.allowed_roles = format_list(const.PCMK_ROLES)
+@@ -89,3 +91,24 @@ class ParseArgvDashDash(TestCase, AssertPcsMixin):
+                 """
+             ),
+         )
++
++
++class EmptyCibIsPcmk2Compatible(TestCase, AssertPcsMixin):
++    # This test verifies that a default empty CIB created by pcs when -f points
++    # to an empty file conforms to minimal schema version supported by
++    # pacemaker 2.0. If pcs prints a message that CIB schema has been upgraded,
++    # then the test fails and shows there is a bug. Bundle with promoted-max
++    # requires CIB compliant with schema 3.1, which was introduced in pacemaker
++    # 2.0.0.
++    def setUp(self):
++        self.cib_dir = get_tmp_dir("tier1_misc_empty_cib")
++        self.pcs_runner = PcsRunner(os.path.join(self.cib_dir.name, "cib.xml"))
++
++    def tearDown(self):
++        self.cib_dir.cleanup()
++
++    def test_success(self):
++        self.assert_pcs_success(
++            "resource bundle create b container docker image=my.img promoted-max=1".split(),
++            "",
++        )
+-- 
+2.31.1
+
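The bz2022463 patch above replaces the empty-CIB template declaring the pacemaker-1.2 schema with one declaring pacemaker-3.1, the minimum schema needed for bundles with promoted-max, and drops the XML declaration in favor of an indented template. A standalone check of that template (a sketch, not part of the patch; it only re-parses the XML shown in the hunk):

    # Sketch: verify the new empty-CIB template declares the pacemaker-3.1 schema.
    import xml.etree.ElementTree as ET

    empty_xml = """
        <cib admin_epoch="0" epoch="1" num_updates="1" validate-with="pacemaker-3.1">
          <configuration>
            <crm_config/>
            <nodes/>
            <resources/>
            <constraints/>
          </configuration>
          <status/>
        </cib>
    """
    # leading whitespace before the root element is well-formed XML
    cib = ET.fromstring(empty_xml)
    assert cib.get("validate-with") == "pacemaker-3.1"
    assert cib.find("configuration/resources") is not None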
diff --git a/SOURCES/bz2028902-01-fix-enabling-corosync-qdevice.patch b/SOURCES/bz2028902-01-fix-enabling-corosync-qdevice.patch
new file mode 100644
index 0000000..e45d0b9
--- /dev/null
+++ b/SOURCES/bz2028902-01-fix-enabling-corosync-qdevice.patch
@@ -0,0 +1,25 @@
+From 6b4b0c0026e5077044e4e908d093cb613ae2e94e Mon Sep 17 00:00:00 2001
+From: Tomas Jelinek <tojeline@redhat.com>
+Date: Mon, 6 Dec 2021 16:06:31 +0100
+Subject: [PATCH 1/3] fix enabling corosync-qdevice
+
+---
+ pcsd/remote.rb | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/pcsd/remote.rb b/pcsd/remote.rb
+index c49db116..3574d665 100644
+--- a/pcsd/remote.rb
++++ b/pcsd/remote.rb
+@@ -2515,7 +2515,7 @@ def qdevice_client_enable(param, request, auth_user)
+   unless allowed_for_local_cluster(auth_user, Permissions::WRITE)
+     return 403, 'Permission denied'
+   end
+-  if not ServiceChecker.new('corosync', enabled: true).is_enabled?('corosync')
++  if not ServiceChecker.new(['corosync'], enabled: true).is_enabled?('corosync')
+     return pcsd_success('corosync is not enabled, skipping')
+   elsif enable_service('corosync-qdevice')
+     return pcsd_success('corosync-qdevice enabled')
+-- 
+2.31.1
+
diff --git a/SOURCES/bz2032997-01-skip-checking-of-scsi-devices-to-be-removed.patch b/SOURCES/bz2032997-01-skip-checking-of-scsi-devices-to-be-removed.patch
new file mode 100644
index 0000000..e11b09e
--- /dev/null
+++ b/SOURCES/bz2032997-01-skip-checking-of-scsi-devices-to-be-removed.patch
@@ -0,0 +1,86 @@
+From 082bded126151e4f4b4667a1d8337db741828da6 Mon Sep 17 00:00:00 2001
+From: Miroslav Lisik <mlisik@redhat.com>
+Date: Thu, 16 Dec 2021 14:12:58 +0100
+Subject: [PATCH 1/5] skip checking of scsi devices to be removed before
+ unfencing to be added devices
+
+---
+ pcs/lib/commands/scsi.py                 |  3 ++-
+ pcs_test/tier0/lib/commands/test_scsi.py | 21 +++++++++++++++++----
+ 2 files changed, 19 insertions(+), 5 deletions(-)
+
+diff --git a/pcs/lib/commands/scsi.py b/pcs/lib/commands/scsi.py
+index ff20a563..ab732805 100644
+--- a/pcs/lib/commands/scsi.py
++++ b/pcs/lib/commands/scsi.py
+@@ -31,7 +31,8 @@ def unfence_node(
+         return
+     fence_scsi_bin = os.path.join(settings.fence_agent_binaries, "fence_scsi")
+     fenced_devices = []
+-    for device in original_devices:
++    # do not check devices being removed
++    for device in sorted(set(original_devices) & set(updated_devices)):
+         stdout, stderr, return_code = env.cmd_runner().run(
+             [
+                 fence_scsi_bin,
+diff --git a/pcs_test/tier0/lib/commands/test_scsi.py b/pcs_test/tier0/lib/commands/test_scsi.py
+index 8ef9836a..bc2357a9 100644
+--- a/pcs_test/tier0/lib/commands/test_scsi.py
++++ b/pcs_test/tier0/lib/commands/test_scsi.py
+@@ -13,10 +13,13 @@ class TestUnfenceNode(TestCase):
+         self.old_devices = ["device1", "device3"]
+         self.new_devices = ["device3", "device0", "device2"]
+         self.added_devices = set(self.new_devices) - set(self.old_devices)
++        self.check_devices = sorted(
++            set(self.old_devices) & set(self.new_devices)
++        )
+         self.node = "node1"
+ 
+     def test_success_devices_to_unfence(self):
+-        for old_dev in self.old_devices:
++        for old_dev in self.check_devices:
+             self.config.runner.scsi.get_status(
+                 self.node, old_dev, name=f"runner.scsi.is_fenced.{old_dev}"
+             )
+@@ -38,9 +41,19 @@ class TestUnfenceNode(TestCase):
+         )
+         self.env_assist.assert_reports([])
+ 
++    def test_success_replace_unavailable_device(self):
++        self.config.runner.scsi.unfence_node(self.node, {"device2"})
++        scsi.unfence_node(
++            self.env_assist.get_env(),
++            self.node,
++            {"device1"},
++            {"device2"},
++        )
++        self.env_assist.assert_reports([])
++
+     def test_unfencing_failure(self):
+         err_msg = "stderr"
+-        for old_dev in self.old_devices:
++        for old_dev in self.check_devices:
+             self.config.runner.scsi.get_status(
+                 self.node, old_dev, name=f"runner.scsi.is_fenced.{old_dev}"
+             )
+@@ -98,7 +111,7 @@ class TestUnfenceNode(TestCase):
+ 
+     def test_unfencing_skipped_devices_are_fenced(self):
+         stdout_off = "Status: OFF"
+-        for old_dev in self.old_devices:
++        for old_dev in self.check_devices:
+             self.config.runner.scsi.get_status(
+                 self.node,
+                 old_dev,
+@@ -116,7 +129,7 @@ class TestUnfenceNode(TestCase):
+             [
+                 fixture.info(
+                     report_codes.STONITH_UNFENCING_SKIPPED_DEVICES_FENCED,
+-                    devices=sorted(self.old_devices),
++                    devices=sorted(self.check_devices),
+                 )
+             ]
+         )
+-- 
+2.31.1
+
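The bz2032997 patch above changes which devices are status-checked before unfencing: only devices present in both the original and the updated list are queried, so devices being removed (and possibly already unavailable) are skipped. A small standalone sketch of that selection rule, reusing the device names from the adjusted test fixture:

    # Sketch of the selection rule from the hunk above (names illustrative).
    original_devices = ["device1", "device3"]
    updated_devices = ["device3", "device0", "device2"]

    # devices kept across the update are the only ones checked for fencing status
    devices_to_check = sorted(set(original_devices) & set(updated_devices))
    # devices newly added are the ones to unfence
    devices_to_unfence = sorted(set(updated_devices) - set(original_devices))

    assert devices_to_check == ["device3"]
    assert devices_to_unfence == ["device0", "device2"]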
diff --git a/SOURCES/bz2036633-01-Make-ocf-linbit-drbd-agent-pass-OCF-validation.patch b/SOURCES/bz2036633-01-Make-ocf-linbit-drbd-agent-pass-OCF-validation.patch
new file mode 100644
index 0000000..455dcda
--- /dev/null
+++ b/SOURCES/bz2036633-01-Make-ocf-linbit-drbd-agent-pass-OCF-validation.patch
@@ -0,0 +1,41 @@
+From 46b079a93d1817f9c1d6a7403c70b30f59d19c20 Mon Sep 17 00:00:00 2001
+From: Tomas Jelinek <tojeline@redhat.com>
+Date: Tue, 4 Jan 2022 12:56:56 +0100
+Subject: [PATCH 2/5] Make ocf:linbit:drbd agent pass OCF validation
+
+---
+ data/ocf-1.0.rng | 18 ++++++++----------
+ 1 file changed, 8 insertions(+), 10 deletions(-)
+
+diff --git a/data/ocf-1.0.rng b/data/ocf-1.0.rng
+index 36ba4611..1e14a83b 100644
+--- a/data/ocf-1.0.rng
++++ b/data/ocf-1.0.rng
+@@ -169,16 +169,14 @@ RNGs. Thank you.
+       <optional>
+         <element name="content">
+           <choice>
+-            <attribute name="type">
+-              <choice>
+-                <value>boolean</value>
+-                <value>string</value>
+-                <value>integer</value>
+-                <value>second</value><!-- used by fence agents -->
+-                <value>int</value><!-- used by fence agents intead of integer -->
+-                <value>time</value><!-- used by pacemaker metadata -->
+-              </choice>
+-            </attribute>
++            <!--
++              OCF 1.0 allows values: boolean, integer, string. Agents, however,
++              quite often use other values: int (fence agents), numeric
++              (ocf:linbit:drbd), second (fence agents), time (pacemaker
++              metadata). Since pcs doesn't actually care about the type, we
++              allow any type to keep compatibility with existing agents.
++            -->
++            <attribute name="type" />
+             <group>
+               <!--
+                 used by fence agents and processed by pcs even though it is not
+-- 
+2.31.1
+
diff --git a/SOURCES/bz2042433-01-fix-creating-empty-cib.patch b/SOURCES/bz2042433-01-fix-creating-empty-cib.patch
deleted file mode 100644
index df3f45e..0000000
--- a/SOURCES/bz2042433-01-fix-creating-empty-cib.patch
+++ /dev/null
@@ -1,94 +0,0 @@
-From eae00a30e6eb682e60ec1ace4ec6633591254e15 Mon Sep 17 00:00:00 2001
-From: Tomas Jelinek <tojeline@redhat.com>
-Date: Thu, 13 Jan 2022 17:32:38 +0100
-Subject: [PATCH] fix creating empty cib
-
----
- pcs/utils.py                | 21 +++++++++++----------
- pcs_test/tier1/test_misc.py | 25 ++++++++++++++++++++++++-
- 2 files changed, 35 insertions(+), 11 deletions(-)
-
-diff --git a/pcs/utils.py b/pcs/utils.py
-index ad2d4452..423ffc43 100644
---- a/pcs/utils.py
-+++ b/pcs/utils.py
-@@ -2067,16 +2067,17 @@ def write_empty_cib(cibfile):
-     """
-     Commandline options: no options
-     """
--    empty_xml = """<?xml version="1.0" encoding="UTF-8"?>
--<cib admin_epoch="0" epoch="1" num_updates="1" validate-with="pacemaker-1.2">
--  <configuration>
--    <crm_config/>
--    <nodes/>
--    <resources/>
--    <constraints/>
--  </configuration>
--  <status/>
--</cib>"""
-+    empty_xml = """
-+        <cib admin_epoch="0" epoch="1" num_updates="1" validate-with="pacemaker-3.1">
-+          <configuration>
-+            <crm_config/>
-+            <nodes/>
-+            <resources/>
-+            <constraints/>
-+          </configuration>
-+          <status/>
-+        </cib>
-+    """
-     with open(cibfile, "w") as f:
-         f.write(empty_xml)
- 
-diff --git a/pcs_test/tier1/test_misc.py b/pcs_test/tier1/test_misc.py
-index 29ca6a71..6e6f72fb 100644
---- a/pcs_test/tier1/test_misc.py
-+++ b/pcs_test/tier1/test_misc.py
-@@ -1,8 +1,10 @@
-+import os
- from unittest import TestCase
- 
- from pcs_test.tools.assertions import AssertPcsMixin
- from pcs_test.tools.misc import (
-     get_test_resource as rc,
-+    get_tmp_dir,
-     get_tmp_file,
-     outdent,
-     write_file_to_tmpfile,
-@@ -19,7 +21,7 @@ class ParseArgvDashDash(TestCase, AssertPcsMixin):
-     cmd = "constraint colocation add R1 with R2".split()
- 
-     def setUp(self):
--        self.temp_cib = get_tmp_file("tier1_misc")
-+        self.temp_cib = get_tmp_file("tier1_misc_dashdash")
-         write_file_to_tmpfile(rc("cib-empty.xml"), self.temp_cib)
-         self.pcs_runner = PcsRunner(self.temp_cib.name)
-         self.allowed_roles = format_list(const.PCMK_ROLES)
-@@ -89,3 +91,24 @@ class ParseArgvDashDash(TestCase, AssertPcsMixin):
-                 """
-             ),
-         )
-+
-+
-+class EmptyCibIsPcmk2Compatible(TestCase, AssertPcsMixin):
-+    # This test verifies that a default empty CIB created by pcs when -f points
-+    # to an empty file conforms to minimal schema version supported by
-+    # pacemaker 2.0. If pcs prints a message that CIB schema has been upgraded,
-+    # then the test fails and shows there is a bug. Bundle with promoted-max
-+    # requires CIB compliant with schema 3.1, which was introduced in pacemaker
-+    # 2.0.0.
-+    def setUp(self):
-+        self.cib_dir = get_tmp_dir("tier1_misc_empty_cib")
-+        self.pcs_runner = PcsRunner(os.path.join(self.cib_dir.name, "cib.xml"))
-+
-+    def tearDown(self):
-+        self.cib_dir.cleanup()
-+
-+    def test_success(self):
-+        self.assert_pcs_success(
-+            "resource bundle create b container docker image=my.img promoted-max=1".split(),
-+            "",
-+        )
--- 
-2.31.1
-
diff --git a/SOURCES/bz2044409-01-fix-backend-parameter-all-in-cluster-destroy.patch b/SOURCES/bz2044409-01-fix-backend-parameter-all-in-cluster-destroy.patch
new file mode 100644
index 0000000..1131d7f
--- /dev/null
+++ b/SOURCES/bz2044409-01-fix-backend-parameter-all-in-cluster-destroy.patch
@@ -0,0 +1,23 @@
+From fa75f40361bc39cbd645b8014713e4c0ad0cda18 Mon Sep 17 00:00:00 2001
+From: Ivan Devat <idevat@redhat.com>
+Date: Mon, 24 Jan 2022 14:08:54 +0100
+Subject: [PATCH 2/2] fix backend parameter "all" in cluster destroy
+
+---
+ src/app/backend/calls/destroyCluster.ts | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/src/app/backend/calls/destroyCluster.ts b/src/app/backend/calls/destroyCluster.ts
+index b6e83a41..cf41ea42 100644
+--- a/src/app/backend/calls/destroyCluster.ts
++++ b/src/app/backend/calls/destroyCluster.ts
+@@ -4,5 +4,5 @@ const { url } = endpoints.destroyCluster;
+ 
+ export const destroyCluster = (clusterName: string): CallResult =>
+   http.post(url({ clusterName }), {
+-    params: [["--all", "1"]],
++    params: [["all", "1"]],
+   });
+-- 
+2.31.1
+
diff --git a/SOURCES/bz2047983-01-Fix-snmp-client.patch b/SOURCES/bz2047983-01-Fix-snmp-client.patch
new file mode 100644
index 0000000..a520771
--- /dev/null
+++ b/SOURCES/bz2047983-01-Fix-snmp-client.patch
@@ -0,0 +1,25 @@
+From 68aa09a89804084e2764b06f0ae37f56cc609bda Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Valentin=20Vidi=C4=87?= <vvidic@valentin-vidic.from.hr>
+Date: Wed, 15 Dec 2021 20:32:26 +0100
+Subject: [PATCH 1/2] Fix snmp client
+
+Required constant is missing causing the command to fail on startup and breaking the pcs_snmp_agent service.
+---
+ pcsd/pcsd-cli-main.rb | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/pcsd/pcsd-cli-main.rb b/pcsd/pcsd-cli-main.rb
+index 29b9006d..be72d543 100644
+--- a/pcsd/pcsd-cli-main.rb
++++ b/pcsd/pcsd-cli-main.rb
+@@ -10,6 +10,7 @@ require 'remote.rb'
+ 
+ 
+ PCS = get_pcs_path()
++PCS_INTERNAL = get_pcs_internal_path()
+ $logger_device = StringIO.new
+ $logger = Logger.new($logger_device)
+ early_log($logger)
+-- 
+2.34.1
+
diff --git a/SOURCES/bz2050274-01-process-invalid-OCF-agents-the-same-way-as-before.patch b/SOURCES/bz2050274-01-process-invalid-OCF-agents-the-same-way-as-before.patch
new file mode 100644
index 0000000..43a3d38
--- /dev/null
+++ b/SOURCES/bz2050274-01-process-invalid-OCF-agents-the-same-way-as-before.patch
@@ -0,0 +1,934 @@
+From ae3435418f0af6e5f22f463871aa90a5c5b2d15f Mon Sep 17 00:00:00 2001
+From: Tomas Jelinek <tojeline@redhat.com>
+Date: Fri, 4 Feb 2022 16:23:18 +0100
+Subject: [PATCH 1/3] process invalid OCF agents as if they complied with OCF
+ 1.0
+
+---
+ pcs/common/reports/codes.py                   |   4 +-
+ pcs/common/reports/messages.py                |  13 +-
+ pcs/lib/commands/resource.py                  |   3 +-
+ pcs/lib/commands/resource_agent.py            |   4 +-
+ pcs/lib/commands/stonith.py                   |   3 +-
+ pcs/lib/resource_agent/__init__.py            |   1 -
+ pcs/lib/resource_agent/error.py               |  14 --
+ pcs/lib/resource_agent/facade.py              |  37 ++-
+ pcs/lib/resource_agent/xml.py                 |  15 +-
+ .../tier0/common/reports/test_messages.py     |  18 +-
+ .../tier0/lib/resource_agent/test_facade.py   |  47 ++++
+ pcs_test/tier0/lib/resource_agent/test_xml.py | 226 ++++++++----------
+ 12 files changed, 201 insertions(+), 184 deletions(-)
+
+diff --git a/pcs/common/reports/codes.py b/pcs/common/reports/codes.py
+index 3e0512d9..e8dee00f 100644
+--- a/pcs/common/reports/codes.py
++++ b/pcs/common/reports/codes.py
+@@ -36,8 +36,8 @@ ADD_REMOVE_CANNOT_SPECIFY_ADJACENT_ITEM_WITHOUT_ITEMS_TO_ADD = M(
+     "ADD_REMOVE_CANNOT_SPECIFY_ADJACENT_ITEM_WITHOUT_ITEMS_TO_ADD"
+ )
+ AGENT_GENERIC_ERROR = M("AGENT_GENERIC_ERROR")
+-AGENT_IMPLEMENTS_UNSUPPORTED_OCF_VERSION = M(
+-    "AGENT_IMPLEMENTS_UNSUPPORTED_OCF_VERSION"
++AGENT_IMPLEMENTS_UNSUPPORTED_OCF_VERSION_ASSUMED_VERSION = M(
++    "AGENT_IMPLEMENTS_UNSUPPORTED_OCF_VERSION_ASSUMED_VERSION"
+ )
+ AGENT_NAME_GUESS_FOUND_MORE_THAN_ONE = M("AGENT_NAME_GUESS_FOUND_MORE_THAN_ONE")
+ AGENT_NAME_GUESS_FOUND_NONE = M("AGENT_NAME_GUESS_FOUND_NONE")
+diff --git a/pcs/common/reports/messages.py b/pcs/common/reports/messages.py
+index 9d665e73..7df1e1eb 100644
+--- a/pcs/common/reports/messages.py
++++ b/pcs/common/reports/messages.py
+@@ -3789,9 +3789,9 @@ class AgentNameGuessFoundNone(ReportItemMessage):
+ 
+ 
+ @dataclass(frozen=True)
+-class AgentImplementsUnsupportedOcfVersion(ReportItemMessage):
++class AgentImplementsUnsupportedOcfVersionAssumedVersion(ReportItemMessage):
+     """
+-    Specified agent implements OCF version not supported by pcs
++    Specified agent implements OCF version not supported by pcs, assumed OCF 1.0
+ 
+     agent -- name of the agent
+     ocf_version -- OCF version implemented by the agent
+@@ -3801,7 +3801,8 @@ class AgentImplementsUnsupportedOcfVersion(ReportItemMessage):
+     agent: str
+     ocf_version: str
+     supported_versions: List[str]
+-    _code = codes.AGENT_IMPLEMENTS_UNSUPPORTED_OCF_VERSION
++    assumed_version: str
++    _code = codes.AGENT_IMPLEMENTS_UNSUPPORTED_OCF_VERSION_ASSUMED_VERSION
+ 
+     @property
+     def message(self) -> str:
+@@ -3809,9 +3810,9 @@ class AgentImplementsUnsupportedOcfVersion(ReportItemMessage):
+         _is = format_plural(self.supported_versions, "is")
+         _version_list = format_list(self.supported_versions)
+         return (
+-            f"Unable to process agent '{self.agent}' as it implements "
+-            f"unsupported OCF version '{self.ocf_version}', supported "
+-            f"{_version} {_is}: {_version_list}"
++            f"Agent '{self.agent}' implements unsupported OCF version "
++            f"'{self.ocf_version}', supported {_version} {_is}: "
++            f"{_version_list}; assumed version '{self.assumed_version}'"
+         )
+ 
+ 
+diff --git a/pcs/lib/commands/resource.py b/pcs/lib/commands/resource.py
+index 82ce73e0..c4b6252c 100644
+--- a/pcs/lib/commands/resource.py
++++ b/pcs/lib/commands/resource.py
+@@ -84,7 +84,6 @@ from pcs.lib.resource_agent import (
+     ResourceAgentName,
+     split_resource_agent_name,
+     UnableToGetAgentMetadata,
+-    UnsupportedOcfVersion,
+ )
+ from pcs.lib.tools import get_tmp_cib
+ from pcs.lib.validate import ValueTimeInterval
+@@ -162,7 +161,7 @@ def _get_agent_facade(
+             else find_one_resource_agent_by_type(runner, report_processor, name)
+         )
+         return factory.facade_from_parsed_name(split_name)
+-    except (UnableToGetAgentMetadata, UnsupportedOcfVersion) as e:
++    except UnableToGetAgentMetadata as e:
+         if allow_absent_agent:
+             report_processor.report(
+                 resource_agent_error_to_report_item(
+diff --git a/pcs/lib/commands/resource_agent.py b/pcs/lib/commands/resource_agent.py
+index e6167b13..4a1831c0 100644
+--- a/pcs/lib/commands/resource_agent.py
++++ b/pcs/lib/commands/resource_agent.py
+@@ -139,7 +139,9 @@ def _complete_agent_list(
+         try:
+             split_name = split_resource_agent_name(name)
+             metadata = (
+-                agent_factory.facade_from_parsed_name(split_name).metadata
++                agent_factory.facade_from_parsed_name(
++                    split_name, report_warnings=False
++                ).metadata
+                 if describe
+                 else name_to_void_metadata(split_name)
+             )
+diff --git a/pcs/lib/commands/stonith.py b/pcs/lib/commands/stonith.py
+index 093f5be9..2aa299d7 100644
+--- a/pcs/lib/commands/stonith.py
++++ b/pcs/lib/commands/stonith.py
+@@ -45,7 +45,6 @@ from pcs.lib.resource_agent import (
+     ResourceAgentFacadeFactory,
+     ResourceAgentName,
+     UnableToGetAgentMetadata,
+-    UnsupportedOcfVersion,
+ )
+ from pcs.lib.validate import validate_add_remove_items
+ from pcs.lib.xml_tools import get_root
+@@ -62,7 +61,7 @@ def _get_agent_facade(
+             raise InvalidResourceAgentName(name)
+         full_name = ResourceAgentName("stonith", None, name)
+         return factory.facade_from_parsed_name(full_name)
+-    except (UnableToGetAgentMetadata, UnsupportedOcfVersion) as e:
++    except UnableToGetAgentMetadata as e:
+         if allow_absent_agent:
+             report_processor.report(
+                 resource_agent_error_to_report_item(
+diff --git a/pcs/lib/resource_agent/__init__.py b/pcs/lib/resource_agent/__init__.py
+index 4548017f..c6086331 100644
+--- a/pcs/lib/resource_agent/__init__.py
++++ b/pcs/lib/resource_agent/__init__.py
+@@ -10,7 +10,6 @@ from .error import (
+     ResourceAgentError,
+     resource_agent_error_to_report_item,
+     UnableToGetAgentMetadata,
+-    UnsupportedOcfVersion,
+ )
+ from .facade import ResourceAgentFacade, ResourceAgentFacadeFactory
+ from .list import (
+diff --git a/pcs/lib/resource_agent/error.py b/pcs/lib/resource_agent/error.py
+index d4178333..f1dd7f3d 100644
+--- a/pcs/lib/resource_agent/error.py
++++ b/pcs/lib/resource_agent/error.py
+@@ -2,8 +2,6 @@ from typing import Iterable
+ 
+ from pcs.common import reports
+ 
+-from . import const
+-
+ 
+ class ResourceAgentError(Exception):
+     def __init__(self, agent_name: str):
+@@ -37,12 +35,6 @@ class UnableToGetAgentMetadata(ResourceAgentError):
+         self.message = message
+ 
+ 
+-class UnsupportedOcfVersion(ResourceAgentError):
+-    def __init__(self, agent_name: str, ocf_version: str):
+-        super().__init__(agent_name)
+-        self.ocf_version = ocf_version
+-
+-
+ def resource_agent_error_to_report_item(
+     e: ResourceAgentError,
+     severity: reports.ReportItemSeverity = reports.ReportItemSeverity.error(),
+@@ -69,10 +61,4 @@ def resource_agent_error_to_report_item(
+         message = reports.messages.UnableToGetAgentMetadata(
+             e.agent_name, e.message
+         )
+-    elif isinstance(e, UnsupportedOcfVersion):
+-        message = reports.messages.AgentImplementsUnsupportedOcfVersion(
+-            e.agent_name,
+-            e.ocf_version,
+-            sorted(const.SUPPORTED_OCF_VERSIONS),
+-        )
+     return reports.ReportItem(severity, message)
+diff --git a/pcs/lib/resource_agent/facade.py b/pcs/lib/resource_agent/facade.py
+index 4dbb59b8..dea59a1a 100644
+--- a/pcs/lib/resource_agent/facade.py
++++ b/pcs/lib/resource_agent/facade.py
+@@ -188,18 +188,32 @@ class ResourceAgentFacadeFactory:
+         self._fenced_metadata = None
+ 
+     def facade_from_parsed_name(
+-        self, name: ResourceAgentName
++        self, name: ResourceAgentName, report_warnings=True
+     ) -> ResourceAgentFacade:
+         """
+         Create ResourceAgentFacade based on specified agent name
+ 
+         name -- agent name to get a facade for
+         """
+-        return self._facade_from_metadata(
+-            ocf_version_to_ocf_unified(
+-                parse_metadata(name, load_metadata(self._runner, name))
+-            )
++        metadata, raw_ocf_version = parse_metadata(
++            name,
++            load_metadata(self._runner, name),
+         )
++        if (
++            report_warnings
++            and raw_ocf_version not in const.SUPPORTED_OCF_VERSIONS
++        ):
++            self._report_processor.report(
++                reports.ReportItem.warning(
++                    reports.messages.AgentImplementsUnsupportedOcfVersionAssumedVersion(
++                        name.full_name,
++                        raw_ocf_version,
++                        sorted(const.SUPPORTED_OCF_VERSIONS),
++                        const.OCF_1_0,
++                    )
++                )
++            )
++        return self._facade_from_metadata(ocf_version_to_ocf_unified(metadata))
+ 
+     def void_facade_from_parsed_name(
+         self, name: ResourceAgentName
+@@ -232,15 +246,12 @@ class ResourceAgentFacadeFactory:
+                 const.FAKE_AGENT_STANDARD, None, const.PACEMAKER_FENCED
+             )
+             try:
++                metadata, _ = parse_metadata(
++                    agent_name,
++                    load_fake_agent_metadata(self._runner, agent_name.type),
++                )
+                 self._fenced_metadata = ocf_unified_to_pcs(
+-                    ocf_version_to_ocf_unified(
+-                        parse_metadata(
+-                            agent_name,
+-                            load_fake_agent_metadata(
+-                                self._runner, agent_name.type
+-                            ),
+-                        )
+-                    )
++                    ocf_version_to_ocf_unified(metadata)
+                 )
+             except ResourceAgentError as e:
+                 # If pcs is unable to load fenced metadata, cache an empty
+diff --git a/pcs/lib/resource_agent/xml.py b/pcs/lib/resource_agent/xml.py
+index 82f8fbfa..1ba97216 100644
+--- a/pcs/lib/resource_agent/xml.py
++++ b/pcs/lib/resource_agent/xml.py
+@@ -8,7 +8,7 @@ from pcs.common.tools import xml_fromstring
+ from pcs.lib.external import CommandRunner
+ 
+ from . import const
+-from .error import UnableToGetAgentMetadata, UnsupportedOcfVersion
++from .error import UnableToGetAgentMetadata
+ from .types import (
+     FakeAgentName,
+     ResourceAgentActionOcf1_0,
+@@ -137,8 +137,11 @@ def load_fake_agent_metadata(
+ 
+ 
+ def parse_metadata(
+-    name: ResourceAgentName, metadata: _Element
+-) -> Union[ResourceAgentMetadataOcf1_0, ResourceAgentMetadataOcf1_1]:
++    name: ResourceAgentName,
++    metadata: _Element,
++) -> Tuple[
++    Union[ResourceAgentMetadataOcf1_0, ResourceAgentMetadataOcf1_1], str
++]:
+     """
+     Parse XML metadata to a dataclass
+ 
+@@ -146,11 +149,9 @@ def parse_metadata(
+     metadata -- metadata XML document
+     """
+     ocf_version = _get_ocf_version(metadata)
+-    if ocf_version == const.OCF_1_0:
+-        return _parse_agent_1_0(name, metadata)
+     if ocf_version == const.OCF_1_1:
+-        return _parse_agent_1_1(name, metadata)
+-    raise UnsupportedOcfVersion(name.full_name, ocf_version)
++        return _parse_agent_1_1(name, metadata), ocf_version
++    return _parse_agent_1_0(name, metadata), ocf_version
+ 
+ 
+ def _parse_agent_1_0(
+diff --git a/pcs_test/tier0/common/reports/test_messages.py b/pcs_test/tier0/common/reports/test_messages.py
+index 4a7b4945..b885a9eb 100644
+--- a/pcs_test/tier0/common/reports/test_messages.py
++++ b/pcs_test/tier0/common/reports/test_messages.py
+@@ -2833,22 +2833,22 @@ class AgentNameGuessFoundNone(NameBuildTest):
+         )
+ 
+ 
+-class AgentImplementsUnsupportedOcfVersion(NameBuildTest):
++class AgentImplementsUnsupportedOcfVersionAssumedVersion(NameBuildTest):
+     def test_singular(self):
+         self.assert_message_from_report(
+-            "Unable to process agent 'agent-name' as it implements unsupported "
+-            "OCF version 'ocf-2.3', supported version is: 'v1'",
+-            reports.AgentImplementsUnsupportedOcfVersion(
+-                "agent-name", "ocf-2.3", ["v1"]
++            "Agent 'agent-name' implements unsupported OCF version 'ocf-2.3', "
++            "supported version is: 'v1'; assumed version 'v1'",
++            reports.AgentImplementsUnsupportedOcfVersionAssumedVersion(
++                "agent-name", "ocf-2.3", ["v1"], "v1"
+             ),
+         )
+ 
+     def test_plural(self):
+         self.assert_message_from_report(
+-            "Unable to process agent 'agent-name' as it implements unsupported "
+-            "OCF version 'ocf-2.3', supported versions are: 'v1', 'v2', 'v3'",
+-            reports.AgentImplementsUnsupportedOcfVersion(
+-                "agent-name", "ocf-2.3", ["v1", "v2", "v3"]
++            "Agent 'agent-name' implements unsupported OCF version 'ocf-2.3', "
++            "supported versions are: 'v1', 'v2', 'v3'; assumed version 'v1'",
++            reports.AgentImplementsUnsupportedOcfVersionAssumedVersion(
++                "agent-name", "ocf-2.3", ["v1", "v2", "v3"], "v1"
+             ),
+         )
+ 
+diff --git a/pcs_test/tier0/lib/resource_agent/test_facade.py b/pcs_test/tier0/lib/resource_agent/test_facade.py
+index 654eb35e..f6a9899c 100644
+--- a/pcs_test/tier0/lib/resource_agent/test_facade.py
++++ b/pcs_test/tier0/lib/resource_agent/test_facade.py
+@@ -92,6 +92,14 @@ class ResourceAgentFacadeFactory(TestCase):
+             </parameters>
+         </resource-agent>
+     """
++    _fixture_agent_bad_version_xml = """
++        <resource-agent name="agent">
++            <version>0.1.2</version>
++            <parameters>
++                <parameter name="agent-param"/>
++            </parameters>
++        </resource-agent>
++    """
+     _fixture_fenced_xml = """
+         <resource-agent name="pacemaker-fenced">
+             <parameters>
+@@ -125,6 +133,45 @@ class ResourceAgentFacadeFactory(TestCase):
+         self.assertEqual(facade.metadata.name, name)
+         self.assertTrue(facade.metadata.agent_exists)
+ 
++    def test_facade_bad_ocf_version(self):
++        name = ra.ResourceAgentName("service", None, "daemon")
++        self.config.runner.pcmk.load_agent(
++            agent_name="service:daemon",
++            stdout=self._fixture_agent_bad_version_xml,
++        )
++
++        env = self.env_assist.get_env()
++        facade = ra.ResourceAgentFacadeFactory(
++            env.cmd_runner(), env.report_processor
++        ).facade_from_parsed_name(name)
++        self.assertEqual(facade.metadata.name, name)
++        self.assertTrue(facade.metadata.agent_exists)
++        self.env_assist.assert_reports(
++            [
++                fixture.warn(
++                    reports.codes.AGENT_IMPLEMENTS_UNSUPPORTED_OCF_VERSION_ASSUMED_VERSION,
++                    agent=name.full_name,
++                    ocf_version="0.1.2",
++                    supported_versions=sorted(ra.const.SUPPORTED_OCF_VERSIONS),
++                    assumed_version=ra.const.OCF_1_0,
++                )
++            ]
++        )
++
++    def test_facade_bad_ocf_version_disabled_warning(self):
++        name = ra.ResourceAgentName("service", None, "daemon")
++        self.config.runner.pcmk.load_agent(
++            agent_name="service:daemon",
++            stdout=self._fixture_agent_bad_version_xml,
++        )
++
++        env = self.env_assist.get_env()
++        facade = ra.ResourceAgentFacadeFactory(
++            env.cmd_runner(), env.report_processor
++        ).facade_from_parsed_name(name, report_warnings=False)
++        self.assertEqual(facade.metadata.name, name)
++        self.assertTrue(facade.metadata.agent_exists)
++
+     def test_facade_missing_agent(self):
+         name = ra.ResourceAgentName("service", None, "daemon")
+         self.config.runner.pcmk.load_agent(
+diff --git a/pcs_test/tier0/lib/resource_agent/test_xml.py b/pcs_test/tier0/lib/resource_agent/test_xml.py
+index c4176f32..26bbbb7d 100644
+--- a/pcs_test/tier0/lib/resource_agent/test_xml.py
++++ b/pcs_test/tier0/lib/resource_agent/test_xml.py
+@@ -351,6 +351,7 @@ class LoadFakeAgentMetadata(TestCase):
+ class ParseOcfToolsMixin:
+     agent_name = ra.ResourceAgentName("ocf", "pacemaker", "Dummy")
+     ocf_version = None
++    parsed_ocf_version = None
+ 
+     def parse(self, xml, agent_name=None):
+         agent_name = agent_name or self.agent_name
+@@ -383,19 +384,17 @@ class ParseOcfToolsMixin:
+             version_el.text = ocf_version
+         return etree_to_str(dom)
+ 
+-
+-class ParseOcfGeneric(ParseOcfToolsMixin, TestCase):
+-    def test_unsupported_ocf_version(self):
+-        with self.assertRaises(ra.UnsupportedOcfVersion) as cm:
+-            self.parse(self.xml("""<resource-agent/>""", ocf_version="1.2"))
+-        self.assertEqual(cm.exception.agent_name, self.agent_name.full_name)
+-        self.assertEqual(cm.exception.ocf_version, "1.2")
++    def assert_parse_result(self, xml, metadata):
++        self.assertEqual(
++            self.parse(xml),
++            (metadata, self.parsed_ocf_version or self.ocf_version),
++        )
+ 
+ 
+ class ParseOcf10BaseMixin(ParseOcfToolsMixin):
+     def test_empty_agent(self):
+-        self.assertEqual(
+-            self.parse(self.xml("""<resource-agent/>""")),
++        self.assert_parse_result(
++            self.xml("""<resource-agent/>"""),
+             ResourceAgentMetadataOcf1_0(
+                 self.agent_name,
+                 shortdesc=None,
+@@ -406,16 +405,14 @@ class ParseOcf10BaseMixin(ParseOcfToolsMixin):
+         )
+ 
+     def test_desc_element(self):
+-        self.assertEqual(
+-            self.parse(
+-                self.xml(
+-                    """
++        self.assert_parse_result(
++            self.xml(
++                """
+                         <resource-agent>
+                             <shortdesc>This is a shortdesc</shortdesc>
+                             <longdesc>This is a longdesc</longdesc>
+                         </resource-agent>
+                     """
+-                )
+             ),
+             ResourceAgentMetadataOcf1_0(
+                 self.agent_name,
+@@ -427,16 +424,14 @@ class ParseOcf10BaseMixin(ParseOcfToolsMixin):
+         )
+ 
+     def test_desc_element_empty(self):
+-        self.assertEqual(
+-            self.parse(
+-                self.xml(
+-                    """
++        self.assert_parse_result(
++            self.xml(
++                """
+                         <resource-agent>
+                             <longdesc/>
+                             <shortdesc/>
+                         </resource-agent>
+                     """
+-                )
+             ),
+             ResourceAgentMetadataOcf1_0(
+                 self.agent_name,
+@@ -448,15 +443,13 @@ class ParseOcf10BaseMixin(ParseOcfToolsMixin):
+         )
+ 
+     def test_desc_attribute(self):
+-        self.assertEqual(
+-            self.parse(
+-                self.xml(
+-                    """
++        self.assert_parse_result(
++            self.xml(
++                """
+                         <resource-agent shortdesc="This is a shortdesc">
+                             <longdesc></longdesc>
+                         </resource-agent>
+                     """
+-                )
+             ),
+             ResourceAgentMetadataOcf1_0(
+                 self.agent_name,
+@@ -468,13 +461,11 @@ class ParseOcf10BaseMixin(ParseOcfToolsMixin):
+         )
+ 
+     def test_desc_attribute_empty(self):
+-        self.assertEqual(
+-            self.parse(
+-                self.xml(
+-                    """
++        self.assert_parse_result(
++            self.xml(
++                """
+                         <resource-agent shortdesc=""/>
+                     """
+-                )
+             ),
+             ResourceAgentMetadataOcf1_0(
+                 self.agent_name,
+@@ -486,15 +477,13 @@ class ParseOcf10BaseMixin(ParseOcfToolsMixin):
+         )
+ 
+     def test_desc_element_and_attribute(self):
+-        self.assertEqual(
+-            self.parse(
+-                self.xml(
+-                    """
++        self.assert_parse_result(
++            self.xml(
++                """
+                         <resource-agent shortdesc="shortdesc attribute">
+                             <shortdesc>shortdesc element</shortdesc>
+                         </resource-agent>
+                     """
+-                )
+             ),
+             ResourceAgentMetadataOcf1_0(
+                 self.agent_name,
+@@ -506,15 +495,13 @@ class ParseOcf10BaseMixin(ParseOcfToolsMixin):
+         )
+ 
+     def test_desc_element_empty_and_attribute(self):
+-        self.assertEqual(
+-            self.parse(
+-                self.xml(
+-                    """
++        self.assert_parse_result(
++            self.xml(
++                """
+                         <resource-agent shortdesc="shortdesc attribute">
+                             <shortdesc></shortdesc>
+                         </resource-agent>
+                     """
+-                )
+             ),
+             ResourceAgentMetadataOcf1_0(
+                 self.agent_name,
+@@ -526,15 +513,13 @@ class ParseOcf10BaseMixin(ParseOcfToolsMixin):
+         )
+ 
+     def test_desc_element_empty_and_attribute_empty(self):
+-        self.assertEqual(
+-            self.parse(
+-                self.xml(
+-                    """
++        self.assert_parse_result(
++            self.xml(
++                """
+                         <resource-agent shortdesc="">
+                             <shortdesc></shortdesc>
+                         </resource-agent>
+                     """
+-                )
+             ),
+             ResourceAgentMetadataOcf1_0(
+                 self.agent_name,
+@@ -546,15 +531,13 @@ class ParseOcf10BaseMixin(ParseOcfToolsMixin):
+         )
+ 
+     def test_parameters_empty_list(self):
+-        self.assertEqual(
+-            self.parse(
+-                self.xml(
+-                    """
++        self.assert_parse_result(
++            self.xml(
++                """
+                         <resource-agent>
+                             <parameters/>
+                         </resource-agent>
+                     """
+-                )
+             ),
+             ResourceAgentMetadataOcf1_0(
+                 self.agent_name,
+@@ -581,17 +564,15 @@ class ParseOcf10BaseMixin(ParseOcfToolsMixin):
+             )
+ 
+     def test_parameters_minimal(self):
+-        self.assertEqual(
+-            self.parse(
+-                self.xml(
+-                    """
++        self.assert_parse_result(
++            self.xml(
++                """
+                         <resource-agent>
+                             <parameters>
+                                 <parameter name="a_parameter"/>
+                             </parameters>
+                         </resource-agent>
+                     """
+-                )
+             ),
+             ResourceAgentMetadataOcf1_0(
+                 self.agent_name,
+@@ -616,10 +597,9 @@ class ParseOcf10BaseMixin(ParseOcfToolsMixin):
+         )
+ 
+     def test_parameters_all_settings(self):
+-        self.assertEqual(
+-            self.parse(
+-                self.xml(
+-                    """
++        self.assert_parse_result(
++            self.xml(
++                """
+                         <resource-agent>
+                             <parameters>
+                                 <parameter name="a_parameter" required="1"
+@@ -632,7 +612,6 @@ class ParseOcf10BaseMixin(ParseOcfToolsMixin):
+                             </parameters>
+                         </resource-agent>
+                     """
+-                )
+             ),
+             ResourceAgentMetadataOcf1_0(
+                 self.agent_name,
+@@ -657,10 +636,9 @@ class ParseOcf10BaseMixin(ParseOcfToolsMixin):
+         )
+ 
+     def test_parameters_content(self):
+-        self.assertEqual(
+-            self.parse(
+-                self.xml(
+-                    """
++        self.assert_parse_result(
++            self.xml(
++                """
+                         <resource-agent>
+                             <parameters>
+                                 <parameter name="with_type">
+@@ -676,7 +654,6 @@ class ParseOcf10BaseMixin(ParseOcfToolsMixin):
+                             </parameters>
+                         </resource-agent>
+                     """
+-                )
+             ),
+             ResourceAgentMetadataOcf1_0(
+                 self.agent_name,
+@@ -713,15 +690,13 @@ class ParseOcf10BaseMixin(ParseOcfToolsMixin):
+         )
+ 
+     def test_actions_empty_list(self):
+-        self.assertEqual(
+-            self.parse(
+-                self.xml(
+-                    """
++        self.assert_parse_result(
++            self.xml(
++                """
+                         <resource-agent>
+                             <actions/>
+                         </resource-agent>
+                     """
+-                )
+             ),
+             ResourceAgentMetadataOcf1_0(
+                 self.agent_name,
+@@ -748,10 +723,9 @@ class ParseOcf10BaseMixin(ParseOcfToolsMixin):
+             )
+ 
+     def test_actions_multiple(self):
+-        self.assertEqual(
+-            self.parse(
+-                self.xml(
+-                    """
++        self.assert_parse_result(
++            self.xml(
++                """
+                         <resource-agent>
+                             <actions>
+                                 <action name="minimal"/>
+@@ -764,7 +738,6 @@ class ParseOcf10BaseMixin(ParseOcfToolsMixin):
+                             </actions>
+                         </resource-agent>
+                     """
+-                )
+             ),
+             ResourceAgentMetadataOcf1_0(
+                 self.agent_name,
+@@ -808,7 +781,26 @@ class ParseOcf10BaseMixin(ParseOcfToolsMixin):
+ 
+ 
+ class ParseOcf10NoVersion(ParseOcf10BaseMixin, TestCase):
+-    pass
++    parsed_ocf_version = "1.0"
++
++
++class ParseOcf10UnsupportedVersion(ParseOcf10BaseMixin, TestCase):
++    ocf_version = "0.1.2"
++
++    # These tests test that pcs raises an error if an agent doesn't conform to
++    # OCF schema. There is, however, no validation against OCF schema for
++    # agents with unsupported OCF version. That means no error message, pcs
++    # tries to process the agent and crashes. However bad that sounds, it's
++    # intended as that's how pcs behaved before OCF 1.1 was implemented.
++    # There's therefore no point in running these tests.
++
++    def test_parameters_empty_parameter(self):
++        # parameters must have at least 'name' attribute
++        pass
++
++    def test_actions_empty_action(self):
++        # actions must have at least 'name' attribute
++        pass
+ 
+ 
+ class ParseOcf10ExplicitVersion(ParseOcf10BaseMixin, TestCase):
+@@ -819,8 +811,8 @@ class ParseOcf11(ParseOcfToolsMixin, TestCase):
+     ocf_version = "1.1"
+ 
+     def test_empty_agent(self):
+-        self.assertEqual(
+-            self.parse(self.xml("""<resource-agent/>""")),
++        self.assert_parse_result(
++            self.xml("""<resource-agent/>"""),
+             ResourceAgentMetadataOcf1_1(
+                 self.agent_name,
+                 shortdesc=None,
+@@ -831,16 +823,14 @@ class ParseOcf11(ParseOcfToolsMixin, TestCase):
+         )
+ 
+     def test_desc_element(self):
+-        self.assertEqual(
+-            self.parse(
+-                self.xml(
+-                    """
++        self.assert_parse_result(
++            self.xml(
++                """
+                         <resource-agent>
+                             <shortdesc>This is a shortdesc</shortdesc>
+                             <longdesc>This is a longdesc</longdesc>
+                         </resource-agent>
+                     """
+-                )
+             ),
+             ResourceAgentMetadataOcf1_1(
+                 self.agent_name,
+@@ -852,16 +842,14 @@ class ParseOcf11(ParseOcfToolsMixin, TestCase):
+         )
+ 
+     def test_desc_element_empty(self):
+-        self.assertEqual(
+-            self.parse(
+-                self.xml(
+-                    """
++        self.assert_parse_result(
++            self.xml(
++                """
+                         <resource-agent>
+                             <longdesc/>
+                             <shortdesc/>
+                         </resource-agent>
+                     """
+-                )
+             ),
+             ResourceAgentMetadataOcf1_1(
+                 self.agent_name,
+@@ -873,15 +861,13 @@ class ParseOcf11(ParseOcfToolsMixin, TestCase):
+         )
+ 
+     def test_parameters_empty_list(self):
+-        self.assertEqual(
+-            self.parse(
+-                self.xml(
+-                    """
++        self.assert_parse_result(
++            self.xml(
++                """
+                         <resource-agent>
+                             <parameters/>
+                         </resource-agent>
+                     """
+-                )
+             ),
+             ResourceAgentMetadataOcf1_1(
+                 self.agent_name,
+@@ -908,17 +894,15 @@ class ParseOcf11(ParseOcfToolsMixin, TestCase):
+             )
+ 
+     def test_parameters_minimal(self):
+-        self.assertEqual(
+-            self.parse(
+-                self.xml(
+-                    """
++        self.assert_parse_result(
++            self.xml(
++                """
+                         <resource-agent>
+                             <parameters>
+                                 <parameter name="a_parameter"/>
+                             </parameters>
+                         </resource-agent>
+                     """
+-                )
+             ),
+             ResourceAgentMetadataOcf1_1(
+                 self.agent_name,
+@@ -945,10 +929,9 @@ class ParseOcf11(ParseOcfToolsMixin, TestCase):
+         )
+ 
+     def test_parameters_deprecated_minimal(self):
+-        self.assertEqual(
+-            self.parse(
+-                self.xml(
+-                    """
++        self.assert_parse_result(
++            self.xml(
++                """
+                         <resource-agent>
+                             <parameters>
+                                 <parameter name="a_parameter">
+@@ -957,7 +940,6 @@ class ParseOcf11(ParseOcfToolsMixin, TestCase):
+                             </parameters>
+                         </resource-agent>
+                     """
+-                )
+             ),
+             ResourceAgentMetadataOcf1_1(
+                 self.agent_name,
+@@ -984,10 +966,9 @@ class ParseOcf11(ParseOcfToolsMixin, TestCase):
+         )
+ 
+     def test_parameters_deprecated_replaced_with(self):
+-        self.assertEqual(
+-            self.parse(
+-                self.xml(
+-                    """
++        self.assert_parse_result(
++            self.xml(
++                """
+                         <resource-agent>
+                             <parameters>
+                                 <parameter name="a_parameter">
+@@ -999,7 +980,6 @@ class ParseOcf11(ParseOcfToolsMixin, TestCase):
+                             </parameters>
+                         </resource-agent>
+                     """
+-                )
+             ),
+             ResourceAgentMetadataOcf1_1(
+                 self.agent_name,
+@@ -1026,10 +1006,9 @@ class ParseOcf11(ParseOcfToolsMixin, TestCase):
+         )
+ 
+     def test_parameters_all_settings(self):
+-        self.assertEqual(
+-            self.parse(
+-                self.xml(
+-                    """
++        self.assert_parse_result(
++            self.xml(
++                """
+                         <resource-agent>
+                             <parameters>
+                                 <parameter name="a_parameter"
+@@ -1048,7 +1027,6 @@ class ParseOcf11(ParseOcfToolsMixin, TestCase):
+                             </parameters>
+                         </resource-agent>
+                     """
+-                )
+             ),
+             ResourceAgentMetadataOcf1_1(
+                 self.agent_name,
+@@ -1075,10 +1053,9 @@ class ParseOcf11(ParseOcfToolsMixin, TestCase):
+         )
+ 
+     def test_parameters_content(self):
+-        self.assertEqual(
+-            self.parse(
+-                self.xml(
+-                    """
++        self.assert_parse_result(
++            self.xml(
++                """
+                         <resource-agent>
+                             <parameters>
+                                 <parameter name="with_type">
+@@ -1094,7 +1071,6 @@ class ParseOcf11(ParseOcfToolsMixin, TestCase):
+                             </parameters>
+                         </resource-agent>
+                     """
+-                )
+             ),
+             ResourceAgentMetadataOcf1_1(
+                 self.agent_name,
+@@ -1135,15 +1111,13 @@ class ParseOcf11(ParseOcfToolsMixin, TestCase):
+         )
+ 
+     def test_actions_empty_list(self):
+-        self.assertEqual(
+-            self.parse(
+-                self.xml(
+-                    """
++        self.assert_parse_result(
++            self.xml(
++                """
+                         <resource-agent>
+                             <actions/>
+                         </resource-agent>
+                     """
+-                )
+             ),
+             ResourceAgentMetadataOcf1_1(
+                 self.agent_name,
+@@ -1170,10 +1144,9 @@ class ParseOcf11(ParseOcfToolsMixin, TestCase):
+             )
+ 
+     def test_actions_multiple(self):
+-        self.assertEqual(
+-            self.parse(
+-                self.xml(
+-                    """
++        self.assert_parse_result(
++            self.xml(
++                """
+                         <resource-agent>
+                             <actions>
+                                 <action name="minimal"/>
+@@ -1186,7 +1159,6 @@ class ParseOcf11(ParseOcfToolsMixin, TestCase):
+                             </actions>
+                         </resource-agent>
+                     """
+-                )
+             ),
+             ResourceAgentMetadataOcf1_1(
+                 self.agent_name,
+-- 
+2.34.1
+
diff --git a/SOURCES/bz2050274-02-relax-OCF-1.0-parser.patch b/SOURCES/bz2050274-02-relax-OCF-1.0-parser.patch
new file mode 100644
index 0000000..8e8c073
--- /dev/null
+++ b/SOURCES/bz2050274-02-relax-OCF-1.0-parser.patch
@@ -0,0 +1,587 @@
+From 65b30a04a234449cb4aa65606d47bf1d673592a4 Mon Sep 17 00:00:00 2001
+From: Tomas Jelinek <tojeline@redhat.com>
+Date: Wed, 9 Feb 2022 11:16:49 +0100
+Subject: [PATCH 2/3] relax OCF 1.0 parser
+
+---
+ pcs/lib/resource_agent/facade.py              |  50 ++++--
+ pcs/lib/resource_agent/ocf_transform.py       |  51 +++++-
+ pcs/lib/resource_agent/xml.py                 |   8 +-
+ .../tier0/lib/resource_agent/test_facade.py   |  44 +++++
+ .../lib/resource_agent/test_ocf_transform.py  |  48 +++++-
+ pcs_test/tier0/lib/resource_agent/test_xml.py | 155 ++++++++++--------
+ 6 files changed, 256 insertions(+), 100 deletions(-)
+
+diff --git a/pcs/lib/resource_agent/facade.py b/pcs/lib/resource_agent/facade.py
+index dea59a1a..8a65eb1c 100644
+--- a/pcs/lib/resource_agent/facade.py
++++ b/pcs/lib/resource_agent/facade.py
+@@ -2,12 +2,19 @@ from collections import defaultdict
+ from dataclasses import replace as dc_replace
+ from typing import Dict, Iterable, List, Optional, Set
+ 
++from lxml import etree
++
++from pcs import settings
+ from pcs.common import reports
+ from pcs.lib import validate
+ from pcs.lib.external import CommandRunner
+ 
+ from . import const
+-from .error import ResourceAgentError, resource_agent_error_to_report_item
++from .error import (
++    ResourceAgentError,
++    resource_agent_error_to_report_item,
++    UnableToGetAgentMetadata,
++)
+ from .name import name_to_void_metadata
+ from .ocf_transform import ocf_version_to_ocf_unified
+ from .pcs_transform import get_additional_trace_parameters, ocf_unified_to_pcs
+@@ -195,24 +202,33 @@ class ResourceAgentFacadeFactory:
+ 
+         name -- agent name to get a facade for
+         """
+-        metadata, raw_ocf_version = parse_metadata(
+-            name,
+-            load_metadata(self._runner, name),
+-        )
+-        if (
+-            report_warnings
+-            and raw_ocf_version not in const.SUPPORTED_OCF_VERSIONS
+-        ):
+-            self._report_processor.report(
+-                reports.ReportItem.warning(
+-                    reports.messages.AgentImplementsUnsupportedOcfVersionAssumedVersion(
+-                        name.full_name,
+-                        raw_ocf_version,
+-                        sorted(const.SUPPORTED_OCF_VERSIONS),
+-                        const.OCF_1_0,
++        dom_metadata = load_metadata(self._runner, name)
++        metadata, raw_ocf_version = parse_metadata(name, dom_metadata)
++        if report_warnings:
++            if raw_ocf_version not in const.SUPPORTED_OCF_VERSIONS:
++                self._report_processor.report(
++                    reports.ReportItem.warning(
++                        reports.messages.AgentImplementsUnsupportedOcfVersionAssumedVersion(
++                            name.full_name,
++                            raw_ocf_version,
++                            sorted(const.SUPPORTED_OCF_VERSIONS),
++                            const.OCF_1_0,
++                        )
+                     )
+                 )
+-            )
++            if raw_ocf_version != const.OCF_1_1:
++                try:
++                    etree.RelaxNG(
++                        file=settings.path.ocf_1_0_schema
++                    ).assertValid(dom_metadata)
++                except etree.DocumentInvalid as e:
++                    self._report_processor.report(
++                        resource_agent_error_to_report_item(
++                            UnableToGetAgentMetadata(name.full_name, str(e)),
++                            severity=reports.ReportItemSeverity.warning(),
++                            is_stonith=name.is_stonith,
++                        )
++                    )
+         return self._facade_from_metadata(ocf_version_to_ocf_unified(metadata))
+ 
+     def void_facade_from_parsed_name(
+diff --git a/pcs/lib/resource_agent/ocf_transform.py b/pcs/lib/resource_agent/ocf_transform.py
+index e841b55e..7e6a14ad 100644
+--- a/pcs/lib/resource_agent/ocf_transform.py
++++ b/pcs/lib/resource_agent/ocf_transform.py
+@@ -67,20 +67,42 @@ def _ocf_1_1_to_ocf_unified(
+         longdesc=metadata.longdesc,
+         parameters=_ocf_1_1_parameter_list_to_ocf_unified(metadata.parameters),
+         # OCF 1.1 actions are the same as in OCF 1.0
+-        actions=_ocf_1_0_action_list_to_ocf_unified(metadata.actions),
++        actions=_ocf_1_1_action_list_to_ocf_unified(metadata.actions),
+     )
+ 
+ 
+ def _ocf_1_0_action_list_to_ocf_unified(
+-    action_list: Iterable[
+-        Union[ResourceAgentActionOcf1_0, ResourceAgentActionOcf1_1]
+-    ],
++    action_list: Iterable[ResourceAgentActionOcf1_0],
+ ) -> List[ResourceAgentAction]:
+     """
+     Transform OCF 1.0 actions to a universal format
+ 
+     action_list -- actions according OCF 1.0
+     """
++    return [
++        ResourceAgentAction(
++            name=action.name,
++            timeout=action.timeout,
++            interval=action.interval,
++            role=action.role,
++            start_delay=action.start_delay,
++            depth=action.depth,
++            automatic=_bool_value_legacy(action.automatic),
++            on_target=_bool_value_legacy(action.on_target),
++        )
++        for action in action_list
++        if action.name
++    ]
++
++
++def _ocf_1_1_action_list_to_ocf_unified(
++    action_list: Iterable[ResourceAgentActionOcf1_1],
++) -> List[ResourceAgentAction]:
++    """
++    Transform OCF 1.1 actions to a universal format
++
++    action_list -- actions according OCF 1.1
++    """
+     return [
+         ResourceAgentAction(
+             name=action.name,
+@@ -111,6 +133,8 @@ def _ocf_1_0_parameter_list_to_ocf_unified(
+ 
+     result = []
+     for parameter in parameter_list:
++        if not parameter.name:
++            continue
+         result.append(
+             ResourceAgentParameter(
+                 name=parameter.name,
+@@ -119,17 +143,17 @@ def _ocf_1_0_parameter_list_to_ocf_unified(
+                 type=parameter.type,
+                 default=parameter.default,
+                 enum_values=parameter.enum_values,
+-                required=_bool_value(parameter.required),
++                required=_bool_value_legacy(parameter.required),
+                 advanced=False,
+-                deprecated=_bool_value(parameter.deprecated),
++                deprecated=_bool_value_legacy(parameter.deprecated),
+                 deprecated_by=sorted(deprecated_by_dict[parameter.name]),
+                 deprecated_desc=None,
+                 unique_group=(
+                     f"{const.DEFAULT_UNIQUE_GROUP_PREFIX}{parameter.name}"
+-                    if _bool_value(parameter.unique)
++                    if _bool_value_legacy(parameter.unique)
+                     else None
+                 ),
+-                reloadable=_bool_value(parameter.unique),
++                reloadable=_bool_value_legacy(parameter.unique),
+             )
+         )
+     return result
+@@ -170,3 +194,14 @@ def _bool_value(value: Optional[str]) -> bool:
+     value -- raw bool value
+     """
+     return value == "1"
++
++
++def _bool_value_legacy(value: Optional[str]) -> bool:
++    """
++    Transform raw bool value from metadata to bool type in backward compatible way
++
++    value -- raw bool value
++    """
++    return (
++        False if not value else value.lower() in {"true", "on", "yes", "y", "1"}
++    )
+diff --git a/pcs/lib/resource_agent/xml.py b/pcs/lib/resource_agent/xml.py
+index 1ba97216..0fc70527 100644
+--- a/pcs/lib/resource_agent/xml.py
++++ b/pcs/lib/resource_agent/xml.py
+@@ -94,9 +94,7 @@ def _metadata_xml_to_dom(metadata: str) -> _Element:
+     """
+     dom = xml_fromstring(metadata)
+     ocf_version = _get_ocf_version(dom)
+-    if ocf_version == const.OCF_1_0:
+-        etree.RelaxNG(file=settings.path.ocf_1_0_schema).assertValid(dom)
+-    elif ocf_version == const.OCF_1_1:
++    if ocf_version == const.OCF_1_1:
+         etree.RelaxNG(file=settings.path.ocf_1_1_schema).assertValid(dom)
+     return dom
+ 
+@@ -230,7 +228,7 @@ def _parse_parameters_1_0(
+         )
+         result.append(
+             ResourceAgentParameterOcf1_0(
+-                name=str(parameter_el.attrib["name"]),
++                name=str(parameter_el.get("name", "")),
+                 shortdesc=_get_shortdesc(parameter_el),
+                 longdesc=_get_longdesc(parameter_el),
+                 type=value_type,
+@@ -286,7 +284,7 @@ def _parse_parameters_1_1(
+ def _parse_actions_1_0(element: _Element) -> List[ResourceAgentActionOcf1_0]:
+     return [
+         ResourceAgentActionOcf1_0(
+-            name=str(action.attrib["name"]),
++            name=str(action.get("name", "")),
+             timeout=action.get("timeout"),
+             interval=action.get("interval"),
+             role=action.get("role"),
+diff --git a/pcs_test/tier0/lib/resource_agent/test_facade.py b/pcs_test/tier0/lib/resource_agent/test_facade.py
+index f6a9899c..313dfa2b 100644
+--- a/pcs_test/tier0/lib/resource_agent/test_facade.py
++++ b/pcs_test/tier0/lib/resource_agent/test_facade.py
+@@ -100,6 +100,13 @@ class ResourceAgentFacadeFactory(TestCase):
+             </parameters>
+         </resource-agent>
+     """
++    _fixture_agent_not_valid_xml = """
++        <resource-agent name="agent">
++            <parameters>
++                <parameter label="something wrong"/>
++            </parameters>
++        </resource-agent>
++    """
+     _fixture_fenced_xml = """
+         <resource-agent name="pacemaker-fenced">
+             <parameters>
+@@ -172,6 +179,43 @@ class ResourceAgentFacadeFactory(TestCase):
+         self.assertEqual(facade.metadata.name, name)
+         self.assertTrue(facade.metadata.agent_exists)
+ 
++    def test_facade_ocf_1_0_not_valid(self):
++        name = ra.ResourceAgentName("service", None, "daemon")
++        self.config.runner.pcmk.load_agent(
++            agent_name="service:daemon",
++            stdout=self._fixture_agent_not_valid_xml,
++        )
++
++        env = self.env_assist.get_env()
++        facade = ra.ResourceAgentFacadeFactory(
++            env.cmd_runner(), env.report_processor
++        ).facade_from_parsed_name(name)
++        self.assertEqual(facade.metadata.name, name)
++        self.assertTrue(facade.metadata.agent_exists)
++        self.env_assist.assert_reports(
++            [
++                fixture.warn(
++                    reports.codes.UNABLE_TO_GET_AGENT_METADATA,
++                    agent=name.full_name,
++                    reason="Element parameter failed to validate attributes, line 3",
++                )
++            ]
++        )
++
++    def test_facade_ocf_1_0_not_valid_disabled_warning(self):
++        name = ra.ResourceAgentName("service", None, "daemon")
++        self.config.runner.pcmk.load_agent(
++            agent_name="service:daemon",
++            stdout=self._fixture_agent_not_valid_xml,
++        )
++
++        env = self.env_assist.get_env()
++        facade = ra.ResourceAgentFacadeFactory(
++            env.cmd_runner(), env.report_processor
++        ).facade_from_parsed_name(name, report_warnings=False)
++        self.assertEqual(facade.metadata.name, name)
++        self.assertTrue(facade.metadata.agent_exists)
++
+     def test_facade_missing_agent(self):
+         name = ra.ResourceAgentName("service", None, "daemon")
+         self.config.runner.pcmk.load_agent(
+diff --git a/pcs_test/tier0/lib/resource_agent/test_ocf_transform.py b/pcs_test/tier0/lib/resource_agent/test_ocf_transform.py
+index 9e41b6af..d0de86e5 100644
+--- a/pcs_test/tier0/lib/resource_agent/test_ocf_transform.py
++++ b/pcs_test/tier0/lib/resource_agent/test_ocf_transform.py
+@@ -66,6 +66,18 @@ class OcfVersionToOcfUnified(TestCase):
+                     obsoletes=None,
+                     unique=None,
+                 ),
++                ra.types.ResourceAgentParameterOcf1_0(
++                    name="",
++                    shortdesc="Parameters with no name are ignored",
++                    longdesc=None,
++                    type="string",
++                    default=None,
++                    enum_values=None,
++                    required=None,
++                    deprecated=None,
++                    obsoletes=None,
++                    unique=None,
++                ),
+                 ra.types.ResourceAgentParameterOcf1_0(
+                     name="param_2",
+                     shortdesc="param_2 shortdesc",
+@@ -109,10 +121,10 @@ class OcfVersionToOcfUnified(TestCase):
+                     type="string",
+                     default=None,
+                     enum_values=None,
+-                    required="1",
+-                    deprecated="1",
++                    required="yeS",
++                    deprecated="True",
+                     obsoletes="param_4",
+-                    unique="1",
++                    unique="on",
+                 ),
+                 ra.types.ResourceAgentParameterOcf1_0(
+                     name="param_6",
+@@ -138,6 +150,16 @@ class OcfVersionToOcfUnified(TestCase):
+                     automatic=None,
+                     on_target=None,
+                 ),
++                ra.types.ResourceAgentActionOcf1_0(
++                    name="",
++                    timeout=None,
++                    interval=None,
++                    role=None,
++                    start_delay=None,
++                    depth=None,
++                    automatic=None,
++                    on_target=None,
++                ),
+                 ra.types.ResourceAgentActionOcf1_0(
+                     name="action_2",
+                     timeout="12",
+@@ -158,6 +180,16 @@ class OcfVersionToOcfUnified(TestCase):
+                     automatic="1",
+                     on_target="0",
+                 ),
++                ra.types.ResourceAgentActionOcf1_0(
++                    name="action_4",
++                    timeout=None,
++                    interval=None,
++                    role=None,
++                    start_delay=None,
++                    depth=None,
++                    automatic="yes",
++                    on_target="True",
++                ),
+             ],
+         )
+         metadata_out = ra.ResourceAgentMetadata(
+@@ -289,6 +321,16 @@ class OcfVersionToOcfUnified(TestCase):
+                     automatic=True,
+                     on_target=False,
+                 ),
++                ra.ResourceAgentAction(
++                    name="action_4",
++                    timeout=None,
++                    interval=None,
++                    role=None,
++                    start_delay=None,
++                    depth=None,
++                    automatic=True,
++                    on_target=True,
++                ),
+             ],
+         )
+         self.assertEqual(
+diff --git a/pcs_test/tier0/lib/resource_agent/test_xml.py b/pcs_test/tier0/lib/resource_agent/test_xml.py
+index 26bbbb7d..ea055ee2 100644
+--- a/pcs_test/tier0/lib/resource_agent/test_xml.py
++++ b/pcs_test/tier0/lib/resource_agent/test_xml.py
+@@ -164,8 +164,13 @@ class MetadataXmlToDom(TestCase):
+             ra.xml._metadata_xml_to_dom("not an xml")
+ 
+     def test_no_version_not_valid(self):
+-        with self.assertRaises(etree.DocumentInvalid):
+-            ra.xml._metadata_xml_to_dom("<resource-agent/>")
++        # pylint: disable=no-self-use
++        metadata = """
++            <resource-agent/>
++        """
++        assert_xml_equal(
++            metadata, etree_to_str(ra.xml._metadata_xml_to_dom(metadata))
++        )
+ 
+     def test_no_version_valid(self):
+         # pylint: disable=no-self-use
+@@ -178,14 +183,15 @@ class MetadataXmlToDom(TestCase):
+         )
+ 
+     def test_ocf_1_0_not_valid(self):
+-        with self.assertRaises(etree.DocumentInvalid):
+-            ra.xml._metadata_xml_to_dom(
+-                """
+-                    <resource-agent>
+-                        <version>1.0</version>
+-                    </resource-agent>
+-                """
+-            )
++        # pylint: disable=no-self-use
++        metadata = """
++            <resource-agent>
++                <version>1.0</version>
++            </resource-agent>
++        """
++        assert_xml_equal(
++            metadata, etree_to_str(ra.xml._metadata_xml_to_dom(metadata))
++        )
+ 
+     def test_ocf_1_0_valid(self):
+         # pylint: disable=no-self-use
+@@ -273,19 +279,16 @@ class LoadMetadata(TestCase):
+ 
+     def test_not_valid_xml(self):
+         agent_name = ra.ResourceAgentName("ocf", "pacemaker", "Dummy")
++        metadata = "<resource-agent/>"
+         self.config.runner.pcmk.load_agent(
+             agent_name="ocf:pacemaker:Dummy",
+-            stdout="<resource-agent/>",
++            stdout=metadata,
+         )
+ 
+         env = self.env_assist.get_env()
+-        with self.assertRaises(ra.UnableToGetAgentMetadata) as cm:
+-            ra.xml.load_metadata(env.cmd_runner(), agent_name)
+-        self.assertEqual(cm.exception.agent_name, "ocf:pacemaker:Dummy")
+-        self.assertTrue(
+-            cm.exception.message.startswith(
+-                "Element resource-agent failed to validate"
+-            )
++        assert_xml_equal(
++            metadata,
++            etree_to_str(ra.xml.load_metadata(env.cmd_runner(), agent_name)),
+         )
+ 
+ 
+@@ -335,16 +338,15 @@ class LoadFakeAgentMetadata(TestCase):
+ 
+     def test_not_valid_xml(self):
+         agent_name = ra.const.PACEMAKER_FENCED
+-        self.config.runner.pcmk.load_fenced_metadata(stdout="<resource-agent/>")
++        metadata = "<resource-agent/>"
++        self.config.runner.pcmk.load_fenced_metadata(stdout=metadata)
+ 
+         env = self.env_assist.get_env()
+-        with self.assertRaises(ra.UnableToGetAgentMetadata) as cm:
+-            ra.xml.load_fake_agent_metadata(env.cmd_runner(), agent_name)
+-        self.assertEqual(cm.exception.agent_name, "pacemaker-fenced")
+-        self.assertTrue(
+-            cm.exception.message.startswith(
+-                "Element resource-agent failed to validate"
+-            )
++        assert_xml_equal(
++            metadata,
++            etree_to_str(
++                ra.xml.load_fake_agent_metadata(env.cmd_runner(), agent_name)
++            ),
+         )
+ 
+ 
+@@ -549,19 +551,37 @@ class ParseOcf10BaseMixin(ParseOcfToolsMixin):
+         )
+ 
+     def test_parameters_empty_parameter(self):
+-        # parameters must have at least 'name' attribute
+-        with self.assertRaises(ra.UnableToGetAgentMetadata):
+-            self.parse(
+-                self.xml(
+-                    """
+-                        <resource-agent>
+-                            <parameters>
+-                                <parameter/>
+-                            </parameters>
+-                        </resource-agent>
+-                    """
+-                )
+-            )
++        self.assert_parse_result(
++            self.xml(
++                """
++                    <resource-agent>
++                        <parameters>
++                            <parameter/>
++                        </parameters>
++                    </resource-agent>
++                """
++            ),
++            ResourceAgentMetadataOcf1_0(
++                self.agent_name,
++                shortdesc=None,
++                longdesc=None,
++                parameters=[
++                    ResourceAgentParameterOcf1_0(
++                        name="",
++                        shortdesc=None,
++                        longdesc=None,
++                        type="string",
++                        default=None,
++                        enum_values=None,
++                        required=None,
++                        deprecated=None,
++                        obsoletes=None,
++                        unique=None,
++                    )
++                ],
++                actions=[],
++            ),
++        )
+ 
+     def test_parameters_minimal(self):
+         self.assert_parse_result(
+@@ -708,19 +728,35 @@ class ParseOcf10BaseMixin(ParseOcfToolsMixin):
+         )
+ 
+     def test_actions_empty_action(self):
+-        # actions must have at least 'name' attribute
+-        with self.assertRaises(ra.UnableToGetAgentMetadata):
+-            self.parse(
+-                self.xml(
+-                    """
+-                        <resource-agent>
+-                            <actions>
+-                                <action/>
+-                            </actions>
+-                        </resource-agent>
+-                    """
+-                )
+-            )
++        self.assert_parse_result(
++            self.xml(
++                """
++                    <resource-agent>
++                        <actions>
++                            <action/>
++                        </actions>
++                    </resource-agent>
++                """
++            ),
++            ResourceAgentMetadataOcf1_0(
++                self.agent_name,
++                shortdesc=None,
++                longdesc=None,
++                parameters=[],
++                actions=[
++                    ResourceAgentActionOcf1_0(
++                        name="",
++                        timeout=None,
++                        interval=None,
++                        role=None,
++                        start_delay=None,
++                        depth=None,
++                        automatic=None,
++                        on_target=None,
++                    ),
++                ],
++            ),
++        )
+ 
+     def test_actions_multiple(self):
+         self.assert_parse_result(
+@@ -787,21 +823,6 @@ class ParseOcf10NoVersion(ParseOcf10BaseMixin, TestCase):
+ class ParseOcf10UnsupportedVersion(ParseOcf10BaseMixin, TestCase):
+     ocf_version = "0.1.2"
+ 
+-    # These tests test that pcs raises an error if an agent doesn't conform to
+-    # OCF schema. There is, however, no validation against OCF schema for
+-    # agents with unsupported OCF version. That means no error message, pcs
+-    # tries to process the agent and crashes. However bad that sounds, it's
+-    # intended as that's how pcs behaved before OCF 1.1 was implemented.
+-    # There's therefore no point in running these tests.
+-
+-    def test_parameters_empty_parameter(self):
+-        # parameters must have at least 'name' attribute
+-        pass
+-
+-    def test_actions_empty_action(self):
+-        # actions must have at least 'name' attribute
+-        pass
+-
+ 
+ class ParseOcf10ExplicitVersion(ParseOcf10BaseMixin, TestCase):
+     ocf_version = "1.0"
+-- 
+2.34.1
+
diff --git a/SOURCES/do-not-support-cluster-setup-with-udp-u-transport.patch b/SOURCES/do-not-support-cluster-setup-with-udp-u-transport.patch
index a23583a..a25b509 100644
--- a/SOURCES/do-not-support-cluster-setup-with-udp-u-transport.patch
+++ b/SOURCES/do-not-support-cluster-setup-with-udp-u-transport.patch
@@ -1,7 +1,7 @@
-From e46d60cb36cb8ca4b153f75caa20b165945b1d26 Mon Sep 17 00:00:00 2001
+From f7230b92c946add84ed6072c20a4df5d97c77de2 Mon Sep 17 00:00:00 2001
 From: Ivan Devat <idevat@redhat.com>
 Date: Tue, 20 Nov 2018 15:03:56 +0100
-Subject: [PATCH 2/2] do not support cluster setup with udp(u) transport
+Subject: [PATCH 3/3] do not support cluster setup with udp(u) transport
 
 ---
  pcs/pcs.8.in              | 2 ++
@@ -10,10 +10,10 @@ Subject: [PATCH 2/2] do not support cluster setup with udp(u) transport
  3 files changed, 6 insertions(+)
 
 diff --git a/pcs/pcs.8.in b/pcs/pcs.8.in
-index 1695d75c..80d165fc 100644
+index 05ee66db..101d66f7 100644
 --- a/pcs/pcs.8.in
 +++ b/pcs/pcs.8.in
-@@ -429,6 +429,8 @@ By default, encryption is enabled with cipher=aes256 and hash=sha256. To disable
+@@ -436,6 +436,8 @@ By default, encryption is enabled with cipher=aes256 and hash=sha256. To disable
  
  Transports udp and udpu:
  .br
@@ -23,10 +23,10 @@ index 1695d75c..80d165fc 100644
  .br
  Transport options are: ip_version, netmtu
 diff --git a/pcs/usage.py b/pcs/usage.py
-index 66e097f1..783d926d 100644
+index 78bb5ee7..b6f3dd10 100644
 --- a/pcs/usage.py
 +++ b/pcs/usage.py
-@@ -872,6 +872,7 @@ Commands:
+@@ -890,6 +890,7 @@ Commands:
              hash=sha256. To disable encryption, set cipher=none and hash=none.
  
          Transports udp and udpu:
@@ -49,5 +49,5 @@ index 2f26e831..a7702ac4 100644
  #csetup-transport-options.knet .without-knet
  {
 -- 
-2.31.1
+2.34.1
 
diff --git a/SOURCES/simplify-ternar-expression.patch b/SOURCES/simplify-ternar-expression.patch
new file mode 100644
index 0000000..0835fbd
--- /dev/null
+++ b/SOURCES/simplify-ternar-expression.patch
@@ -0,0 +1,26 @@
+From f44cdc871a39da3960bd04565b4d1d5ffa19bd23 Mon Sep 17 00:00:00 2001
+From: Ivan Devat <idevat@redhat.com>
+Date: Thu, 20 Jan 2022 13:32:49 +0100
+Subject: [PATCH 1/2] simplify ternar expression
+
+The motivation for this is that covscan complains about it.
+---
+ src/app/view/share/useUrlTabs.ts | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/src/app/view/share/useUrlTabs.ts b/src/app/view/share/useUrlTabs.ts
+index 7278dad8..a1136bf3 100644
+--- a/src/app/view/share/useUrlTabs.ts
++++ b/src/app/view/share/useUrlTabs.ts
+@@ -13,7 +13,7 @@ export const useUrlTabs = <TABS extends ReadonlyArray<string>>(
+ 
+   return {
+     currentTab,
+-    matchedContext: tab !== null ? tab.matched : `/${defaultTab}`,
++    matchedContext: tab?.matched ?? `/${defaultTab}`,
+     tabList,
+   };
+ };
+-- 
+2.31.1
+
diff --git a/SPECS/pcs.spec b/SPECS/pcs.spec
index d43ce81..e5441aa 100644
--- a/SPECS/pcs.spec
+++ b/SPECS/pcs.spec
@@ -1,6 +1,6 @@
 Name: pcs
-Version: 0.10.10
-Release: 4%{?dist}.1
+Version: 0.10.12
+Release: 6%{?dist}
 # https://docs.fedoraproject.org/en-US/packaging-guidelines/LicensingGuidelines/
 # https://fedoraproject.org/wiki/Licensing:Main?rd=Licensing#Good_Licenses
 # GPLv2: pcs
@@ -20,13 +20,13 @@ Summary: Pacemaker Configuration System
 ExclusiveArch: i686 x86_64 s390x ppc64le aarch64
 
 %global version_or_commit %{version}
-# %%global version_or_commit %%{version}.210-9862
+# %%global version_or_commit %%{version}.22-9d83b
 
 %global pcs_source_name %{name}-%{version_or_commit}
 
 # ui_commit can be determined by hash, tag or branch
-%global ui_commit 0.1.7
-%global ui_modules_version 0.1.7
+%global ui_commit 0.1.12
+%global ui_modules_version 0.1.12
 %global ui_src_name pcs-web-ui-%{ui_commit}
 
 %global pcs_snmp_pkg_name  pcs-snmp
@@ -115,16 +115,26 @@ Source101: https://github.com/ClusterLabs/pcs-web-ui/releases/download/%{ui_modu
 # They should come before downstream patches to avoid unnecessary conflicts.
 # Z-streams are exception here: they can come from upstream but should be
 # applied at the end to keep z-stream changes as straightforward as possible.
+
+# pcs patches: <= 200
 # Patch1: bzNUMBER-01-name.patch
-Patch1: bz1998454-01-fix-creating-resources-with-depth-operation-attribut.patch
-Patch2: add-missing-file-test_stonith_update_scsi_devices.py.patch
-Patch3: bz1992668-01-add-add-remove-syntax-for-command-pcs-stonith-update.patch
-Patch4: bz1991654-01-fix-unfencing-in-pcs-stonith-update-scsi-devices.patch
+Patch1: bz2028902-01-fix-enabling-corosync-qdevice.patch
+Patch2: bz1384485-01-fix-rsc-update-cmd-when-unable-to-get-agent-metadata.patch
+Patch3: bz2032997-01-skip-checking-of-scsi-devices-to-be-removed.patch
+Patch4: bz2036633-01-Make-ocf-linbit-drbd-agent-pass-OCF-validation.patch
+Patch5: bz1990784-01-Multiple-fixes-of-pcs-resource-move-autodelete-comma.patch
+Patch6: bz2022463-01-fix-creating-empty-cib.patch
+Patch7: bz2047983-01-Fix-snmp-client.patch
+Patch8: bz2050274-01-process-invalid-OCF-agents-the-same-way-as-before.patch
+Patch9: bz2050274-02-relax-OCF-1.0-parser.patch
 
 # Downstream patches do not come from upstream. They adapt pcs for specific
 # RHEL needs.
 Patch101: do-not-support-cluster-setup-with-udp-u-transport.patch
-Patch102: bz2042433-01-fix-creating-empty-cib.patch
+
+# ui patches: >200
+Patch201: simplify-ternar-expression.patch
+Patch202: bz2044409-01-fix-backend-parameter-all-in-cluster-destroy.patch
 
 # git for patches
 BuildRequires: git-core
@@ -266,8 +276,6 @@ Provides: bundled(pyagentx) = %{pyagentx_version}
 SNMP agent that provides information about pacemaker cluster to the master agent (snmpd)
 
 %prep
-%autosetup -p1 -S git -n %{pcs_source_name}
-
 # -- following is inspired by python-simplejson.el5 --
 # Update timestamps on the files touched by a patch, to avoid non-equal
 # .pyc/.pyo files across the multilib peers within a build
@@ -307,18 +315,32 @@ update_times_patch(){
   update_times ${patch_file_name} `diffstat -p1 -l ${patch_file_name}`
 }
 
+# documentation for setup/autosetup/autopatch:
+#   * http://ftp.rpm.org/max-rpm/s1-rpm-inside-macros.html
+#   * https://rpm-software-management.github.io/rpm/manual/autosetup.html
+# patch web-ui sources
+%autosetup -D -T -b 100 -a 101 -S git -n %{ui_src_name} -N
+%autopatch -p1 -m 201
+# update_times_patch %%{PATCH201}
+update_times_patch %{PATCH201}
+update_times_patch %{PATCH202}
+
+# patch pcs sources
+%autosetup -S git -n %{pcs_source_name} -N
+%autopatch -p1 -M 200
+
 update_times_patch %{PATCH1}
 update_times_patch %{PATCH2}
 update_times_patch %{PATCH3}
 update_times_patch %{PATCH4}
+update_times_patch %{PATCH5}
+update_times_patch %{PATCH6}
+update_times_patch %{PATCH7}
+update_times_patch %{PATCH8}
+update_times_patch %{PATCH9}
 update_times_patch %{PATCH101}
-update_times_patch %{PATCH102}
 
 cp -f %SOURCE1 %{pcsd_public_dir}/images
-# prepare dirs/files necessary for building web ui
-# inside SOURCE100 is only directory %%{ui_src_name}
-tar -xzf %SOURCE100 -C %{pcsd_public_dir}
-tar -xf %SOURCE101 -C %{pcsd_public_dir}/%{ui_src_name}
 
 # prepare dirs/files necessary for building all bundles
 # -----------------------------------------------------
@@ -357,19 +379,20 @@ cp -f %SOURCE45 rpm/
 %define debug_package %{nil}
 
 ./autogen.sh
-%{configure} --enable-local-build --enable-use-local-cache-only --enable-individual-bundling PYTHON=%{__python3}
+%{configure} --enable-local-build --enable-use-local-cache-only --enable-individual-bundling PYTHON=%{__python3} ruby_CFLAGS="%{optflags}" ruby_LIBS="%{build_ldflags}"
 make all
 
+# build pcs-web-ui
+make -C %{_builddir}/%{ui_src_name} build BUILD_USE_EXISTING_NODE_MODULES=true
+
 %install
 rm -rf $RPM_BUILD_ROOT
 pwd
 
 %make_install
 
-# build web ui and put it to pcsd
-make -C %{pcsd_public_dir}/%{ui_src_name} build
-mv %{pcsd_public_dir}/%{ui_src_name}/build  ${RPM_BUILD_ROOT}%{_libdir}/%{pcsd_public_dir}/ui
-rm -r %{pcsd_public_dir}/%{ui_src_name}
+# install the built pcs-web-ui into the buildroot (manual equivalent of make install)
+cp -r %{_builddir}/%{ui_src_name}/build  ${RPM_BUILD_ROOT}%{_libdir}/%{pcsd_public_dir}/ui
 
 # prepare license files
 # some rubygems do not have a license file (ruby2_keywords, thin)
@@ -429,16 +452,6 @@ rm -rf $RPM_BUILD_ROOT%{_libdir}/%{rubygem_bundle_dir}/gems/json-%{version_rubyg
 rm -rf $RPM_BUILD_ROOT%{_libdir}/%{rubygem_bundle_dir}/gems/thin-%{version_rubygem_thin}/ext
 
 %check
-# In the building environment LC_CTYPE is set to C which causes tests to fail
-# due to python prints a warning about it to stderr. The following environment
-# variable disables the warning.
-# On the live system either UTF8 locale is set or the warning is emmited
-# which breaks pcs. That is the correct behavior since with wrong locales it
-# would be probably broken anyway.
-# The main concern here is to make the tests pass.
-# See https://fedoraproject.org/wiki/Changes/python3_c.utf-8_locale for details.
-export PYTHONCOERCECLOCALE=0
-
 run_all_tests(){
   #run pcs tests
 
@@ -450,7 +463,6 @@ run_all_tests(){
   #   TODO: Investigate the issue
 
     %{__python3} pcs_test/suite --tier0 -v --vanilla --all-but \
-    pcs_test.tier0.lib.commands.test_resource_agent.DescribeAgentUtf8.test_describe \
     pcs_test.tier0.daemon.app.test_app_remote.SyncConfigMutualExclusive.test_get_not_locked \
     pcs_test.tier0.daemon.app.test_app_remote.SyncConfigMutualExclusive.test_post_not_locked \
 
@@ -575,17 +587,47 @@ remove_all_tests
 %license pyagentx_LICENSE.txt
 
 %changelog
-* Wed Jan 26 2022 Miroslav Lisik <mlisik@redhat.com> - 0.10.10-4.el8_5.1
+* Fri Feb 11 2022 Miroslav Lisik <mlisik@redhat.com> - 0.10.12-6
+- Fixed processing of agents not conforming to the OCF schema
+- Resolves: rhbz#2050274
+
+* Tue Feb 01 2022 Miroslav Lisik <mlisik@redhat.com> - 0.10.12-5
+- Fixed snmp client
+- Resolves: rhbz#2047983
+
+* Tue Jan 25 2022 Miroslav Lisik <mlisik@redhat.com> - 0.10.12-4
+- Fixed cluster destroy in web ui
+- Fixed covscan issue in web ui
+- Resolves: rhbz#1970508
+
+* Fri Jan 14 2022 Miroslav Lisik <mlisik@redhat.com> - 0.10.12-3
+- Fixed 'pcs resource move --autodelete' command
+- Fixed removal of an unavailable fence-scsi storage device
+- Fixed OCF validation of the ocf:linbit:drbd agent
 - Fixed creating empty cib
-- Resolves: rhbz#bz2042433
+- Updated pcs-web-ui
+- Resolves: rhbz#1990784 rhbz#2022463 rhbz#2032997 rhbz#2036633
 
-* Fri Sep 24 2021 Miroslav Lisik <mlisik@redhat.com> - 0.10.10-4
-- Fixed unfencing in `pcs stonith update-scsi-devices`
-- Resolves: rhbz#bz1991654
+* Wed Dec 15 2021 Miroslav Lisik <mlisik@redhat.com> - 0.10.12-2
+- Fixed resource update command when unable to get agent metadata
+- Fixed enabling corosync-qdevice
+- Resolves: rhbz#1384485 rhbz#2028902
 
-* Fri Sep 10 2021 Miroslav Lisik <mlisik@redhat.com> - 0.10.10-3
-- Added add/remove syntax for command `pcs stonith update-scsi-devices`
-- Resolves: rhbz#1992668
+* Thu Dec 02 2021 Miroslav Lisik <mlisik@redhat.com> - 0.10.12-1
+- Rebased to latest upstream sources (see CHANGELOG.md)
+- Updated pcs-web-ui
+- Resolves: rhbz#1552470 rhbz#1997011 rhbz#2017311 rhbz#2017312 rhbz#2024543 rhbz#2012128
+
+* Mon Nov 22 2021 Miroslav Lisik <mlisik@redhat.com> - 0.10.11-2
+- Rebased to latest upstream sources (see CHANGELOG.md)
+- Removed 'export PYTHONCOERCECLOCALE=0'
+- Resolves: rhbz#1384485 rhbz#1936833 rhbz#1968088 rhbz#1990784 rhbz#2012128
+
+* Mon Nov 01 2021 Miroslav Lisik <mlisik@redhat.com> - 0.10.11-1
+- Rebased to latest upstream sources (see CHANGELOG.md)
+- Updated pcs-web-ui
+- Enabled web ui patching
+- Resolves: rhbz#1533090 rhbz#1970508 rhbz#1997011 rhbz#2003066 rhbz#2003068 rhbz#2012128
 
 * Fri Aug 27 2021 Miroslav Lisik <mlisik@redhat.com> - 0.10.10-2
 - Fixed create resources with depth operation attribute