From db91fd68f1baa7b19f06dc8156822430decce4e7 Mon Sep 17 00:00:00 2001
From: Miroslav Lisik <mlisik@redhat.com>
Date: Thu, 2 Sep 2021 10:29:59 +0200
Subject: [PATCH 1/2] update

---
 Makefile.am                                   |   9 +-
 configure.ac                                  |   5 +
 pcs/config.py                                 |  13 +-
 pcs/lib/communication/corosync.py             |   8 +-
 pcs/utils.py                                  |   4 +-
 pcs_test/suite.py                             |  70 ++
 .../cluster/test_add_nodes_validation.py      |  18 +-
 .../test_stonith_update_scsi_devices.py       |  11 +-
 pcs_test/tier0/lib/test_env_corosync.py       | 618 ++++++++--------
 pcs_test/tier1/legacy/test_constraints.py     |  76 +-
 pcs_test/tier1/legacy/test_resource.py        |  48 +-
 pcs_test/tier1/legacy/test_stonith.py         |  71 +-
 .../tools/command_env/config_http_corosync.py |  23 +-
 pcs_test/tools/fixture_cib.py                 |  65 ++
 pcsd/Makefile.am                              |   1 -
 pcsd/capabilities.xml                         |   7 -
 pcsd/fenceagent.rb                            |  59 --
 pcsd/pcs.rb                                   |  15 -
 pcsd/pcsd.rb                                  | 671 +-----------------
 pcsd/remote.rb                                | 559 +--------------
 pcsd/resource.rb                              |   3 -
 pcsd/rserver.rb                               |   1 -
 pcsd/test/test_resource.rb                    |   4 -
 23 files changed, 634 insertions(+), 1725 deletions(-)
 delete mode 100644 pcsd/fenceagent.rb

diff --git a/Makefile.am b/Makefile.am
index 6aede970..34692969 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -188,8 +188,13 @@ endif
 
 pylint:
 if DEV_TESTS
+if PARALLEL_PYLINT
+pylint_options = --jobs=0
+else
+pylint_options = 
+endif
 	export PYTHONPATH=${abs_top_builddir}/${PCS_BUNDLED_DIR_LOCAL}/packages && \
-		$(TIME) $(PYTHON) -m pylint --rcfile pylintrc --persistent=n --reports=n --score=n --disable similarities ${PCS_PYTHON_PACKAGES}
+		$(TIME) $(PYTHON) -m pylint --rcfile pylintrc --persistent=n --reports=n --score=n --disable similarities ${pylint_options} ${PCS_PYTHON_PACKAGES}
 endif
 
 
@@ -213,7 +218,7 @@ endif
 
 tests_tier0:
 	export PYTHONPATH=${abs_top_builddir}/${PCS_BUNDLED_DIR_LOCAL}/packages && \
-		$(PYTHON) ${abs_builddir}/pcs_test/suite.py $(python_test_options) --tier0
+		$(PYTHON) ${abs_builddir}/pcs_test/suite.py ${python_test_options} --tier0
 
 tests_tier1:
 if EXECUTE_TIER1_TESTS
diff --git a/configure.ac b/configure.ac
index f7b9d1ad..75d65616 100644
--- a/configure.ac
+++ b/configure.ac
@@ -148,6 +148,11 @@ AC_ARG_ENABLE([parallel-tests],
 	      [parallel_tests="yes"])
 AM_CONDITIONAL([PARALLEL_TESTS], [test "x$parallel_tests" = "xyes"])
 
+AC_ARG_ENABLE([parallel-pylint],
+	      [AS_HELP_STRING([--enable-parallel-pylint], [Enable running pylint in multiple threads (default: no)])],
+	      [parallel_pylint="yes"])
+AM_CONDITIONAL([PARALLEL_PYLINT], [test "x$parallel_pylint" = "xyes"])
+
 AC_ARG_ENABLE([local-build],
 	      [AS_HELP_STRING([--enable-local-build], [Download and install all dependencies as user / bundles])],
 	      [local_build="yes"])
diff --git a/pcs/config.py b/pcs/config.py
index a0290499..a3e7e164 100644
--- a/pcs/config.py
+++ b/pcs/config.py
@@ -345,12 +345,13 @@ def config_restore_remote(infile_name, infile_obj):
                 err_msgs.append(output)
                 continue
             _status = json.loads(output)
-            if (
-                _status["corosync"]
-                or _status["pacemaker"]
-                or
-                # not supported by older pcsd, do not fail if not present
-                _status.get("pacemaker_remote", False)
+            if any(
+                _status["node"]["services"][service_name]["running"]
+                for service_name in (
+                    "corosync",
+                    "pacemaker",
+                    "pacemaker_remote",
+                )
             ):
                 err_msgs.append(
                     "Cluster is currently running on node %s. You need to stop "
diff --git a/pcs/lib/communication/corosync.py b/pcs/lib/communication/corosync.py
index fab8e38f..e2a2949c 100644
--- a/pcs/lib/communication/corosync.py
+++ b/pcs/lib/communication/corosync.py
@@ -28,7 +28,7 @@ class CheckCorosyncOffline(
             self._set_skip_offline(skip_offline_targets)
 
     def _get_request_data(self):
-        return RequestData("remote/status")
+        return RequestData("remote/status", [("version", "2")])
 
     def _process_response(self, response):
         report_item = self._get_response_report(response)
@@ -53,7 +53,7 @@ class CheckCorosyncOffline(
             return
         try:
             status = response.data
-            if not json.loads(status)["corosync"]:
+            if not json.loads(status)["node"]["corosync"]:
                 report_item = ReportItem.info(
                     reports.messages.CorosyncNotRunningOnNode(node_label),
                 )
@@ -94,7 +94,7 @@ class GetCorosyncOnlineTargets(
         self._corosync_online_target_list = []
 
     def _get_request_data(self):
-        return RequestData("remote/status")
+        return RequestData("remote/status", [("version", "2")])
 
     def _process_response(self, response):
         report_item = self._get_response_report(response)
@@ -103,7 +103,7 @@ class GetCorosyncOnlineTargets(
             return
         try:
             status = response.data
-            if json.loads(status)["corosync"]:
+            if json.loads(status)["node"]["corosync"]:
                 self._corosync_online_target_list.append(
                     response.request.target
                 )
diff --git a/pcs/utils.py b/pcs/utils.py
index ef778b52..7774016e 100644
--- a/pcs/utils.py
+++ b/pcs/utils.py
@@ -186,7 +186,9 @@ def checkStatus(node):
     Commandline options:
       * --request-timeout - timeout for HTTP requests
     """
-    return sendHTTPRequest(node, "remote/status", None, False, False)
+    return sendHTTPRequest(
+        node, "remote/status", urlencode({"version": "2"}), False, False
+    )
 
 
 # Check and see if we're authorized (faster than a status check)
diff --git a/pcs_test/suite.py b/pcs_test/suite.py
index 75ab66cd..bd98b8b0 100644
--- a/pcs_test/suite.py
+++ b/pcs_test/suite.py
@@ -1,6 +1,8 @@
 import importlib
 import os
 import sys
+from threading import Thread
+import time
 import unittest
 
 try:
@@ -84,6 +86,67 @@ def discover_tests(
     return unittest.TestLoader().loadTestsFromNames(explicitly_enumerated_tests)
 
 
+def tier1_fixtures_needed(test_list):
+    for test_name in tests_from_suite(test_list):
+        if test_name.startswith("pcs_test.tier1.legacy."):
+            return True
+    return False
+
+
+def run_tier1_fixtures(run_concurrently=True):
+    # pylint: disable=import-outside-toplevel
+    from pcs_test.tier1.legacy.test_constraints import (
+        CONSTRAINT_TEST_CIB_FIXTURE,
+    )
+    from pcs_test.tier1.legacy.test_resource import RESOURCE_TEST_CIB_FIXTURE
+    from pcs_test.tier1.legacy.test_stonith import (
+        STONITH_LEVEL_TEST_CIB_FIXTURE,
+    )
+
+    fixture_instances = [
+        CONSTRAINT_TEST_CIB_FIXTURE,
+        RESOURCE_TEST_CIB_FIXTURE,
+        STONITH_LEVEL_TEST_CIB_FIXTURE,
+    ]
+    print("Preparing tier1 fixtures...")
+    time_start = time.time()
+    if run_concurrently:
+        thread_list = []
+        for instance in fixture_instances:
+            thread = Thread(target=instance.set_up)
+            thread.daemon = True
+            thread.start()
+            thread_list.append(thread)
+        timeout_counter = 30  # 30 * 10s = 5min
+        while thread_list:
+            if timeout_counter < 0:
+                raise AssertionError("Fixture threads seem to be stuck :(")
+            for thread in thread_list:
+                thread.join(timeout=10)
+                sys.stdout.write(".")
+                sys.stdout.flush()
+                timeout_counter -= 1
+                if not thread.is_alive():
+                    thread_list.remove(thread)
+                    continue
+
+    else:
+        for instance in fixture_instances:
+            instance.set_up()
+    time_stop = time.time()
+    time_taken = time_stop - time_start
+    sys.stdout.write("Tier1 fixtures prepared in %.3fs\n" % (time_taken))
+    sys.stdout.flush()
+
+    def cleanup():
+        print("Cleaning tier1 fixtures...", end=" ")
+        for instance in fixture_instances:
+            instance.clean_up()
+        print("done")
+
+    return cleanup
+
+
 def main():
     # pylint: disable=import-outside-toplevel
     if "BUNDLED_LIB_LOCATION" in os.environ:
@@ -141,6 +204,11 @@ def main():
         sys.exit()
 
     tests_to_run = discovered_tests
+    tier1_fixtures_cleanup = None
+    if tier1_fixtures_needed(tests_to_run):
+        tier1_fixtures_cleanup = run_tier1_fixtures(
+            run_concurrently=run_concurrently
+        )
     if run_concurrently:
         tests_to_run = ConcurrentTestSuite(
             discovered_tests,
@@ -174,6 +242,8 @@ def main():
         verbosity=2 if "-v" in sys.argv else 1, resultclass=ResultClass
     )
     test_result = test_runner.run(tests_to_run)
+    if tier1_fixtures_cleanup:
+        tier1_fixtures_cleanup()
     if not test_result.wasSuccessful():
         sys.exit(1)
 
diff --git a/pcs_test/tier0/lib/commands/cluster/test_add_nodes_validation.py b/pcs_test/tier0/lib/commands/cluster/test_add_nodes_validation.py
index c66a5dff..69cdeed2 100644
--- a/pcs_test/tier0/lib/commands/cluster/test_add_nodes_validation.py
+++ b/pcs_test/tier0/lib/commands/cluster/test_add_nodes_validation.py
@@ -14,6 +14,9 @@ from pcs_test.tier0.lib.commands.cluster.test_add_nodes import (
 )
 from pcs_test.tools import fixture
 from pcs_test.tools.command_env import get_env_tools
+from pcs_test.tools.command_env.config_http_corosync import (
+    corosync_running_check_response,
+)
 from pcs_test.tools.custom_mock import patch_getaddrinfo
 
 from pcs import settings
@@ -1170,7 +1173,10 @@ class ClusterStatus(TestCase):
             .local.read_sbd_config(name_sufix="_2")
             .http.corosync.check_corosync_offline(
                 communication_list=[
-                    {"label": "node1", "output": '{"corosync":true}'},
+                    {
+                        "label": "node1",
+                        "output": corosync_running_check_response(True),
+                    },
                     {"label": "node2", "output": "an error"},
                     {
                         "label": "node3",
@@ -1178,8 +1184,14 @@ class ClusterStatus(TestCase):
                         "errno": 7,
                         "error_msg": "an error",
                     },
-                    {"label": "node4", "output": '{"corosync":true}'},
-                    {"label": "node5", "output": '{"corosync":false}'},
+                    {
+                        "label": "node4",
+                        "output": corosync_running_check_response(True),
+                    },
+                    {
+                        "label": "node5",
+                        "output": corosync_running_check_response(False),
+                    },
                 ]
             )
             .local.get_host_info(new_nodes)
diff --git a/pcs_test/tier0/lib/commands/test_stonith_update_scsi_devices.py b/pcs_test/tier0/lib/commands/test_stonith_update_scsi_devices.py
index 3bc51325..593757d8 100644
--- a/pcs_test/tier0/lib/commands/test_stonith_update_scsi_devices.py
+++ b/pcs_test/tier0/lib/commands/test_stonith_update_scsi_devices.py
@@ -4,6 +4,9 @@ from unittest import mock, TestCase
 
 from pcs_test.tools import fixture
 from pcs_test.tools.command_env import get_env_tools
+from pcs_test.tools.command_env.config_http_corosync import (
+    corosync_running_check_response,
+)
 from pcs_test.tools.misc import get_test_resource as rc
 
 from pcs import settings
@@ -1013,7 +1016,7 @@ class TestUpdateScsiDevicesFailures(TestCase):
             communication_list=[
                 dict(
                     label=self.existing_nodes[0],
-                    output='{"corosync":true}',
+                    output=corosync_running_check_response(True),
                 ),
             ]
             + [
@@ -1052,11 +1055,11 @@ class TestUpdateScsiDevicesFailures(TestCase):
             communication_list=[
                 dict(
                     label=self.existing_nodes[0],
-                    output='{"corosync":true}',
+                    output=corosync_running_check_response(True),
                 ),
                 dict(
                     label=self.existing_nodes[1],
-                    output='{"corosync":false}',
+                    output=corosync_running_check_response(False),
                 ),
                 dict(
                     label=self.existing_nodes[2],
@@ -1122,7 +1125,7 @@ class TestUpdateScsiDevicesFailures(TestCase):
                 ),
                 dict(
                     label=self.existing_nodes[2],
-                    output='{"corosync":false}',
+                    output=corosync_running_check_response(False),
                 ),
             ]
         )
diff --git a/pcs_test/tier0/lib/test_env_corosync.py b/pcs_test/tier0/lib/test_env_corosync.py
index dafc63a0..7063ee80 100644
--- a/pcs_test/tier0/lib/test_env_corosync.py
+++ b/pcs_test/tier0/lib/test_env_corosync.py
@@ -14,6 +14,9 @@ from pcs.lib.corosync.config_parser import (
 from pcs_test.tools import fixture
 from pcs_test.tools.assertions import assert_raise_library_error
 from pcs_test.tools.command_env import get_env_tools
+from pcs_test.tools.command_env.config_http_corosync import (
+    corosync_running_check_response,
+)
 
 
 class PushCorosyncConfLiveBase(TestCase):
@@ -92,12 +95,11 @@ class PushCorosyncConfLiveNoQdeviceTest(PushCorosyncConfLiveBase):
         )
 
     def test_dont_need_stopped_cluster(self):
-        (
-            self.config.http.corosync.set_corosync_conf(
-                self.corosync_conf_text, node_labels=self.node_labels
-            ).http.corosync.reload_corosync_conf(
-                node_labels=self.node_labels[:1]
-            )
+        self.config.http.corosync.set_corosync_conf(
+            self.corosync_conf_text, node_labels=self.node_labels
+        )
+        self.config.http.corosync.reload_corosync_conf(
+            node_labels=self.node_labels[:1]
         )
         self.env_assistant.get_env().push_corosync_conf(
             self.corosync_conf_facade
@@ -114,26 +116,19 @@ class PushCorosyncConfLiveNoQdeviceTest(PushCorosyncConfLiveBase):
                     node="node-2",
                 ),
                 fixture.info(
-                    report_codes.COROSYNC_CONFIG_RELOADED, node="node-1"
+                    report_codes.COROSYNC_CONFIG_RELOADED,
+                    node="node-1",
                 ),
             ]
         )
 
     def test_dont_need_stopped_cluster_error(self):
-        (
-            self.config.http.corosync.set_corosync_conf(
-                self.corosync_conf_text,
-                communication_list=[
-                    {
-                        "label": "node-1",
-                    },
-                    {
-                        "label": "node-2",
-                        "response_code": 400,
-                        "output": "Failed",
-                    },
-                ],
-            )
+        self.config.http.corosync.set_corosync_conf(
+            self.corosync_conf_text,
+            communication_list=[
+                {"label": "node-1"},
+                {"label": "node-2", "response_code": 400, "output": "Failed"},
+            ],
         )
         env = self.env_assistant.get_env()
         self.env_assistant.assert_raise_library_error(
@@ -162,35 +157,28 @@ class PushCorosyncConfLiveNoQdeviceTest(PushCorosyncConfLiveBase):
         )
 
     def test_dont_need_stopped_cluster_error_skip_offline(self):
-        (
-            self.config.http.corosync.set_corosync_conf(
-                self.corosync_conf_text,
-                communication_list=[
+        self.config.http.corosync.set_corosync_conf(
+            self.corosync_conf_text,
+            communication_list=[
+                {
+                    "label": "node-1",
+                    "response_code": 400,
+                    "output": "Failed",
+                },
+                {"label": "node-2"},
+            ],
+        )
+        self.config.http.corosync.reload_corosync_conf(
+            communication_list=[
+                [
                     {
-                        "label": "node-1",
+                        "label": self.node_labels[0],
                         "response_code": 400,
                         "output": "Failed",
                     },
-                    {
-                        "label": "node-2",
-                    },
                 ],
-            ).http.corosync.reload_corosync_conf(
-                communication_list=[
-                    [
-                        {
-                            "label": self.node_labels[0],
-                            "response_code": 400,
-                            "output": "Failed",
-                        },
-                    ],
-                    [
-                        {
-                            "label": self.node_labels[1],
-                        },
-                    ],
-                ]
-            )
+                [{"label": self.node_labels[1]}],
+            ]
         )
         self.env_assistant.get_env().push_corosync_conf(
             self.corosync_conf_facade, skip_offline_nodes=True
@@ -219,33 +207,29 @@ class PushCorosyncConfLiveNoQdeviceTest(PushCorosyncConfLiveBase):
                     reason="Failed",
                 ),
                 fixture.info(
-                    report_codes.COROSYNC_CONFIG_RELOADED, node="node-2"
+                    report_codes.COROSYNC_CONFIG_RELOADED,
+                    node="node-2",
                 ),
             ]
         )
 
     def test_reload_on_another_node(self):
-        (
-            self.config.http.corosync.set_corosync_conf(
-                self.corosync_conf_text, node_labels=self.node_labels
-            ).http.corosync.reload_corosync_conf(
-                communication_list=[
-                    [
-                        {
-                            "label": self.node_labels[0],
-                            "response_code": 200,
-                            "output": json.dumps(
-                                dict(code="not_running", message="not running")
-                            ),
-                        },
-                    ],
-                    [
-                        {
-                            "label": self.node_labels[1],
-                        },
-                    ],
-                ]
-            )
+        self.config.http.corosync.set_corosync_conf(
+            self.corosync_conf_text, node_labels=self.node_labels
+        )
+        self.config.http.corosync.reload_corosync_conf(
+            communication_list=[
+                [
+                    {
+                        "label": self.node_labels[0],
+                        "response_code": 200,
+                        "output": json.dumps(
+                            dict(code="not_running", message="not running")
+                        ),
+                    },
+                ],
+                [{"label": self.node_labels[1]}],
+            ]
         )
         self.env_assistant.get_env().push_corosync_conf(
             self.corosync_conf_facade
@@ -266,35 +250,35 @@ class PushCorosyncConfLiveNoQdeviceTest(PushCorosyncConfLiveBase):
                     node="node-1",
                 ),
                 fixture.info(
-                    report_codes.COROSYNC_CONFIG_RELOADED, node="node-2"
+                    report_codes.COROSYNC_CONFIG_RELOADED,
+                    node="node-2",
                 ),
             ]
         )
 
     def test_reload_not_successful(self):
-        (
-            self.config.http.corosync.set_corosync_conf(
-                self.corosync_conf_text, node_labels=self.node_labels
-            ).http.corosync.reload_corosync_conf(
-                communication_list=[
-                    [
-                        {
-                            "label": self.node_labels[0],
-                            "response_code": 200,
-                            "output": json.dumps(
-                                dict(code="not_running", message="not running")
-                            ),
-                        },
-                    ],
-                    [
-                        {
-                            "label": self.node_labels[1],
-                            "response_code": 200,
-                            "output": "not a json",
-                        },
-                    ],
-                ]
-            )
+        self.config.http.corosync.set_corosync_conf(
+            self.corosync_conf_text, node_labels=self.node_labels
+        )
+        self.config.http.corosync.reload_corosync_conf(
+            communication_list=[
+                [
+                    {
+                        "label": self.node_labels[0],
+                        "response_code": 200,
+                        "output": json.dumps(
+                            dict(code="not_running", message="not running")
+                        ),
+                    },
+                ],
+                [
+                    {
+                        "label": self.node_labels[1],
+                        "response_code": 200,
+                        "output": "not a json",
+                    },
+                ],
+            ]
         )
         self.env_assistant.assert_raise_library_error(
             lambda: self.env_assistant.get_env().push_corosync_conf(
@@ -318,7 +302,8 @@ class PushCorosyncConfLiveNoQdeviceTest(PushCorosyncConfLiveBase):
                     node="node-1",
                 ),
                 fixture.warn(
-                    report_codes.INVALID_RESPONSE_FORMAT, node="node-2"
+                    report_codes.INVALID_RESPONSE_FORMAT,
+                    node="node-2",
                 ),
                 fixture.error(
                     report_codes.UNABLE_TO_PERFORM_OPERATION_ON_ANY_NODE
@@ -327,23 +312,22 @@ class PushCorosyncConfLiveNoQdeviceTest(PushCorosyncConfLiveBase):
         )
 
     def test_reload_corosync_not_running_anywhere(self):
-        (
-            self.config.http.corosync.set_corosync_conf(
-                self.corosync_conf_text, node_labels=self.node_labels
-            ).http.corosync.reload_corosync_conf(
-                communication_list=[
-                    [
-                        {
-                            "label": node,
-                            "response_code": 200,
-                            "output": json.dumps(
-                                dict(code="not_running", message="not running")
-                            ),
-                        },
-                    ]
-                    for node in self.node_labels
+        self.config.http.corosync.set_corosync_conf(
+            self.corosync_conf_text, node_labels=self.node_labels
+        )
+        self.config.http.corosync.reload_corosync_conf(
+            communication_list=[
+                [
+                    {
+                        "label": node,
+                        "response_code": 200,
+                        "output": json.dumps(
+                            dict(code="not_running", message="not running")
+                        ),
+                    },
                 ]
-            )
+                for node in self.node_labels
+            ]
         )
         self.env_assistant.get_env().push_corosync_conf(
             self.corosync_conf_facade
@@ -372,12 +356,11 @@ class PushCorosyncConfLiveNoQdeviceTest(PushCorosyncConfLiveBase):
 
     def test_need_stopped_cluster(self):
         self.corosync_conf_facade.need_stopped_cluster = True
-        (
-            self.config.http.corosync.check_corosync_offline(
-                node_labels=self.node_labels
-            ).http.corosync.set_corosync_conf(
-                self.corosync_conf_text, node_labels=self.node_labels
-            )
+        self.config.http.corosync.check_corosync_offline(
+            node_labels=self.node_labels
+        )
+        self.config.http.corosync.set_corosync_conf(
+            self.corosync_conf_text, node_labels=self.node_labels
         )
         self.env_assistant.get_env().push_corosync_conf(
             self.corosync_conf_facade
@@ -407,21 +390,14 @@ class PushCorosyncConfLiveNoQdeviceTest(PushCorosyncConfLiveBase):
 
     def test_need_stopped_cluster_not_stopped(self):
         self.corosync_conf_facade.need_stopped_cluster = True
-        (
-            self.config.http.corosync.check_corosync_offline(
-                communication_list=[
-                    {
-                        "label": self.node_labels[0],
-                        "output": '{"corosync":true}',
-                    }
-                ]
-                + [
-                    {
-                        "label": node,
-                    }
-                    for node in self.node_labels[1:]
-                ]
-            )
+        self.config.http.corosync.check_corosync_offline(
+            communication_list=[
+                {
+                    "label": self.node_labels[0],
+                    "output": corosync_running_check_response(True),
+                }
+            ]
+            + [{"label": node} for node in self.node_labels[1:]]
         )
         env = self.env_assistant.get_env()
         self.env_assistant.assert_raise_library_error(
@@ -445,18 +421,14 @@ class PushCorosyncConfLiveNoQdeviceTest(PushCorosyncConfLiveBase):
         # If we know for sure that corosync is running, skip_offline doesn't
         # matter.
         self.corosync_conf_facade.need_stopped_cluster = True
-        (
-            self.config.http.corosync.check_corosync_offline(
-                communication_list=[
-                    dict(
-                        label="node-1",
-                        output='{"corosync":true}',
-                    ),
-                    dict(
-                        label="node-2",
-                    ),
-                ]
-            )
+        self.config.http.corosync.check_corosync_offline(
+            communication_list=[
+                dict(
+                    label="node-1",
+                    output=corosync_running_check_response(True),
+                ),
+                dict(label="node-2"),
+            ]
         )
         env = self.env_assistant.get_env()
         self.env_assistant.assert_raise_library_error(
@@ -481,19 +453,17 @@ class PushCorosyncConfLiveNoQdeviceTest(PushCorosyncConfLiveBase):
 
     def test_need_stopped_cluster_json_error(self):
         self.corosync_conf_facade.need_stopped_cluster = True
-        (
-            self.config.http.corosync.check_corosync_offline(
-                communication_list=[
-                    dict(label="node-1", output="{"),  # not valid json
-                    dict(
-                        label="node-2",
-                        # The expected key (/corosync) is missing, we don't
-                        # care about version 2 status key
-                        # (/services/corosync/running)
-                        output='{"services":{"corosync":{"running":true}}}',
-                    ),
-                ]
-            )
+        self.config.http.corosync.check_corosync_offline(
+            communication_list=[
+                dict(label="node-1", output="{"),  # not valid json
+                dict(
+                    label="node-2",
+                    # The expected key (/corosync) is missing, tested code
+                    # doesn't care about a new key added in version 2 status
+                    # (/services/corosync/running)
+                    output='{"services":{"corosync":{"running":true}}}',
+                ),
+            ]
        )
         env = self.env_assistant.get_env()
         self.env_assistant.assert_raise_library_error(
@@ -517,19 +487,15 @@ class PushCorosyncConfLiveNoQdeviceTest(PushCorosyncConfLiveBase):
 
     def test_need_stopped_cluster_comunnication_failure(self):
         self.corosync_conf_facade.need_stopped_cluster = True
-        (
-            self.config.http.corosync.check_corosync_offline(
-                communication_list=[
672796
-                    dict(
672796
-                        label="node-1",
672796
-                    ),
672796
-                    dict(
672796
-                        label="node-2",
672796
-                        response_code=401,
672796
-                        output="""{"notauthorized":"true"}""",
672796
-                    ),
672796
-                ]
672796
-            )
672796
+        self.config.http.corosync.check_corosync_offline(
672796
+            communication_list=[
672796
+                dict(label="node-1"),
672796
+                dict(
672796
+                    label="node-2",
672796
+                    response_code=401,
672796
+                    output='{"notauthorized":"true"}',
672796
+                ),
672796
+            ]
672796
         )
672796
         env = self.env_assistant.get_env()
672796
         self.env_assistant.assert_raise_library_error(
672796
@@ -560,29 +526,26 @@ class PushCorosyncConfLiveNoQdeviceTest(PushCorosyncConfLiveBase):
672796
     def test_need_stopped_cluster_comunnication_failures_skip_offline(self):
672796
         # If we don't know if corosync is running, skip_offline matters.
672796
         self.corosync_conf_facade.need_stopped_cluster = True
672796
-        (
672796
-            self.config.http.corosync.check_corosync_offline(
672796
-                communication_list=[
672796
-                    dict(
672796
-                        label="node-1",
672796
-                        response_code=401,
672796
-                        output="""{"notauthorized":"true"}""",
672796
-                    ),
672796
-                    dict(label="node-2", output="{"),  # not valid json
672796
-                ]
672796
-            ).http.corosync.set_corosync_conf(
672796
-                self.corosync_conf_text,
672796
-                communication_list=[
672796
-                    dict(
672796
-                        label="node-1",
672796
-                        response_code=401,
672796
-                        output="""{"notauthorized":"true"}""",
672796
-                    ),
672796
-                    dict(
672796
-                        label="node-2",
672796
-                    ),
672796
-                ],
672796
-            )
672796
+        self.config.http.corosync.check_corosync_offline(
672796
+            communication_list=[
672796
+                dict(
672796
+                    label="node-1",
672796
+                    response_code=401,
672796
+                    output='{"notauthorized":"true"}',
672796
+                ),
672796
+                dict(label="node-2", output="{"),  # not valid json
672796
+            ]
672796
+        )
672796
+        self.config.http.corosync.set_corosync_conf(
672796
+            self.corosync_conf_text,
672796
+            communication_list=[
672796
+                dict(
672796
+                    label="node-1",
672796
+                    response_code=401,
672796
+                    output='{"notauthorized":"true"}',
672796
+                ),
672796
+                dict(label="node-2"),
672796
+            ],
672796
         )
672796
         self.env_assistant.get_env().push_corosync_conf(
672796
             self.corosync_conf_facade, skip_offline_nodes=True
672796
@@ -662,15 +625,17 @@ class PushCorosyncConfLiveWithQdeviceTest(PushCorosyncConfLiveBase):
672796
 
672796
     def test_qdevice_reload(self):
672796
         self.corosync_conf_facade.need_qdevice_reload = True
672796
-        (
672796
-            self.config.http.corosync.set_corosync_conf(
672796
-                self.corosync_conf_text, node_labels=self.node_labels
672796
-            )
672796
-            .http.corosync.reload_corosync_conf(
672796
-                node_labels=self.node_labels[:1]
672796
-            )
672796
-            .http.corosync.qdevice_client_stop(node_labels=self.node_labels)
672796
-            .http.corosync.qdevice_client_start(node_labels=self.node_labels)
672796
+        self.config.http.corosync.set_corosync_conf(
672796
+            self.corosync_conf_text, node_labels=self.node_labels
672796
+        )
672796
+        self.config.http.corosync.reload_corosync_conf(
672796
+            node_labels=self.node_labels[:1]
672796
+        )
672796
+        self.config.http.corosync.qdevice_client_stop(
672796
+            node_labels=self.node_labels
672796
+        )
672796
+        self.config.http.corosync.qdevice_client_start(
672796
+            node_labels=self.node_labels
672796
         )
672796
 
672796
         self.env_assistant.get_env().push_corosync_conf(
672796
@@ -689,7 +654,8 @@ class PushCorosyncConfLiveWithQdeviceTest(PushCorosyncConfLiveBase):
672796
                     node="node-2",
672796
                 ),
672796
                 fixture.info(
672796
-                    report_codes.COROSYNC_CONFIG_RELOADED, node="node-1"
672796
+                    report_codes.COROSYNC_CONFIG_RELOADED,
672796
+                    node="node-1",
672796
                 ),
672796
                 fixture.info(report_codes.QDEVICE_CLIENT_RELOAD_STARTED),
672796
                 fixture.info(
672796
@@ -725,34 +691,34 @@ class PushCorosyncConfLiveWithQdeviceTest(PushCorosyncConfLiveBase):
672796
 
672796
     def test_qdevice_reload_corosync_stopped(self):
672796
         self.corosync_conf_facade.need_qdevice_reload = True
672796
-        (
672796
-            self.config.http.corosync.set_corosync_conf(
672796
-                self.corosync_conf_text, node_labels=self.node_labels
672796
-            )
672796
-            .http.corosync.reload_corosync_conf(
672796
-                communication_list=[
672796
-                    [
672796
-                        {
672796
-                            "label": label,
672796
-                            "response_code": 200,
672796
-                            "output": json.dumps(
672796
-                                dict(code="not_running", message="")
672796
-                            ),
672796
-                        },
672796
-                    ]
672796
-                    for label in self.node_labels
672796
-                ]
672796
-            )
672796
-            .http.corosync.qdevice_client_stop(node_labels=self.node_labels)
672796
-            .http.corosync.qdevice_client_start(
672796
-                communication_list=[
672796
+        self.config.http.corosync.set_corosync_conf(
672796
+            self.corosync_conf_text, node_labels=self.node_labels
672796
+        )
672796
+        self.config.http.corosync.reload_corosync_conf(
672796
+            communication_list=[
672796
+                [
672796
                     {
672796
                         "label": label,
672796
-                        "output": "corosync is not running, skipping",
672796
-                    }
672796
-                    for label in self.node_labels
672796
+                        "response_code": 200,
672796
+                        "output": json.dumps(
672796
+                            dict(code="not_running", message="")
672796
+                        ),
672796
+                    },
672796
                 ]
672796
-            )
672796
+                for label in self.node_labels
672796
+            ]
672796
+        )
672796
+        self.config.http.corosync.qdevice_client_stop(
672796
+            node_labels=self.node_labels
672796
+        )
672796
+        self.config.http.corosync.qdevice_client_start(
672796
+            communication_list=[
672796
+                {
672796
+                    "label": label,
672796
+                    "output": "corosync is not running, skipping",
672796
+                }
672796
+                for label in self.node_labels
672796
+            ]
672796
         )
672796
 
672796
         self.env_assistant.get_env().push_corosync_conf(
672796
@@ -816,38 +782,28 @@ class PushCorosyncConfLiveWithQdeviceTest(PushCorosyncConfLiveBase):
672796
         # This also tests that failing to stop qdevice on a node doesn't prevent
672796
         # starting qdevice on the same node.
672796
         self.corosync_conf_facade.need_qdevice_reload = True
672796
-        (
672796
-            self.config.http.corosync.set_corosync_conf(
672796
-                self.corosync_conf_text, node_labels=self.node_labels
672796
-            )
672796
-            .http.corosync.reload_corosync_conf(
672796
-                node_labels=self.node_labels[:1]
672796
-            )
672796
-            .http.corosync.qdevice_client_stop(
672796
-                communication_list=[
672796
-                    dict(
672796
-                        label="node-1",
672796
-                    ),
672796
-                    dict(
672796
-                        label="node-2",
672796
-                        response_code=400,
672796
-                        output="error",
672796
-                    ),
672796
-                ]
672796
-            )
672796
-            .http.corosync.qdevice_client_start(
672796
-                communication_list=[
672796
-                    dict(
672796
-                        label="node-1",
672796
-                        errno=8,
672796
-                        error_msg="failure",
672796
-                        was_connected=False,
672796
-                    ),
672796
-                    dict(
672796
-                        label="node-2",
672796
-                    ),
672796
-                ]
672796
-            )
672796
+        self.config.http.corosync.set_corosync_conf(
672796
+            self.corosync_conf_text, node_labels=self.node_labels
672796
+        )
672796
+        self.config.http.corosync.reload_corosync_conf(
672796
+            node_labels=self.node_labels[:1]
672796
+        )
672796
+        self.config.http.corosync.qdevice_client_stop(
672796
+            communication_list=[
672796
+                dict(label="node-1"),
672796
+                dict(label="node-2", response_code=400, output="error"),
672796
+            ]
672796
+        )
672796
+        self.config.http.corosync.qdevice_client_start(
672796
+            communication_list=[
672796
+                dict(
672796
+                    label="node-1",
672796
+                    errno=8,
672796
+                    error_msg="failure",
672796
+                    was_connected=False,
672796
+                ),
672796
+                dict(label="node-2"),
672796
+            ]
672796
         )
672796
 
672796
         env = self.env_assistant.get_env()
672796
@@ -867,7 +823,8 @@ class PushCorosyncConfLiveWithQdeviceTest(PushCorosyncConfLiveBase):
672796
                     node="node-2",
672796
                 ),
672796
                 fixture.info(
672796
-                    report_codes.COROSYNC_CONFIG_RELOADED, node="node-1"
672796
+                    report_codes.COROSYNC_CONFIG_RELOADED,
672796
+                    node="node-1",
672796
                 ),
672796
                 fixture.info(report_codes.QDEVICE_CLIENT_RELOAD_STARTED),
672796
                 fixture.info(
672796
@@ -903,62 +860,46 @@ class PushCorosyncConfLiveWithQdeviceTest(PushCorosyncConfLiveBase):
672796
 
672796
     def test_qdevice_reload_failures_skip_offline(self):
672796
         self.corosync_conf_facade.need_qdevice_reload = True
672796
-        (
672796
-            self.config.http.corosync.set_corosync_conf(
672796
-                self.corosync_conf_text,
672796
-                communication_list=[
672796
-                    dict(
672796
-                        label="node-1",
672796
-                    ),
672796
-                    dict(
672796
-                        label="node-2",
672796
-                        errno=8,
672796
-                        error_msg="failure",
672796
-                        was_connected=False,
672796
-                    ),
672796
+        self.config.http.corosync.set_corosync_conf(
672796
+            self.corosync_conf_text,
672796
+            communication_list=[
672796
+                dict(label="node-1"),
672796
+                dict(
672796
+                    label="node-2",
672796
+                    errno=8,
672796
+                    error_msg="failure",
672796
+                    was_connected=False,
672796
+                ),
672796
+            ],
672796
+        )
672796
+        self.config.http.corosync.reload_corosync_conf(
672796
+            communication_list=[
672796
+                [
672796
+                    {
672796
+                        "label": self.node_labels[0],
672796
+                        "response_code": 400,
672796
+                        "output": "Failed",
672796
+                    },
672796
                 ],
672796
-            )
672796
-            .http.corosync.reload_corosync_conf(
672796
-                communication_list=[
672796
-                    [
672796
-                        {
672796
-                            "label": self.node_labels[0],
672796
-                            "response_code": 400,
672796
-                            "output": "Failed",
672796
-                        },
672796
-                    ],
672796
-                    [
672796
-                        {
672796
-                            "label": self.node_labels[1],
672796
-                        },
672796
-                    ],
672796
-                ]
672796
-            )
672796
-            .http.corosync.qdevice_client_stop(
672796
-                communication_list=[
672796
-                    dict(
672796
-                        label="node-1",
672796
-                    ),
672796
-                    dict(
672796
-                        label="node-2",
672796
-                        response_code=400,
672796
-                        output="error",
672796
-                    ),
672796
-                ]
672796
-            )
672796
-            .http.corosync.qdevice_client_start(
672796
-                communication_list=[
672796
-                    dict(
672796
-                        label="node-1",
672796
-                        errno=8,
672796
-                        error_msg="failure",
672796
-                        was_connected=False,
672796
-                    ),
672796
-                    dict(
672796
-                        label="node-2",
672796
-                    ),
672796
-                ]
672796
-            )
672796
+                [{"label": self.node_labels[1]}],
672796
+            ]
672796
+        )
672796
+        self.config.http.corosync.qdevice_client_stop(
672796
+            communication_list=[
672796
+                dict(label="node-1"),
672796
+                dict(label="node-2", response_code=400, output="error"),
672796
+            ]
672796
+        )
672796
+        self.config.http.corosync.qdevice_client_start(
672796
+            communication_list=[
672796
+                dict(
672796
+                    label="node-1",
672796
+                    errno=8,
672796
+                    error_msg="failure",
672796
+                    was_connected=False,
672796
+                ),
672796
+                dict(label="node-2"),
672796
+            ]
672796
         )
672796
 
672796
         env = self.env_assistant.get_env()
672796
@@ -990,7 +931,8 @@ class PushCorosyncConfLiveWithQdeviceTest(PushCorosyncConfLiveBase):
672796
                     reason="Failed",
672796
                 ),
672796
                 fixture.info(
672796
-                    report_codes.COROSYNC_CONFIG_RELOADED, node="node-2"
672796
+                    report_codes.COROSYNC_CONFIG_RELOADED,
672796
+                    node="node-2",
672796
                 ),
672796
                 fixture.info(report_codes.QDEVICE_CLIENT_RELOAD_STARTED),
672796
                 fixture.info(
672796
@@ -1024,29 +966,28 @@ class PushCorosyncConfLiveWithQdeviceTest(PushCorosyncConfLiveBase):
672796
 
672796
     def test_reload_not_successful(self):
672796
         self.corosync_conf_facade.need_qdevice_reload = True
672796
-        (
672796
-            self.config.http.corosync.set_corosync_conf(
672796
-                self.corosync_conf_text, node_labels=self.node_labels
672796
-            ).http.corosync.reload_corosync_conf(
672796
-                communication_list=[
672796
-                    [
672796
-                        {
672796
-                            "label": self.node_labels[0],
672796
-                            "response_code": 200,
672796
-                            "output": json.dumps(
672796
-                                dict(code="not_running", message="not running")
672796
-                            ),
672796
-                        },
672796
-                    ],
672796
-                    [
672796
-                        {
672796
-                            "label": self.node_labels[1],
672796
-                            "response_code": 200,
672796
-                            "output": "not a json",
672796
-                        },
672796
-                    ],
672796
-                ]
672796
-            )
672796
+        self.config.http.corosync.set_corosync_conf(
672796
+            self.corosync_conf_text, node_labels=self.node_labels
672796
+        )
672796
+        self.config.http.corosync.reload_corosync_conf(
672796
+            communication_list=[
672796
+                [
672796
+                    {
672796
+                        "label": self.node_labels[0],
672796
+                        "response_code": 200,
672796
+                        "output": json.dumps(
672796
+                            dict(code="not_running", message="not running")
672796
+                        ),
672796
+                    },
672796
+                ],
672796
+                [
672796
+                    {
672796
+                        "label": self.node_labels[1],
672796
+                        "response_code": 200,
672796
+                        "output": "not a json",
672796
+                    },
672796
+                ],
672796
+            ]
672796
         )
672796
         self.env_assistant.assert_raise_library_error(
672796
             lambda: self.env_assistant.get_env().push_corosync_conf(
672796
@@ -1070,7 +1011,8 @@ class PushCorosyncConfLiveWithQdeviceTest(PushCorosyncConfLiveBase):
672796
                     node="node-1",
672796
                 ),
672796
                 fixture.warn(
672796
-                    report_codes.INVALID_RESPONSE_FORMAT, node="node-2"
672796
+                    report_codes.INVALID_RESPONSE_FORMAT,
672796
+                    node="node-2",
672796
                 ),
672796
                 fixture.error(
672796
                     report_codes.UNABLE_TO_PERFORM_OPERATION_ON_ANY_NODE
672796
diff --git a/pcs_test/tier1/legacy/test_constraints.py b/pcs_test/tier1/legacy/test_constraints.py
672796
index 36924925..49b413a8 100644
672796
--- a/pcs_test/tier1/legacy/test_constraints.py
672796
+++ b/pcs_test/tier1/legacy/test_constraints.py
672796
@@ -13,9 +13,11 @@ from pcs_test.tools.assertions import (
672796
 from pcs_test.tools.bin_mock import get_mock_settings
672796
 from pcs_test.tools.cib import get_assert_pcs_effect_mixin
672796
 from pcs_test.tools.fixture_cib import (
672796
+    CachedCibFixture,
672796
     fixture_master_xml,
672796
     fixture_to_cib,
672796
     wrap_element_by_master,
672796
+    wrap_element_by_master_file,
672796
 )
672796
 from pcs_test.tools.misc import (
672796
     get_test_resource as rc,
672796
@@ -23,7 +25,6 @@ from pcs_test.tools.misc import (
672796
     skip_unless_crm_rule,
672796
     outdent,
672796
     ParametrizedTestMetaClass,
672796
-    write_data_to_tmpfile,
672796
     write_file_to_tmpfile,
672796
 )
672796
 from pcs_test.tools.pcs_runner import pcs, PcsRunner
672796
@@ -54,70 +55,63 @@ empty_cib = rc("cib-empty-3.7.xml")
672796
 large_cib = rc("cib-large.xml")
672796
 
672796
 
672796
-@skip_unless_crm_rule()
672796
-class ConstraintTest(unittest.TestCase):
672796
-    def setUp(self):
672796
-        self.temp_cib = get_tmp_file("tier1_constraints")
672796
-        write_file_to_tmpfile(empty_cib, self.temp_cib)
672796
-        self.temp_corosync_conf = None
672796
-
672796
-    def tearDown(self):
672796
-        self.temp_cib.close()
672796
-        if self.temp_corosync_conf:
672796
-            self.temp_corosync_conf.close()
672796
-
672796
-    def fixture_resources(self):
672796
-        write_data_to_tmpfile(self.fixture_cib_cache(), self.temp_cib)
672796
-
672796
-    def fixture_cib_cache(self):
672796
-        if not hasattr(self.__class__, "cib_cache"):
672796
-            self.__class__.cib_cache = self.fixture_cib()
672796
-        return self.__class__.cib_cache
672796
-
672796
-    def fixture_cib(self):
672796
-        write_file_to_tmpfile(empty_cib, self.temp_cib)
672796
-        self.setupClusterA()
672796
-        self.temp_cib.flush()
672796
-        self.temp_cib.seek(0)
672796
-        cib_content = self.temp_cib.read()
672796
-        self.temp_cib.seek(0)
672796
-        write_file_to_tmpfile(empty_cib, self.temp_cib)
672796
-        return cib_content
672796
-
672796
-    # Sets up a cluster with Resources, groups, master/slave resource and clones
672796
-    def setupClusterA(self):
672796
+class ConstraintTestCibFixture(CachedCibFixture):
672796
+    def _setup_cib(self):
672796
         line = "resource create D1 ocf:heartbeat:Dummy".split()
672796
-        output, returnVal = pcs(self.temp_cib.name, line)
672796
+        output, returnVal = pcs(self.cache_path, line)
672796
         assert returnVal == 0 and output == ""
672796
 
672796
         line = "resource create D2 ocf:heartbeat:Dummy".split()
672796
-        output, returnVal = pcs(self.temp_cib.name, line)
672796
+        output, returnVal = pcs(self.cache_path, line)
672796
         assert returnVal == 0 and output == ""
672796
 
672796
         line = "resource create D3 ocf:heartbeat:Dummy".split()
672796
-        output, returnVal = pcs(self.temp_cib.name, line)
672796
+        output, returnVal = pcs(self.cache_path, line)
672796
         assert returnVal == 0 and output == ""
672796
 
672796
         line = "resource create D4 ocf:heartbeat:Dummy".split()
672796
-        output, returnVal = pcs(self.temp_cib.name, line)
672796
+        output, returnVal = pcs(self.cache_path, line)
672796
         assert returnVal == 0 and output == ""
672796
 
672796
         line = "resource create D5 ocf:heartbeat:Dummy".split()
672796
-        output, returnVal = pcs(self.temp_cib.name, line)
672796
+        output, returnVal = pcs(self.cache_path, line)
672796
         assert returnVal == 0 and output == ""
672796
 
672796
         line = "resource create D6 ocf:heartbeat:Dummy".split()
672796
-        output, returnVal = pcs(self.temp_cib.name, line)
672796
+        output, returnVal = pcs(self.cache_path, line)
672796
         assert returnVal == 0 and output == ""
672796
 
672796
         line = "resource clone D3".split()
672796
-        output, returnVal = pcs(self.temp_cib.name, line)
672796
+        output, returnVal = pcs(self.cache_path, line)
672796
         assert returnVal == 0 and output == ""
672796
 
672796
         # pcs no longer allows turning resources into masters but supports
672796
         # existing ones. In order to test it, we need to put a master in the
672796
         # CIB without pcs.
672796
-        wrap_element_by_master(self.temp_cib, "D4", master_id="Master")
672796
+        wrap_element_by_master_file(self.cache_path, "D4", master_id="Master")
672796
+
672796
+
672796
+CONSTRAINT_TEST_CIB_FIXTURE = ConstraintTestCibFixture(
672796
+    "fixture_tier1_constraints", empty_cib
672796
+)
672796
+
672796
+
672796
+@skip_unless_crm_rule()
672796
+class ConstraintTest(unittest.TestCase):
672796
+    def setUp(self):
672796
+        self.temp_cib = get_tmp_file("tier1_constraints")
672796
+        write_file_to_tmpfile(empty_cib, self.temp_cib)
672796
+        self.temp_corosync_conf = None
672796
+
672796
+    def tearDown(self):
672796
+        self.temp_cib.close()
672796
+        if self.temp_corosync_conf:
672796
+            self.temp_corosync_conf.close()
672796
+
672796
+    def fixture_resources(self):
672796
+        write_file_to_tmpfile(
672796
+            CONSTRAINT_TEST_CIB_FIXTURE.cache_path, self.temp_cib
672796
+        )
672796
 
672796
     def testConstraintRules(self):
672796
         self.fixture_resources()
672796
diff --git a/pcs_test/tier1/legacy/test_resource.py b/pcs_test/tier1/legacy/test_resource.py
672796
index 8b043260..ecf0d23d 100644
672796
--- a/pcs_test/tier1/legacy/test_resource.py
672796
+++ b/pcs_test/tier1/legacy/test_resource.py
672796
@@ -12,8 +12,10 @@ from pcs_test.tools.assertions import (
672796
 from pcs_test.tools.bin_mock import get_mock_settings
672796
 from pcs_test.tools.cib import get_assert_pcs_effect_mixin
672796
 from pcs_test.tools.fixture_cib import (
672796
+    CachedCibFixture,
672796
     fixture_master_xml,
672796
     fixture_to_cib,
672796
+    wrap_element_by_master_file,
672796
     wrap_element_by_master,
672796
 )
672796
 from pcs_test.tools.misc import (
672796
@@ -154,21 +156,8 @@ class ResourceDescribe(TestCase, AssertPcsMixin):
672796
         )
672796
 
672796
 
672796
-class Resource(TestCase, AssertPcsMixin):
672796
-    def setUp(self):
672796
-        self.temp_cib = get_tmp_file("tier1_resource")
672796
-        self.temp_large_cib = get_tmp_file("tier1_resource_large")
672796
-        write_file_to_tmpfile(empty_cib, self.temp_cib)
672796
-        write_file_to_tmpfile(large_cib, self.temp_large_cib)
672796
-        self.pcs_runner = PcsRunner(self.temp_cib.name)
672796
-        self.pcs_runner.mock_settings = get_mock_settings("crm_resource_binary")
672796
-
672796
-    def tearDown(self):
672796
-        self.temp_cib.close()
672796
-        self.temp_large_cib.close()
672796
-
672796
-    # Setups up a cluster with Resources, groups, master/slave resource & clones
672796
-    def setupClusterA(self):
672796
+class ResourceTestCibFixture(CachedCibFixture):
672796
+    def _setup_cib(self):
672796
         self.assert_pcs_success(
672796
             (
672796
                 "resource create --no-default-ops ClusterIP ocf:heartbeat:IPaddr2"
672796
@@ -215,7 +204,34 @@ class Resource(TestCase, AssertPcsMixin):
672796
         # pcs no longer allows turning resources into masters but supports
672796
         # existing ones. In order to test it, we need to put a master in the
672796
         # CIB without pcs.
672796
-        wrap_element_by_master(self.temp_cib, "ClusterIP5", master_id="Master")
672796
+        wrap_element_by_master_file(
672796
+            self.cache_path, "ClusterIP5", master_id="Master"
672796
+        )
672796
+
672796
+
672796
+RESOURCE_TEST_CIB_FIXTURE = ResourceTestCibFixture(
672796
+    "fixture_tier1_resource", empty_cib
672796
+)
672796
+
672796
+
672796
+class Resource(TestCase, AssertPcsMixin):
672796
+    def setUp(self):
672796
+        self.temp_cib = get_tmp_file("tier1_resource")
672796
+        self.temp_large_cib = get_tmp_file("tier1_resource_large")
672796
+        write_file_to_tmpfile(empty_cib, self.temp_cib)
672796
+        write_file_to_tmpfile(large_cib, self.temp_large_cib)
672796
+        self.pcs_runner = PcsRunner(self.temp_cib.name)
672796
+        self.pcs_runner.mock_settings = get_mock_settings("crm_resource_binary")
672796
+
672796
+    def tearDown(self):
672796
+        self.temp_cib.close()
672796
+        self.temp_large_cib.close()
672796
+
672796
+    # Setups up a cluster with Resources, groups, master/slave resource & clones
672796
+    def setupClusterA(self):
672796
+        write_file_to_tmpfile(
672796
+            RESOURCE_TEST_CIB_FIXTURE.cache_path, self.temp_cib
672796
+        )
672796
 
672796
     def testCaseInsensitive(self):
672796
         o, r = pcs(
672796
diff --git a/pcs_test/tier1/legacy/test_stonith.py b/pcs_test/tier1/legacy/test_stonith.py
672796
index b3def2d4..f6b93f01 100644
672796
--- a/pcs_test/tier1/legacy/test_stonith.py
672796
+++ b/pcs_test/tier1/legacy/test_stonith.py
672796
@@ -8,6 +8,7 @@ from pcs.common.str_tools import indent
672796
 from pcs_test.tier1.cib_resource.common import ResourceTest
672796
 from pcs_test.tools.assertions import AssertPcsMixin
672796
 from pcs_test.tools.bin_mock import get_mock_settings
672796
+from pcs_test.tools.fixture_cib import CachedCibFixture
672796
 from pcs_test.tools.misc import (
672796
     get_test_resource as rc,
672796
     get_tmp_file,
672796
@@ -840,6 +841,46 @@ _fixture_stonith_level_cache = None
672796
 _fixture_stonith_level_cache_lock = Lock()
672796
 
672796
 
672796
+class StonithLevelTestCibFixture(CachedCibFixture):
672796
+    def _fixture_stonith_resource(self, name):
672796
+        self.assert_pcs_success(
672796
+            [
672796
+                "stonith",
672796
+                "create",
672796
+                name,
672796
+                "fence_apc",
672796
+                "pcmk_host_list=rh7-1 rh7-2",
672796
+                "ip=i",
672796
+                "username=u",
672796
+            ]
672796
+        )
672796
+
672796
+    def _setup_cib(self):
672796
+        self._fixture_stonith_resource("F1")
672796
+        self._fixture_stonith_resource("F2")
672796
+        self._fixture_stonith_resource("F3")
672796
+
672796
+        self.assert_pcs_success("stonith level add 1 rh7-1 F1".split())
672796
+        self.assert_pcs_success("stonith level add 2 rh7-1 F2".split())
672796
+        self.assert_pcs_success("stonith level add 2 rh7-2 F1".split())
672796
+        self.assert_pcs_success("stonith level add 1 rh7-2 F2".split())
672796
+        self.assert_pcs_success("stonith level add 4 regexp%rh7-\\d F3".split())
672796
+        self.assert_pcs_success(
672796
+            "stonith level add 3 regexp%rh7-\\d F2 F1".split()
672796
+        )
672796
+        self.assert_pcs_success(
672796
+            "stonith level add 5 attrib%fencewith=levels1 F3 F2".split()
672796
+        )
672796
+        self.assert_pcs_success(
672796
+            "stonith level add 6 attrib%fencewith=levels2 F3 F1".split()
672796
+        )
672796
+
672796
+
672796
+STONITH_LEVEL_TEST_CIB_FIXTURE = StonithLevelTestCibFixture(
672796
+    "fixture_tier1_stonith_level_tests", rc("cib-empty-withnodes.xml")
672796
+)
672796
+
672796
+
672796
 class LevelTestsBase(TestCase, AssertPcsMixin):
672796
     def setUp(self):
672796
         self.temp_cib = get_tmp_file("tier1_test_stonith_level")
672796
@@ -877,26 +918,11 @@ class LevelTestsBase(TestCase, AssertPcsMixin):
672796
                 _fixture_stonith_level_cache = self.fixture_cib_config()
672796
             return _fixture_stonith_level_cache
672796
 
672796
-    def fixture_cib_config(self):
672796
-        self.fixture_stonith_resource("F1")
672796
-        self.fixture_stonith_resource("F2")
672796
-        self.fixture_stonith_resource("F3")
672796
-
672796
-        self.assert_pcs_success("stonith level add 1 rh7-1 F1".split())
672796
-        self.assert_pcs_success("stonith level add 2 rh7-1 F2".split())
672796
-        self.assert_pcs_success("stonith level add 2 rh7-2 F1".split())
672796
-        self.assert_pcs_success("stonith level add 1 rh7-2 F2".split())
672796
-        self.assert_pcs_success("stonith level add 4 regexp%rh7-\\d F3".split())
672796
-        self.assert_pcs_success(
672796
-            "stonith level add 3 regexp%rh7-\\d F2 F1".split()
672796
-        )
672796
-        self.assert_pcs_success(
672796
-            "stonith level add 5 attrib%fencewith=levels1 F3 F2".split()
672796
-        )
672796
-        self.assert_pcs_success(
672796
-            "stonith level add 6 attrib%fencewith=levels2 F3 F1".split()
672796
-        )
672796
-
672796
+    @staticmethod
672796
+    def fixture_cib_config():
672796
+        cib_content = ""
672796
+        with open(STONITH_LEVEL_TEST_CIB_FIXTURE.cache_path, "r") as cib_file:
672796
+            cib_content = cib_file.read()
672796
         config = outdent(
672796
             """\
672796
             Target: rh7-1
672796
@@ -914,12 +940,7 @@ class LevelTestsBase(TestCase, AssertPcsMixin):
672796
               Level 6 - F3,F1
672796
             """
672796
         )
672796
-
672796
         config_lines = config.splitlines()
672796
-        self.temp_cib.flush()
672796
-        self.temp_cib.seek(0)
672796
-        cib_content = self.temp_cib.read()
672796
-        self.temp_cib.seek(0)
672796
         return cib_content, config, config_lines
672796
 
672796
 
672796
diff --git a/pcs_test/tools/command_env/config_http_corosync.py b/pcs_test/tools/command_env/config_http_corosync.py
672796
index cdaf65ff..7f84f406 100644
672796
--- a/pcs_test/tools/command_env/config_http_corosync.py
672796
+++ b/pcs_test/tools/command_env/config_http_corosync.py
672796
@@ -6,6 +6,23 @@ from pcs_test.tools.command_env.mock_node_communicator import (
672796
 )
672796
 
672796
 
672796
+def corosync_running_check_response(running):
672796
+    return json.dumps(
672796
+        {
672796
+            "node": {
672796
+                "corosync": running,
672796
+                "services": {
672796
+                    "corosync": {
672796
+                        "installed": True,
672796
+                        "enabled": not running,
672796
+                        "running": running,
672796
+                    }
672796
+                },
672796
+            }
672796
+        }
672796
+    )
672796
+
672796
+
672796
 class CorosyncShortcuts:
672796
     def __init__(self, calls):
672796
         self.__calls = calls
672796
@@ -29,7 +46,8 @@ class CorosyncShortcuts:
672796
             node_labels,
672796
             communication_list,
672796
             action="remote/status",
672796
-            output='{"corosync":false}',
672796
+            param_list=[("version", "2")],
672796
+            output=corosync_running_check_response(False),
672796
         )
672796
 
672796
     def get_corosync_online_targets(
672796
@@ -51,7 +69,8 @@ class CorosyncShortcuts:
672796
             node_labels,
672796
             communication_list,
672796
             action="remote/status",
672796
-            output='{"corosync":true}',
672796
+            param_list=[("version", "2")],
672796
+            output=corosync_running_check_response(True),
672796
         )
672796
 
672796
     def get_corosync_conf(
672796
diff --git a/pcs_test/tools/fixture_cib.py b/pcs_test/tools/fixture_cib.py
672796
index 730b0e33..602491c8 100644
672796
--- a/pcs_test/tools/fixture_cib.py
672796
+++ b/pcs_test/tools/fixture_cib.py
672796
@@ -3,7 +3,14 @@ import os
672796
 from unittest import mock
672796
 from lxml import etree
672796
 
672796
+from pcs_test.tools.assertions import AssertPcsMixin
672796
 from pcs_test.tools.custom_mock import MockLibraryReportProcessor
672796
+from pcs_test.tools.misc import (
672796
+    get_test_resource,
672796
+    get_tmp_file,
672796
+    write_file_to_tmpfile,
672796
+)
672796
+from pcs_test.tools.pcs_runner import PcsRunner
672796
 from pcs_test.tools.xml import etree_to_str
672796
 
672796
 from pcs import settings
672796
@@ -12,6 +19,54 @@ from pcs.lib.external import CommandRunner
672796
 # pylint: disable=line-too-long
672796
 
672796
 
672796
+class CachedCibFixture(AssertPcsMixin):
672796
+    def __init__(self, cache_name, empty_cib_path):
672796
+        self._empty_cib_path = empty_cib_path
672796
+        self._cache_name = cache_name
672796
+        self._cache_path = None
672796
+        self._pcs_runner = None
672796
+
672796
+    def _setup_cib(self):
672796
+        raise NotImplementedError()
672796
+
672796
+    def set_up(self):
672796
+        fixture_dir = get_test_resource("temp_fixtures")
672796
+        os.makedirs(fixture_dir, exist_ok=True)
672796
+        self._cache_path = os.path.join(fixture_dir, self._cache_name)
672796
+        self._pcs_runner = PcsRunner(self._cache_path)
672796
+
672796
+        with open(self._empty_cib_path, "r") as template_file, open(
672796
+            self.cache_path, "w"
672796
+        ) as cache_file:
672796
+            cache_file.write(template_file.read())
672796
+        self._setup_cib()
672796
+
672796
+    def clean_up(self):
672796
+        if os.path.isfile(self.cache_path):
672796
+            os.unlink(self.cache_path)
672796
+
672796
+    @property
672796
+    def cache_path(self):
672796
+        if self._cache_path is None:
672796
+            raise AssertionError("Cache has not been initiialized")
672796
+        return self._cache_path
672796
+
672796
+    # methods for supporting assert_pcs_success
672796
+    @property
672796
+    def pcs_runner(self):
672796
+        if self._pcs_runner is None:
672796
+            raise AssertionError("Cache has not been initialized")
672796
+        return self._pcs_runner
672796
+
672796
+    def assertEqual(self, first, second, msg=None):
672796
+        # pylint: disable=invalid-name
672796
+        # pylint: disable=no-self-use
672796
+        if first != second:
672796
+            raise AssertionError(
672796
+                f"{msg}\n{first} != {second}" if msg else f"{first} != {second}"
672796
+            )
672796
+
672796
+
672796
 def wrap_element_by_master(cib_file, resource_id, master_id=None):
672796
     cib_file.seek(0)
672796
     cib_tree = etree.parse(cib_file, etree.XMLParser(huge_tree=True)).getroot()
672796
@@ -49,6 +104,16 @@ def wrap_element_by_master(cib_file, resource_id, master_id=None):
672796
     )
672796
 
672796
 
672796
+def wrap_element_by_master_file(filepath, resource_id, master_id=None):
672796
+    cib_tmp = get_tmp_file("wrap_by_master")
672796
+    write_file_to_tmpfile(filepath, cib_tmp)
672796
+    wrap_element_by_master(cib_tmp, resource_id, master_id=master_id)
672796
+    cib_tmp.seek(0)
672796
+    with open(filepath, "w") as target:
672796
+        target.write(cib_tmp.read())
672796
+    cib_tmp.close()
672796
+
672796
+
672796
 def fixture_master_xml(name, all_ops=True, meta_dict=None):
672796
     default_ops = f"""
672796
             
672796
diff --git a/pcsd/Makefile.am b/pcsd/Makefile.am
672796
index 066ae8b6..a16917f5 100644
672796
--- a/pcsd/Makefile.am
672796
+++ b/pcsd/Makefile.am
672796
@@ -50,7 +50,6 @@ dist_pcsd_DATA		= \
672796
 			  cluster.rb \
672796
 			  config.rb \
672796
 			  corosyncconf.rb \
672796
-			  fenceagent.rb \
672796
 			  pcsd_action_command.rb \
672796
 			  pcsd-cli-main.rb \
672796
 			  pcsd_exchange_format.rb \
672796
diff --git a/pcsd/capabilities.xml b/pcsd/capabilities.xml
672796
index 745b05ad..f9dd8891 100644
672796
--- a/pcsd/capabilities.xml
672796
+++ b/pcsd/capabilities.xml
672796
@@ -561,13 +561,6 @@
672796
         pcs commands: cluster kill
672796
       </description>
672796
     </capability>
672796
-    <capability id="node.restart" in-pcs="0" in-pcsd="1">
672796
-      <description>
672796
-        Restart one host machine or the local host machine if no host specified.
672796
-
672796
-        daemon urls: node_restart
672796
-      </description>
672796
-    </capability>
672796
 
672796
 
672796
 
672796
diff --git a/pcsd/fenceagent.rb b/pcsd/fenceagent.rb
672796
deleted file mode 100644
672796
index 4a3ba07d..00000000
672796
--- a/pcsd/fenceagent.rb
672796
+++ /dev/null
672796
@@ -1,59 +0,0 @@
672796
-def getFenceAgents(auth_user)
672796
-  fence_agent_list = {}
672796
-  stdout, stderr, retval = run_cmd(
672796
-    auth_user, PCS, "stonith", "list", "--nodesc"
672796
-  )
672796
-  if retval != 0
672796
-    $logger.error("Error running 'pcs stonith list --nodesc")
672796
-    $logger.error(stdout + stderr)
672796
-    return {}
672796
-  end
672796
-
672796
-  agents = stdout
672796
-  agents.each { |a|
672796
-    fa = FenceAgent.new
672796
-    fa.name = a.chomp
672796
-    fence_agent_list[fa.name] = fa
672796
-  }
672796
-  return fence_agent_list
672796
-end
672796
-
672796
-class FenceAgent
672796
-  attr_accessor :name, :resource_class, :required_options, :optional_options, :advanced_options, :info
672796
-  def initialize(name=nil, required_options={}, optional_options={}, resource_class=nil, advanced_options={})
672796
-    @name = name
672796
-    @required_options = {}
672796
-    @optional_options = {}
672796
-    @required_options = required_options
672796
-    @optional_options = optional_options
672796
-    @advanced_options = advanced_options
672796
-    @resource_class = nil
672796
-  end
672796
-
672796
-  def type
672796
-    name
672796
-  end
672796
-
672796
-  def to_json(options = {})
672796
-    JSON.generate({
672796
-      :full_name => "stonith:#{name}",
672796
-      :class => 'stonith',
672796
-      :provider => nil,
672796
-      :type => name,
672796
-    })
672796
-  end
672796
-
672796
-  def long_desc
672796
-    if info && info.length >= 2
672796
-      return info[1]
672796
-    end
672796
-    return ""
672796
-  end
672796
-
672796
-  def short_desc
672796
-    if info && info.length >= 1
672796
-      return info[0]
672796
-    end
672796
-    return ""
672796
-  end
672796
-end
672796
diff --git a/pcsd/pcs.rb b/pcsd/pcs.rb
672796
index 9e26c607..1507bdf5 100644
672796
--- a/pcsd/pcs.rb
672796
+++ b/pcsd/pcs.rb
672796
@@ -1514,21 +1514,6 @@ def allowed_for_superuser(auth_user)
672796
   return true
672796
 end
672796
 
672796
-def get_default_overview_node_list(clustername)
672796
-  nodes = get_cluster_nodes clustername
672796
-  node_list = []
672796
-  nodes.each { |node|
672796
-    node_list << {
672796
-      'error_list' => [],
672796
-      'warning_list' => [],
672796
-      'status' => 'unknown',
672796
-      'quorum' => false,
672796
-      'name' => node
672796
-    }
672796
-  }
672796
-  return node_list
672796
-end
672796
-
672796
 def enable_service(service)
672796
   result = run_pcs_internal(
672796
     PCSAuth.getSuperuserAuth(),
672796
diff --git a/pcsd/pcsd.rb b/pcsd/pcsd.rb
672796
index bf91e906..3297fc5e 100644
672796
--- a/pcsd/pcsd.rb
672796
+++ b/pcsd/pcsd.rb
672796
@@ -11,7 +11,6 @@ require 'cgi'
672796
 require 'bootstrap.rb'
672796
 require 'resource.rb'
672796
 require 'remote.rb'
672796
-require 'fenceagent.rb'
672796
 require 'cluster.rb'
672796
 require 'config.rb'
672796
 require 'pcs.rb'
672796
@@ -54,14 +53,14 @@ end
672796
 before do
672796
   # nobody is logged in yet
672796
   @auth_user = nil
672796
-  @tornado_session_username = Thread.current[:tornado_username]
672796
-  @tornado_session_groups = Thread.current[:tornado_groups]
672796
-  @tornado_is_authenticated = Thread.current[:tornado_is_authenticated]
672796
 
672796
   if(request.path.start_with?('/remote/') and request.path != "/remote/auth") or request.path == '/run_pcs' or request.path.start_with?('/api/')
672796
     # Sets @auth_user to a hash containing info about logged in user or halts
672796
     # the request processing if login credentials are incorrect.
672796
-    protect_by_token!
672796
+    @auth_user = PCSAuth.loginByToken(request.cookies)
672796
+    unless @auth_user
672796
+      halt [401, '{"notauthorized":"true"}']
672796
+    end
672796
   else
672796
     # Set a sane default: nobody is logged in, but we do not need to check both
672796
     # for nil and empty username (if auth_user and auth_user[:username])
672796
@@ -120,37 +119,6 @@ def run_cfgsync
672796
   end
672796
 end
672796
 
672796
-helpers do
672796
-  def is_ajax?
672796
-    return request.env['HTTP_X_REQUESTED_WITH'] == 'XMLHttpRequest'
672796
-  end
672796
-
672796
-  def protect_by_token!
672796
-    @auth_user = PCSAuth.loginByToken(request.cookies)
672796
-    unless @auth_user
672796
-      halt [401, '{"notauthorized":"true"}']
672796
-    end
672796
-  end
672796
-
672796
-  def getParamList(params)
672796
-    param_line = []
672796
-    meta_options = []
672796
-    params.each { |param, val|
672796
-      if param.start_with?("_res_paramne_") or (param.start_with?("_res_paramempty_") and val != "")
672796
-        myparam = param.sub(/^_res_paramne_/,"").sub(/^_res_paramempty_/,"")
672796
-        param_line << "#{myparam}=#{val}"
672796
-      end
672796
-      if param == "disabled"
672796
-        meta_options << 'meta' << 'target-role=Stopped'
672796
-      end
672796
-      if param == "force" and val
672796
-        param_line << "--force"
672796
-      end
672796
-    }
672796
-    return param_line + meta_options
672796
-  end
672796
-end
672796
-
672796
 get '/remote/?:command?' do
672796
   return remote(params, request, @auth_user)
672796
 end
672796
@@ -675,10 +643,6 @@ post '/manage/auth_gui_against_nodes' do
672796
   ]
672796
 end
672796
 
672796
-get '/clusters_overview' do
672796
-  clusters_overview(params, request, getAuthUser())
672796
-end
672796
-
672796
 get '/imported-cluster-list' do
672796
   imported_cluster_list(params, request, getAuthUser())
672796
 end
672796
@@ -693,190 +657,11 @@ post '/managec/:cluster/permissions_save/?' do
672796
   )
672796
 end
672796
 
672796
-get '/managec/:cluster/status_all' do
672796
-  auth_user = getAuthUser()
672796
-  status_all(params, request, auth_user, get_cluster_nodes(params[:cluster]))
672796
-end
672796
-
672796
 get '/managec/:cluster/cluster_status' do
672796
   auth_user = getAuthUser()
672796
   cluster_status_gui(auth_user, params[:cluster])
672796
 end
672796
 
672796
-get '/managec/:cluster/cluster_properties' do
672796
-  auth_user = getAuthUser()
672796
-  cluster = params[:cluster]
672796
-  unless cluster
672796
-    return 200, {}
672796
-  end
672796
-  code, out = send_cluster_request_with_token(auth_user, cluster, 'get_cib')
672796
-  if code == 403
672796
-    return [403, 'Permission denied']
672796
-  elsif code != 200
672796
-    return [400, 'getting CIB failed']
672796
-  end
672796
-  begin
672796
-    properties = getAllSettings(nil, REXML::Document.new(out))
672796
-    code, out = send_cluster_request_with_token(
672796
-      auth_user, cluster, 'get_cluster_properties_definition'
672796
-    )
672796
-
672796
-    if code == 403
672796
-      return [403, 'Permission denied']
672796
-    elsif code == 404
672796
-      definition = {
672796
-        'batch-limit' => {
672796
-          'name' => 'batch-limit',
672796
-          'source' => 'pacemaker-schedulerd',
672796
-          'default' => '0',
672796
-          'type' => 'integer',
672796
-          'shortdesc' => 'The number of jobs that pacemaker is allowed to execute in parallel.',
672796
-          'longdesc' => 'The "correct" value will depend on the speed and load of your network and cluster nodes.',
672796
-          'readable_name' => 'Batch Limit',
672796
-          'advanced' => false
672796
-        },
672796
-        'no-quorum-policy' => {
672796
-          'name' => 'no-quorum-policy',
672796
-          'source' => 'pacemaker-schedulerd',
672796
-          'default' => 'stop',
672796
-          'type' => 'enum',
672796
-          'enum' => ['stop', 'freeze', 'ignore', 'suicide'],
672796
-          'shortdesc' => 'What to do when the cluster does not have quorum.',
672796
-          'longdesc' => 'Allowed values:
672796
-  * ignore - continue all resource management
672796
-  * freeze - continue resource management, but don\'t recover resources from nodes not in the affected partition
672796
-  * stop - stop all resources in the affected cluster partition
672796
-  * suicide - fence all nodes in the affected cluster partition',
672796
-          'readable_name' => 'No Quorum Policy',
672796
-          'advanced' => false
672796
-        },
672796
-        'symmetric-cluster' => {
672796
-          'name' => 'symmetric-cluster',
672796
-          'source' => 'pacemaker-schedulerd',
672796
-          'default' => 'true',
672796
-          'type' => 'boolean',
672796
-          'shortdesc' => 'All resources can run anywhere by default.',
672796
-          'longdesc' => 'All resources can run anywhere by default.',
672796
-          'readable_name' => 'Symmetric',
672796
-          'advanced' => false
672796
-        },
672796
-        'stonith-enabled' => {
672796
-          'name' => 'stonith-enabled',
672796
-          'source' => 'pacemaker-schedulerd',
672796
-          'default' => 'true',
672796
-          'type' => 'boolean',
672796
-          'shortdesc' => 'Failed nodes are STONITH\'d',
672796
-          'longdesc' => 'Failed nodes are STONITH\'d',
672796
-          'readable_name' => 'Stonith Enabled',
672796
-          'advanced' => false
672796
-        },
672796
-        'stonith-action' => {
672796
-          'name' => 'stonith-action',
672796
-          'source' => 'pacemaker-schedulerd',
672796
-          'default' => 'reboot',
672796
-          'type' => 'enum',
672796
-          'enum' => ['reboot', 'poweroff', 'off'],
672796
-          'shortdesc' => 'Action to send to STONITH device',
672796
-          'longdesc' => 'Action to send to STONITH device Allowed values: reboot, poweroff, off',
672796
-          'readable_name' => 'Stonith Action',
672796
-          'advanced' => false
672796
-        },
672796
-        'cluster-delay' => {
672796
-          'name' => 'cluster-delay',
672796
-          'source' => 'pacemaker-schedulerd',
672796
-          'default' => '60s',
672796
-          'type' => 'time',
672796
-          'shortdesc' => 'Round trip delay over the network (excluding action execution)',
672796
-          'longdesc' => 'The "correct" value will depend on the speed and load of your network and cluster nodes.',
672796
-          'readable_name' => 'Cluster Delay',
672796
-          'advanced' => false
672796
-        },
672796
-        'stop-orphan-resources' => {
672796
-          'name' => 'stop-orphan-resources',
672796
-          'source' => 'pacemaker-schedulerd',
672796
-          'default' => 'true',
672796
-          'type' => 'boolean',
672796
-          'shortdesc' => 'Should deleted resources be stopped',
672796
-          'longdesc' => 'Should deleted resources be stopped',
672796
-          'readable_name' => 'Stop Orphan Resources',
672796
-          'advanced' => false
672796
-        },
672796
-        'stop-orphan-actions' => {
672796
-          'name' => 'stop-orphan-actions',
672796
-          'source' => 'pacemaker-schedulerd',
672796
-          'default' => 'true',
672796
-          'type' => 'boolean',
672796
-          'shortdesc' => 'Should deleted actions be cancelled',
672796
-          'longdesc' => 'Should deleted actions be cancelled',
672796
-          'readable_name' => 'Stop Orphan Actions',
672796
-          'advanced' => false
672796
-        },
672796
-        'start-failure-is-fatal' => {
672796
-          'name' => 'start-failure-is-fatal',
672796
-          'source' => 'pacemaker-schedulerd',
672796
-          'default' => 'true',
672796
-          'type' => 'boolean',
672796
-          'shortdesc' => 'Always treat start failures as fatal',
672796
-          'longdesc' => 'This was the old default. However when set to FALSE, the cluster will instead use the resource\'s failcount and value for resource-failure-stickiness',
672796
-          'readable_name' => 'Start Failure is Fatal',
672796
-          'advanced' => false
672796
-        },
672796
-        'pe-error-series-max' => {
672796
-          'name' => 'pe-error-series-max',
672796
-          'source' => 'pacemaker-schedulerd',
672796
-          'default' => '-1',
672796
-          'type' => 'integer',
672796
-          'shortdesc' => 'The number of PE inputs resulting in ERRORs to save',
672796
-          'longdesc' => 'Zero to disable, -1 to store unlimited.',
672796
-          'readable_name' => 'PE Error Storage',
672796
-          'advanced' => false
672796
-        },
672796
-        'pe-warn-series-max' => {
672796
-          'name' => 'pe-warn-series-max',
672796
-          'source' => 'pacemaker-schedulerd',
672796
-          'default' => '5000',
672796
-          'type' => 'integer',
672796
-          'shortdesc' => 'The number of PE inputs resulting in WARNINGs to save',
672796
-          'longdesc' => 'Zero to disable, -1 to store unlimited.',
672796
-          'readable_name' => 'PE Warning Storage',
672796
-          'advanced' => false
672796
-        },
672796
-        'pe-input-series-max' => {
672796
-          'name' => 'pe-input-series-max',
672796
-          'source' => 'pacemaker-schedulerd',
672796
-          'default' => '4000',
672796
-          'type' => 'integer',
672796
-          'shortdesc' => 'The number of other PE inputs to save',
672796
-          'longdesc' => 'Zero to disable, -1 to store unlimited.',
672796
-          'readable_name' => 'PE Input Storage',
672796
-          'advanced' => false
672796
-        },
672796
-        'enable-acl' => {
672796
-          'name' => 'enable-acl',
672796
-          'source' => 'pacemaker-based',
672796
-          'default' => 'false',
672796
-          'type' => 'boolean',
672796
-          'shortdesc' => 'Enable CIB ACL',
672796
-          'longdesc' => 'Should pacemaker use ACLs to determine access to cluster',
672796
-          'readable_name' => 'Enable ACLs',
672796
-          'advanced' => false
672796
-        },
672796
-      }
672796
-    elsif code != 200
672796
-      return [400, 'getting properties definition failed']
672796
-    else
672796
-      definition = JSON.parse(out)
672796
-    end
672796
-
672796
-    definition.each { |name, prop|
672796
-      prop['value'] = properties[name]
672796
-    }
672796
-    return [200, JSON.generate(definition)]
672796
-  rescue
672796
-    return [400, 'unable to get cluster properties']
672796
-  end
672796
-end
672796
-
672796
 get '/managec/:cluster/get_resource_agent_metadata' do
672796
   auth_user = getAuthUser()
672796
   cluster = params[:cluster]
672796
@@ -888,69 +673,7 @@ get '/managec/:cluster/get_resource_agent_metadata' do
672796
     false,
672796
     {:resource_agent => resource_agent}
672796
   )
672796
-  if code != 404
672796
-    return [code, out]
672796
-  end
672796
-
672796
-  code, out = send_cluster_request_with_token(
672796
-    auth_user,
672796
-    cluster,
672796
-    'resource_metadata',
672796
-    false,
672796
-    {
672796
-      :resourcename => resource_agent,
672796
-      :new => true
672796
-    }
672796
-  )
672796
-  if code != 200
672796
-    return [400, 'Unable to get meta-data of specified resource agent.']
672796
-  end
672796
-  desc_regex = Regexp.new(
672796
-    '
672796
-      '
672796
-  )
672796
-  parameters_regex = Regexp.new(
672796
-    '<input type="hidden" name="resource_type"[^>]*>(?<required>[\s\S]*)' +
672796
-      '
Optional Arguments:
(?<optional>[\S\s]*)' +
672796
-      ''
672796
-  )
672796
-  parameter_regex = Regexp.new(
672796
-    ']*>[\s]*\s*' +
672796
-      '(?<name>[^<\s]*)\s*\s*\s*' +
672796
-      '
672796
-  )
672796
-
672796
-  desc = desc_regex.match(out)
672796
-  unless desc
672796
-    return [400, 'Unable to get meta-data of specified resource agent.']
672796
-  end
672796
-  result = {
672796
-    :name => resource_agent,
672796
-    :shortdesc => html2plain(desc[:short]),
672796
-    :longdesc => html2plain(desc[:long]),
672796
-    :parameters => []
672796
-  }
672796
-
672796
-  parameters = parameters_regex.match(out)
672796
-  parameters[:required].scan(parameter_regex) { |match|
672796
-    result[:parameters] << {
672796
-      :name => html2plain(match[1]),
672796
-      :longdesc => html2plain(match[0]),
672796
-      :shortdesc => html2plain(match[2]),
672796
-      :type => 'string',
672796
-      :required => true
672796
-    }
672796
-  }
672796
-  parameters[:optional].scan(parameter_regex) { |match|
672796
-    result[:parameters] << {
672796
-      :name => html2plain(match[1]),
672796
-      :longdesc => html2plain(match[0]),
672796
-      :shortdesc => html2plain(match[2]),
672796
-      :type => 'string',
672796
-      :required => false
672796
-    }
672796
-  }
672796
-  return [200, JSON.generate(result)]
672796
+  return [code, out]
672796
 end
672796
 
672796
 get '/managec/:cluster/get_fence_agent_metadata' do
672796
@@ -964,90 +687,7 @@ get '/managec/:cluster/get_fence_agent_metadata' do
672796
     false,
672796
     {:fence_agent => fence_agent}
672796
   )
672796
-  if code != 404
672796
-    return [code, out]
672796
-  end
672796
-
672796
-  code, out = send_cluster_request_with_token(
672796
-    auth_user,
672796
-    cluster,
672796
-    'fence_device_metadata',
672796
-    false,
672796
-    {
672796
-      :resourcename => fence_agent.sub('stonith:', ''),
672796
-      :new => true
672796
-    }
672796
-  )
672796
-  if code != 200
672796
-    return [400, 'Unable to get meta-data of specified fence agent.']
672796
-  end
672796
-  desc_regex = Regexp.new(
672796
-    '
672796
-      '
672796
-  )
672796
-  parameters_regex = Regexp.new(
672796
-    '<input type="hidden" name="resource_type"[^>]*>(?<required>[\s\S]*)' +
672796
-      '
Optional Arguments:
(?<optional>[\S\s]*)' +
672796
-      '
Advanced Arguments:
(?<advanced>[\S\s]*)' +
672796
-      ''
672796
-  )
672796
-  required_parameter_regex = Regexp.new(
672796
-    ']*>[\s]*' +
672796
-      '\s* (?<name>[^<\s]*)\s*\s*\s*' +
672796
-      '
672796
-  )
672796
-  other_parameter_regex = Regexp.new(
672796
-    '\s* (?<name>[^<\s]*)\s*\s*\s*' +
672796
-      '
672796
-  )
672796
-
672796
-  result = {
672796
-    :name => fence_agent,
672796
-    :shortdesc => '',
672796
-    :longdesc => '',
672796
-    :parameters => []
672796
-  }
672796
-
672796
-  # pcsd in version 0.9.137 (and older) does not provide description for
672796
-  # fence agents
672796
-  desc = desc_regex.match(out)
672796
-  if desc
672796
-    result[:shortdesc] = html2plain(desc[:short])
672796
-    result[:longdesc] = html2plain(desc[:long])
672796
-  end
672796
-
672796
-  parameters = parameters_regex.match(out)
672796
-  parameters[:required].scan(required_parameter_regex) { |match|
672796
-    result[:parameters] << {
672796
-      :name => html2plain(match[1]),
672796
-      :longdesc => html2plain(match[0]),
672796
-      :shortdesc => html2plain(match[2]),
672796
-      :type => 'string',
672796
-      :required => true,
672796
-      :advanced => false
672796
-    }
672796
-  }
672796
-  parameters[:optional].scan(other_parameter_regex) { |match|
672796
-    result[:parameters] << {
672796
-      :name => html2plain(match[0]),
672796
-      :longdesc => '',
672796
-      :shortdesc => html2plain(match[1]),
672796
-      :type => 'string',
672796
-      :required => false,
672796
-      :advanced => false
672796
-    }
672796
-  }
672796
-  parameters[:advanced].scan(other_parameter_regex) { |match|
672796
-    result[:parameters] << {
672796
-      :name => html2plain(match[0]),
672796
-      :longdesc => '',
672796
-      :shortdesc => html2plain(match[1]),
672796
-      :type => 'string',
672796
-      :required => false,
672796
-      :advanced => true
672796
-    }
672796
-  }
672796
-  return [200, JSON.generate(result)]
672796
+  return [code, out]
672796
 end
672796
 
672796
 post '/managec/:cluster/fix_auth_of_cluster' do
672796
@@ -1123,7 +763,6 @@ def pcs_compatibility_layer_known_hosts_add(
672796
   known_hosts = get_known_hosts().select { |name, obj|
672796
     host_list.include?(name)
672796
   }
672796
-  # try the new endpoint provided by pcs-0.10
672796
   known_hosts_request_data = {}
672796
   known_hosts.each { |host_name, host_obj|
672796
     known_hosts_request_data[host_name] = {
672796
@@ -1149,50 +788,14 @@ def pcs_compatibility_layer_known_hosts_add(
672796
     )
672796
   end
672796
 
672796
-  # a remote host supports the endpoint; success
672796
-  if retval == 200
672796
-    return 'success'
672796
-  end
672796
-
672796
-  # a remote host supports the endpoint; error
672796
-  if retval != 404
672796
-    return 'error'
672796
-  end
672796
-
672796
-  # a remote host does not support the endpoint
672796
-  # fallback to the old endpoint provided by pcs-0.9 since 0.9.140
672796
-  request_data = {}
672796
-  known_hosts.each { |host_name, host_obj|
672796
-    addr = host_obj.first_dest()['addr']
672796
-    port = host_obj.first_dest()['port']
672796
-    request_data["node:#{host_name}"] = host_obj.token
672796
-    request_data["port:#{host_name}"] = port
672796
-    request_data["node:#{addr}"] = host_obj.token
672796
-    request_data["port:#{addr}"] = port
672796
-  }
672796
-  if is_cluster_request
672796
-    retval, _out = send_cluster_request_with_token(
672796
-      auth_user, target, '/save_tokens', true, request_data
672796
-    )
672796
-  else
672796
-    retval, _out = send_request_with_token(
672796
-      auth_user, target, '/save_tokens', true, request_data
672796
-    )
672796
-  end
672796
-
672796
-  # a remote host supports the endpoint; success
672796
   if retval == 200
672796
     return 'success'
672796
   end
672796
 
672796
-  # a remote host supports the endpoint; error
672796
-  if retval != 404
672796
-    return 'error'
672796
+  if retval == 404
672796
+    return 'not_supported'
672796
   end
672796
-
672796
-  # a remote host does not support any of the endpoints
672796
-  # there's nothing we can do about it
672796
-  return 'not_supported'
672796
+  return 'error'
672796
 end
672796
 
672796
 def pcs_compatibility_layer_get_cluster_known_hosts(cluster_name, target_node)
672796
@@ -1200,11 +803,9 @@ def pcs_compatibility_layer_get_cluster_known_hosts(cluster_name, target_node)
672796
   known_hosts = []
672796
   auth_user = PCSAuth.getSuperuserAuth()
672796
 
672796
-  # try the new endpoint provided by pcs-0.10
672796
   retval, out = send_request_with_token(
672796
     auth_user, target_node, '/get_cluster_known_hosts'
672796
   )
672796
-  # a remote host supports /get_cluster_known_hosts; data downloaded
672796
   if retval == 200
672796
     begin
672796
       JSON.parse(out).each { |name, data|
672796
@@ -1222,159 +823,21 @@ def pcs_compatibility_layer_get_cluster_known_hosts(cluster_name, target_node)
672796
         "cannot get authentication info from cluster '#{cluster_name}'"
672796
       )
672796
     end
672796
-    return known_hosts, warning_messages
672796
-  end
672796
-
672796
-  # a remote host supports /get_cluster_known_hosts; an error occured
672796
-  if retval != 404
672796
+  elsif retval == 404
672796
     warning_messages << (
672796
       "Unable to automatically authenticate against cluster nodes: " +
672796
-      "cannot get authentication info from cluster '#{cluster_name}'"
672796
+      "cluster '#{cluster_name}' is running an old version of pcs/pcsd"
672796
     )
672796
-    return known_hosts, warning_messages
672796
-  end
672796
-
672796
-  # a remote host does not support /get_cluster_known_hosts
672796
-  # fallback to the old endpoint provided by pcs-0.9 since 0.9.140
672796
-  retval, out = send_request_with_token(
672796
-    auth_user, target_node, '/get_cluster_tokens', false, {'with_ports' => '1'}
672796
-  )
672796
-
672796
-  # a remote host supports /get_cluster_tokens; data downloaded
672796
-  if retval == 200
672796
-    begin
672796
-      data = JSON.parse(out)
672796
-      expected_keys = ['tokens', 'ports']
672796
-      if expected_keys.all? {|i| data.has_key?(i) and data[i].class == Hash}
672796
-        # new format
672796
-        new_tokens = data["tokens"] || {}
672796
-        new_ports = data["ports"] || {}
672796
-      else
672796
-        # old format
672796
-        new_tokens = data
672796
-        new_ports = {}
672796
-      end
672796
-      new_tokens.each { |name_addr, token|
672796
-        known_hosts << PcsKnownHost.new(
672796
-          name_addr,
672796
-          token,
672796
-          [
672796
-            {
672796
-              'addr' => name_addr,
672796
-              'port' => (new_ports[name_addr] || PCSD_DEFAULT_PORT),
672796
-            }
672796
-          ]
672796
-        )
672796
-      }
672796
-    rescue => e
672796
-      $logger.error "Unable to parse the response of /get_cluster_tokens: #{e}"
672796
-      known_hosts = []
672796
-      warning_messages << (
672796
-        "Unable to automatically authenticate against cluster nodes: " +
672796
-        "cannot get authentication info from cluster '#{cluster_name}'"
672796
-      )
672796
-    end
672796
-    return known_hosts, warning_messages
672796
-  end
672796
-
672796
-  # a remote host supports /get_cluster_tokens; an error occured
672796
-  if retval != 404
672796
+  else
672796
     warning_messages << (
672796
       "Unable to automatically authenticate against cluster nodes: " +
672796
       "cannot get authentication info from cluster '#{cluster_name}'"
672796
     )
672796
-    return known_hosts, warning_messages
672796
   end
672796
 
672796
-  # a remote host does not support /get_cluster_tokens
672796
-  # there's nothing we can do about it
672796
-  warning_messages << (
672796
-    "Unable to automatically authenticate against cluster nodes: " +
672796
-    "cluster '#{cluster_name}' is running an old version of pcs/pcsd"
672796
-  )
672796
   return known_hosts, warning_messages
672796
 end
672796
 
672796
-def pcs_0_9_142_resource_change_group(auth_user, params)
672796
-  parameters = {
672796
-    :resource_id => params[:resource_id],
672796
-    :resource_group => '',
672796
-    :_orig_resource_group => '',
672796
-  }
672796
-  parameters[:resource_group] = params[:group_id] if params[:group_id]
672796
-  if params[:old_group_id]
672796
-    parameters[:_orig_resource_group] = params[:old_group_id]
672796
-  end
672796
-  return send_cluster_request_with_token(
672796
-    auth_user, params[:cluster], 'update_resource', true, parameters
672796
-  )
672796
-end
672796
-
672796
-def pcs_0_9_142_resource_clone(auth_user, params)
672796
-  parameters = {
672796
-    :resource_id => params[:resource_id],
672796
-    :resource_clone => true,
672796
-    :_orig_resource_clone => 'false',
672796
-  }
672796
-  return send_cluster_request_with_token(
672796
-    auth_user, params[:cluster], 'update_resource', true, parameters
672796
-  )
672796
-end
672796
-
672796
-def pcs_0_9_142_resource_unclone(auth_user, params)
672796
-  parameters = {
672796
-    :resource_id => params[:resource_id],
672796
-    :resource_clone => nil,
672796
-    :_orig_resource_clone => 'true',
672796
-  }
672796
-  return send_cluster_request_with_token(
672796
-    auth_user, params[:cluster], 'update_resource', true, parameters
672796
-  )
672796
-end
672796
-
672796
-def pcs_0_9_142_resource_master(auth_user, params)
672796
-  parameters = {
672796
-    :resource_id => params[:resource_id],
672796
-    :resource_ms => true,
672796
-    :_orig_resource_ms => 'false',
672796
-  }
672796
-  return send_cluster_request_with_token(
672796
-    auth_user, params[:cluster], 'update_resource', true, parameters
672796
-  )
672796
-end
672796
-
672796
-# There is a bug in pcs-0.9.138 and older in processing the standby and
672796
-# unstandby request. JS of that pcsd always sent nodename in "node"
672796
-# parameter, which caused pcsd daemon to run the standby command locally with
672796
-# param["node"] as node name. This worked fine if the local cluster was
672796
-# managed from JS, as pacemaker simply put the requested node into standby.
672796
-# However it didn't work for managing non-local clusters, as the command was
672796
-# run on the local cluster everytime. Pcsd daemon would send the request to a
672796
-# remote cluster if the param["name"] variable was set, and that never
672796
-# happened. That however wouldn't work either, as then the required parameter
672796
-# "node" wasn't sent in the request causing an exception on the receiving
672796
-# node. This is fixed in commit 053f63ca109d9ef9e7f0416e90aab8e140480f5b
672796
-#
672796
-# In order to be able to put nodes running pcs-0.9.138 into standby, the
672796
-# nodename must be sent in "node" param, and the "name" must not be sent.
672796
-def pcs_0_9_138_node_standby(auth_user, params)
672796
-  translated_params = {
672796
-    'node' => params[:name],
672796
-  }
672796
-  return send_cluster_request_with_token(
672796
-    auth_user, params[:cluster], 'node_standby', true, translated_params
672796
-  )
672796
-end
672796
-
672796
-def pcs_0_9_138_node_unstandby(auth_user, params)
672796
-  translated_params = {
672796
-    'node' => params[:name],
672796
-  }
672796
-  return send_cluster_request_with_token(
672796
-    auth_user, params[:cluster], 'node_unstandby', true, translated_params
672796
-  )
672796
-end
672796
-
672796
 def pcs_0_10_6_get_avail_resource_agents(code, out)
672796
   if code != 200
672796
     return code, out
672796
@@ -1421,99 +884,9 @@ post '/managec/:cluster/?*' do
672796
   if params[:cluster]
672796
     request = "/" + params[:splat].join("/")
672796
 
672796
-    # backward compatibility layer BEGIN
672796
-    translate_for_version = {
672796
-      '/node_standby' => [
672796
-        [[0, 9, 138], method(:pcs_0_9_138_node_standby)],
672796
-      ],
672796
-      '/node_unstandby' => [
672796
-        [[0, 9, 138], method(:pcs_0_9_138_node_unstandby)],
672796
-      ],
672796
-    }
672796
-    if translate_for_version.key?(request)
672796
-      target_pcsd_version = [0, 0, 0]
672796
-      version_code, version_out = send_cluster_request_with_token(
672796
-        auth_user, params[:cluster], 'get_sw_versions'
672796
-      )
672796
-      if version_code == 200
672796
-        begin
672796
-          versions = JSON.parse(version_out)
672796
-          target_pcsd_version = versions['pcs'] if versions['pcs']
672796
-        rescue JSON::ParserError
672796
-        end
672796
-      end
672796
-      translate_function = nil
672796
-      translate_for_version[request].each { |pair|
672796
-        if (target_pcsd_version <=> pair[0]) != 1 # target <= pair
672796
-          translate_function = pair[1]
672796
-          break
672796
-        end
672796
-      }
672796
-    end
672796
-    # backward compatibility layer END
672796
-
672796
-    if translate_function
672796
-      code, out = translate_function.call(auth_user, params)
672796
-    else
672796
-      code, out = send_cluster_request_with_token(
672796
-        auth_user, params[:cluster], request, true, params, true, raw_data
672796
-      )
672796
-    end
672796
-
672796
-    # backward compatibility layer BEGIN
672796
-    if code == 404
672796
-      case request
672796
-        # supported since pcs-0.9.143 (tree view of resources)
672796
-        when '/resource_change_group', 'resource_change_group'
672796
-          code, out =  pcs_0_9_142_resource_change_group(auth_user, params)
672796
-        # supported since pcs-0.9.143 (tree view of resources)
672796
-        when '/resource_clone', 'resource_clone'
672796
-          code, out = pcs_0_9_142_resource_clone(auth_user, params)
672796
-        # supported since pcs-0.9.143 (tree view of resources)
672796
-        when '/resource_unclone', 'resource_unclone'
672796
-          code, out = pcs_0_9_142_resource_unclone(auth_user, params)
672796
-        # supported since pcs-0.9.143 (tree view of resources)
672796
-        when '/resource_master', 'resource_master'
672796
-          # defaults to true for old pcsds without capabilities defined
672796
-          supports_resource_master = true
672796
-          capabilities_code, capabilities_out = send_cluster_request_with_token(
672796
-            auth_user, params[:cluster], 'capabilities'
672796
-          )
672796
-          if capabilities_code == 200
672796
-            begin
672796
-              capabilities_json = JSON.parse(capabilities_out)
672796
-              supports_resource_master = capabilities_json[:pcsd_capabilities].include?(
672796
-                'pcmk.resource.master'
672796
-              )
672796
-            rescue JSON::ParserError
672796
-            end
672796
-          end
672796
-          if supports_resource_master
672796
-            code, out = pcs_0_9_142_resource_master(auth_user, params)
672796
-          end
672796
-        else
672796
-          redirection = {
672796
-            # constraints removal for pcs-0.9.137 and older
672796
-            "/remove_constraint_remote" => "/resource_cmd/rm_constraint",
672796
-            # constraints removal for pcs-0.9.137 and older
672796
-            "/remove_constraint_rule_remote" => "/resource_cmd/rm_constraint_rule"
672796
-          }
672796
-          if redirection.key?(request)
672796
-            code, out = send_cluster_request_with_token(
672796
-              auth_user,
672796
-              params[:cluster],
672796
-              redirection[request],
672796
-              true,
672796
-              params,
672796
-              false,
672796
-              raw_data
672796
-            )
672796
-          end
672796
-      end
672796
-    end
672796
-    # backward compatibility layer END
672796
-
672796
-    return code, out
672796
+    return send_cluster_request_with_token(
672796
+      auth_user, params[:cluster], request, true, params, true, raw_data
672796
+    )
672796
   end
672796
 end
672796
 
672796
@@ -1548,17 +921,3 @@ get '*' do
672796
   redirect "Bad URL"
672796
   call(env.merge("PATH_INFO" => '/nodes'))
672796
 end
672796
-
672796
-def html2plain(text)
672796
-  return CGI.unescapeHTML(text).gsub(/<br[^>]*>/, "\n")
672796
-end
672796
-
672796
-helpers do
672796
-  def h(text)
672796
-    Rack::Utils.escape_html(text)
672796
-  end
672796
-
672796
-  def nl2br(text)
672796
-    text.gsub(/\n/, "
")
672796
-  end
672796
-end
672796
diff --git a/pcsd/remote.rb b/pcsd/remote.rb
672796
index 1c019e98..e36f651f 100644
672796
--- a/pcsd/remote.rb
672796
+++ b/pcsd/remote.rb
672796
@@ -25,14 +25,14 @@ def remote(params, request, auth_user)
672796
   remote_cmd_without_pacemaker = {
672796
       :capabilities => method(:capabilities),
672796
       :status => method(:node_status),
672796
-      :status_all => method(:status_all),
672796
       :cluster_status => method(:cluster_status_remote),
672796
       :cluster_status_plaintext => method(:cluster_status_plaintext),
672796
       :auth => method(:auth),
672796
       :check_auth => method(:check_auth),
672796
+      # lib api:
672796
+      # /api/v1/cluster-setup/v1
672796
       :cluster_setup => method(:cluster_setup),
672796
       :get_quorum_info => method(:get_quorum_info),
672796
-      :get_cib => method(:get_cib),
672796
       :get_corosync_conf => method(:get_corosync_conf_remote),
672796
       :set_corosync_conf => method(:set_corosync_conf),
672796
       :get_sync_capabilities => method(:get_sync_capabilities),
672796
@@ -45,14 +45,6 @@ def remote(params, request, auth_user)
672796
       :cluster_start => method(:cluster_start),
672796
       :cluster_stop => method(:cluster_stop),
672796
       :config_restore => method(:config_restore),
672796
-      # TODO deprecated, remove, not used anymore
672796
-      :node_restart => method(:node_restart),
672796
-      # lib api:
672796
-      # /api/v1/node-standby-unstandby/v1
672796
-      :node_standby => method(:node_standby),
672796
-      # lib api:
672796
-      # /api/v1/node-standby-unstandby/v1
672796
-      :node_unstandby => method(:node_unstandby),
672796
       :cluster_enable => method(:cluster_enable),
672796
       :cluster_disable => method(:cluster_disable),
672796
       :get_sw_versions => method(:get_sw_versions),
672796
@@ -69,12 +61,6 @@ def remote(params, request, auth_user)
672796
       :sbd_enable => method(:sbd_enable),
672796
       :remove_stonith_watchdog_timeout=> method(:remove_stonith_watchdog_timeout),
672796
       :set_stonith_watchdog_timeout_to_zero => method(:set_stonith_watchdog_timeout_to_zero),
672796
-      # lib api:
672796
-      # /api/v1/sbd-enable-sbd/v1
672796
-      :remote_enable_sbd => method(:remote_enable_sbd),
672796
-      # lib api:
672796
-      # /api/v1/sbd-disable-sbd/v1
672796
-      :remote_disable_sbd => method(:remote_disable_sbd),
672796
       :qdevice_net_get_ca_certificate => method(:qdevice_net_get_ca_certificate),
672796
       # lib api:
672796
       # /api/v1/qdevice-qdevice-net-sign-certificate-request/v1
672796
@@ -100,9 +86,6 @@ def remote(params, request, auth_user)
672796
       # lib api:
672796
       # /api/v1/resource-agent-list-agents/v1
672796
       :get_avail_resource_agents => method(:get_avail_resource_agents),
672796
-      # lib api:
672796
-      # /api/v1/stonith-agent-list-agents/v1
672796
-      :get_avail_fence_agents => method(:get_avail_fence_agents),
672796
   }
672796
   remote_cmd_with_pacemaker = {
672796
       :pacemaker_node_status => method(:remote_pacemaker_node_status),
672796
@@ -159,18 +142,6 @@ def remote(params, request, auth_user)
672796
       :get_fence_agent_metadata => method(:get_fence_agent_metadata),
672796
       :manage_resource => method(:manage_resource),
672796
       :unmanage_resource => method(:unmanage_resource),
672796
-      # lib api:
672796
-      # /api/v1/alert-create-alert/v1
672796
-      :create_alert => method(:create_alert),
672796
-      # lib api:
672796
-      # /api/v1/alert-update-alert/v1
672796
-      :update_alert => method(:update_alert),
672796
-      :create_recipient => method(:create_recipient),
672796
-      :update_recipient => method(:update_recipient),
672796
-      # lib api:
672796
-      # /api/v1/alert-remove-alert/v1
672796
-      # /api/v1/alert-remove-recipient/v1
672796
-      :remove_alerts_and_recipients => method("remove_alerts_and_recipients"),
672796
   }
672796
 
672796
   command = params[:command].to_sym
672796
@@ -193,6 +164,24 @@ def remote(params, request, auth_user)
672796
   end
672796
 end
672796
 
672796
+def _get_param_list(params)
672796
+  param_line = []
672796
+  meta_options = []
672796
+  params.each { |param, val|
672796
+    if param.start_with?("_res_paramne_") or (param.start_with?("_res_paramempty_") and val != "")
672796
+      myparam = param.sub(/^_res_paramne_/,"").sub(/^_res_paramempty_/,"")
672796
+      param_line << "#{myparam}=#{val}"
672796
+    end
672796
+    if param == "disabled"
672796
+      meta_options << 'meta' << 'target-role=Stopped'
672796
+    end
672796
+    if param == "force" and val
672796
+      param_line << "--force"
672796
+    end
672796
+  }
672796
+  return param_line + meta_options
672796
+end
672796
+
672796
 def capabilities(params, request, auth_user)
672796
   return JSON.generate({
672796
     :pcsd_capabilities => CAPABILITIES_PCSD,
672796
@@ -394,53 +383,6 @@ def config_restore(params, request, auth_user)
672796
   end
672796
 end
672796
 
672796
-# TODO deprecated, remove, not used anymore
672796
-def node_restart(params, request, auth_user)
672796
-  if params[:name]
672796
-    code, response = send_request_with_token(
672796
-      auth_user, params[:name], 'node_restart', true
672796
-    )
672796
-  else
672796
-    if not allowed_for_local_cluster(auth_user, Permissions::WRITE)
672796
-      return 403, 'Permission denied'
672796
-    end
672796
-    $logger.info "Restarting Node"
672796
-    output =  `/sbin/reboot`
672796
-    $logger.debug output
672796
-    return output
672796
-  end
672796
-end
672796
-
672796
-def node_standby(params, request, auth_user)
672796
-  if params[:name]
672796
-    code, response = send_request_with_token(
672796
-      auth_user, params[:name], 'node_standby', true
672796
-    )
672796
-  else
672796
-    if not allowed_for_local_cluster(auth_user, Permissions::WRITE)
672796
-      return 403, 'Permission denied'
672796
-    end
672796
-    $logger.info "Standby Node"
672796
-    stdout, stderr, retval = run_cmd(auth_user, PCS, "node", "standby")
672796
-    return stdout
672796
-  end
672796
-end
672796
-
672796
-def node_unstandby(params, request, auth_user)
672796
-  if params[:name]
672796
-    code, response = send_request_with_token(
672796
-      auth_user, params[:name], 'node_unstandby', true
672796
-    )
672796
-  else
672796
-    if not allowed_for_local_cluster(auth_user, Permissions::WRITE)
672796
-      return 403, 'Permission denied'
672796
-    end
672796
-    $logger.info "Unstandby Node"
672796
-    stdout, stderr, retval = run_cmd(auth_user, PCS, "node", "unstandby")
672796
-    return stdout
672796
-  end
672796
-end
672796
-
672796
 def cluster_enable(params, request, auth_user)
672796
   if params[:name]
672796
     code, response = send_request_with_token(
672796
@@ -491,21 +433,6 @@ def get_quorum_info(params, request, auth_user)
672796
   end
672796
 end
672796
 
672796
-def get_cib(params, request, auth_user)
672796
-  if not allowed_for_local_cluster(auth_user, Permissions::READ)
672796
-    return 403, 'Permission denied'
672796
-  end
672796
-  cib, stderr, retval = run_cmd(auth_user, CIBADMIN, "-Ql")
672796
-  if retval != 0
672796
-    if not pacemaker_running?
672796
-      return [400, '{"pacemaker_not_running":true}']
672796
-    end
672796
-    return [500, "Unable to get CIB: " + cib.to_s + stderr.to_s]
672796
-  else
672796
-    return [200, cib]
672796
-  end
672796
-end
672796
-
672796
 def get_corosync_conf_remote(params, request, auth_user)
672796
   if not allowed_for_local_cluster(auth_user, Permissions::READ)
672796
     return 403, 'Permission denied'
672796
@@ -912,66 +839,6 @@ def node_status(params, request, auth_user)
672796
   return [400, "Unsupported version '#{version}' of status requested"]
672796
 end
672796
 
672796
-def status_all(params, request, auth_user, nodes=[], dont_update_config=false)
672796
-  if nodes == nil
672796
-    return JSON.generate({"error" => "true"})
672796
-  end
672796
-
672796
-  final_response = {}
672796
-  threads = []
672796
-  forbidden_nodes = {}
672796
-  nodes.each {|node|
672796
-    threads << Thread.new(Thread.current[:pcsd_logger_container]) { |logger|
672796
-      Thread.current[:pcsd_logger_container] = logger
672796
-      code, response = send_request_with_token(auth_user, node, 'status')
672796
-      if 403 == code
672796
-        forbidden_nodes[node] = true
672796
-      end
672796
-      begin
672796
-        final_response[node] = JSON.parse(response)
672796
-      rescue JSON::ParserError => e
672796
-        final_response[node] = {"bad_json" => true}
672796
-        $logger.info("ERROR: Parse Error when parsing status JSON from #{node}")
672796
-      end
672796
-      if final_response[node] and final_response[node]["notoken"] == true
672796
-        $logger.error("ERROR: bad token for #{node}")
672796
-      end
672796
-    }
672796
-  }
672796
-  threads.each { |t| t.join }
672796
-  if forbidden_nodes.length > 0
672796
-    return 403, 'Permission denied'
672796
-  end
672796
-
672796
-  # Get full list of nodes and see if we need to update the configuration
672796
-  node_list = []
672796
-  final_response.each { |fr,n|
672796
-    node_list += n["corosync_offline"] if n["corosync_offline"]
672796
-    node_list += n["corosync_online"] if n["corosync_online"]
672796
-    node_list += n["pacemaker_offline"] if n["pacemaker_offline"]
672796
-    node_list += n["pacemaker_online"] if n["pacemaker_online"]
672796
-  }
672796
-
672796
-  node_list.uniq!
672796
-  if node_list.length > 0
672796
-    config = PCSConfig.new(Cfgsync::PcsdSettings.from_file().text())
672796
-    old_node_list = config.get_nodes(params[:cluster])
672796
-    if !(dont_update_config or config.cluster_nodes_equal?(params[:cluster], node_list))
672796
-      $logger.info("Updating node list for: #{params[:cluster]} #{old_node_list}->#{node_list}")
672796
-      config.update_cluster(params[:cluster], node_list)
672796
-      sync_config = Cfgsync::PcsdSettings.from_text(config.text())
672796
-      # on version conflict just go on, config will be corrected eventually
672796
-      # by displaying the cluster in the web UI
672796
-      Cfgsync::save_sync_new_version(
672796
-        sync_config, get_corosync_nodes_names(), $cluster_name, true
672796
-      )
672796
-      return status_all(params, request, auth_user, node_list, true)
672796
-    end
672796
-  end
672796
-  $logger.debug("NODE LIST: " + node_list.inspect)
672796
-  return JSON.generate(final_response)
672796
-end
672796
-
672796
 def imported_cluster_list(params, request, auth_user)
672796
   config = PCSConfig.new(Cfgsync::PcsdSettings.from_file().text())
672796
   imported_clusters = {"cluster_list" => []}
672796
@@ -981,173 +848,6 @@ def imported_cluster_list(params, request, auth_user)
672796
   return JSON.generate(imported_clusters)
672796
 end
672796
 
672796
-def clusters_overview(params, request, auth_user)
672796
-  cluster_map = {}
672796
-  forbidden_clusters = {}
672796
-  threads = []
672796
-  config = PCSConfig.new(Cfgsync::PcsdSettings.from_file().text())
672796
-  config.clusters.each { |cluster|
672796
-    threads << Thread.new(Thread.current[:pcsd_logger_container]) { |logger|
672796
-      Thread.current[:pcsd_logger_container] = logger
672796
-      cluster_map[cluster.name] = {
672796
-        'cluster_name' => cluster.name,
672796
-        'error_list' => [
672796
-          {'message' => 'Unable to connect to the cluster. Request timeout.'}
672796
-        ],
672796
-        'warning_list' => [],
672796
-        'status' => 'unknown',
672796
-        'node_list' => get_default_overview_node_list(cluster.name),
672796
-        'resource_list' => []
672796
-      }
672796
-      overview_cluster = nil
672796
-      online, offline, not_authorized_nodes = is_auth_against_nodes(
672796
-        auth_user,
672796
-        get_cluster_nodes(cluster.name),
672796
-        3
672796
-      )
672796
-      not_supported = false
672796
-      forbidden = false
672796
-      cluster_nodes_auth = (online + offline).uniq
672796
-      cluster_nodes_all = (cluster_nodes_auth + not_authorized_nodes).uniq
672796
-      nodes_not_in_cluster = []
672796
-      for node in cluster_nodes_auth
672796
-        code, response = send_request_with_token(
672796
-          auth_user, node, 'cluster_status', true, {}, true, nil, 8
672796
-        )
672796
-        if code == 404
672796
-          not_supported = true
672796
-          next
672796
-        end
672796
-        if 403 == code
672796
-          forbidden = true
672796
-          forbidden_clusters[cluster.name] = true
672796
-          break
672796
-        end
672796
-        begin
672796
-          parsed_response = JSON.parse(response)
672796
-          if parsed_response['noresponse'] or parsed_response['pacemaker_not_running']
672796
-            next
672796
-          elsif parsed_response['notoken'] or parsed_response['notauthorized']
672796
-            next
672796
-          elsif parsed_response['cluster_name'] != cluster.name
672796
-            # queried node is not in the cluster (any more)
672796
-            nodes_not_in_cluster << node
672796
-            next
672796
-          else
672796
-            overview_cluster = parsed_response
672796
-            break
672796
-          end
672796
-        rescue JSON::ParserError
672796
-        end
672796
-      end
672796
-
672796
-      if cluster_nodes_all.sort == nodes_not_in_cluster.sort
672796
-        overview_cluster = {
672796
-          'cluster_name' => cluster.name,
672796
-          'error_list' => [],
672796
-          'warning_list' => [],
672796
-          'status' => 'unknown',
672796
-          'node_list' => [],
672796
-          'resource_list' => []
672796
-        }
672796
-      end
672796
-
672796
-      if not overview_cluster
672796
-        overview_cluster = {
672796
-          'cluster_name' => cluster.name,
672796
-          'error_list' => [],
672796
-          'warning_list' => [],
672796
-          'status' => 'unknown',
672796
-          'node_list' => get_default_overview_node_list(cluster.name),
672796
-          'resource_list' => []
672796
-        }
672796
-        if not_supported
672796
-          overview_cluster['warning_list'] = [
672796
-            {
672796
-              'message' => 'Cluster is running an old version of pcs/pcsd which does not provide data for the dashboard.',
672796
-            },
672796
-          ]
672796
-        else
672796
-          if forbidden
672796
-            overview_cluster['error_list'] = [
672796
-              {
672796
-                'message' => 'You do not have permissions to view the cluster.',
672796
-                'type' => 'forbidden',
672796
-              },
672796
-            ]
672796
-            overview_cluster['node_list'] = []
672796
-          else
672796
-            overview_cluster['error_list'] = [
672796
-              {
672796
-                'message' => 'Unable to connect to the cluster.',
672796
-              },
672796
-            ]
672796
-          end
672796
-        end
672796
-      end
672796
-      if not_authorized_nodes.length > 0
672796
-        overview_cluster['warning_list'] << {
672796
-          'message' => 'GUI is not authorized against node(s) '\
672796
-            + not_authorized_nodes.join(', '),
672796
-          'type' => 'nodes_not_authorized',
672796
-          'node_list' => not_authorized_nodes,
672796
-        }
672796
-      end
672796
-
672796
-      overview_cluster['node_list'].each { |node|
672796
-        if node['status_version'] == '1'
672796
-          overview_cluster['warning_list'] << {
672796
-            :message => 'Some nodes are running old version of pcs/pcsd.'
672796
-          }
672796
-          break
672796
-        end
672796
-      }
672796
-
672796
-      cluster_map[cluster.name] = overview_cluster
672796
-    }
672796
-  }
672796
-
672796
-  begin
672796
-    Timeout::timeout(18) {
672796
-      threads.each { |t| t.join }
672796
-    }
672796
-  rescue Timeout::Error
672796
-    threads.each { |t| t.exit }
672796
-  end
672796
-
672796
-  # update clusters in PCSConfig
672796
-  not_current_data = false
672796
-  config = PCSConfig.new(Cfgsync::PcsdSettings.from_file().text())
672796
-  cluster_map.each { |cluster, values|
672796
-    next if forbidden_clusters[cluster]
672796
-    nodes = []
672796
-    values['node_list'].each { |node|
672796
-      nodes << node['name']
672796
-    }
672796
-    if !config.cluster_nodes_equal?(cluster, nodes)
672796
-      $logger.info("Updating node list for: #{cluster} #{config.get_nodes(cluster)}->#{nodes}")
672796
-      config.update_cluster(cluster, nodes)
672796
-      not_current_data = true
672796
-    end
672796
-  }
672796
-  if not_current_data
672796
-    sync_config = Cfgsync::PcsdSettings.from_text(config.text())
672796
-    # on version conflict just go on, config will be corrected eventually
672796
-    # by displaying the cluster in the web UI
672796
-    Cfgsync::save_sync_new_version(
672796
-      sync_config, get_corosync_nodes_names(), $cluster_name, true
672796
-    )
672796
-  end
672796
-
672796
-  overview = {
672796
-    'not_current_data' => not_current_data,
672796
-    'cluster_list' => cluster_map.values.sort { |a, b|
672796
-      a['clustername'] <=> b['clustername']
672796
-    }
672796
-  }
672796
-  return JSON.generate(overview)
672796
-end
672796
-
672796
 def auth(params, request, auth_user)
672796
   # User authentication using username and password is done in python part of
672796
   # pcsd. We will get here only if credentials are correct, so we just need to
672796
@@ -1220,7 +920,7 @@ def update_resource (params, request, auth_user)
672796
     return 403, 'Permission denied'
672796
   end
672796
 
672796
-  param_line = getParamList(params)
672796
+  param_line = _get_param_list(params)
672796
   if not params[:resource_id]
672796
     cmd = [PCS, "resource", "create", params[:name], params[:resource_type]]
672796
     cmd += param_line
672796
@@ -1320,7 +1020,7 @@ def update_fence_device(params, request, auth_user)
672796
 
672796
   $logger.info "Updating fence device"
672796
   $logger.info params
672796
-  param_line = getParamList(params)
672796
+  param_line = _get_param_list(params)
672796
   $logger.info param_line
672796
 
672796
   if not params[:resource_id]
672796
@@ -1353,14 +1053,6 @@ def get_avail_resource_agents(params, request, auth_user)
672796
   return JSON.generate(getResourceAgents(auth_user).map{|a| [a, get_resource_agent_name_structure(a)]}.to_h)
672796
 end
672796
 
672796
-def get_avail_fence_agents(params, request, auth_user)
672796
-  if not allowed_for_local_cluster(auth_user, Permissions::READ)
672796
-    return 403, 'Permission denied'
672796
-  end
672796
-  agents = getFenceAgents(auth_user)
672796
-  return JSON.generate(agents)
672796
-end
672796
-
672796
 def remove_resource(params, request, auth_user)
672796
   if not allowed_for_local_cluster(auth_user, Permissions::WRITE)
672796
     return 403, 'Permission denied'
672796
@@ -1740,18 +1432,6 @@ def update_cluster_settings(params, request, auth_user)
672796
   to_update = []
672796
   current = getAllSettings(auth_user)
672796
 
672796
-  # We need to be able to set cluster properties also from older version GUI.
672796
-  # This code handles proper processing of checkboxes.
672796
-  # === backward compatibility layer start ===
672796
-  params['hidden'].each { |prop, val|
672796
-    next if prop == 'hidden_input'
672796
-    unless properties.include?(prop)
672796
-      properties[prop] = val
672796
-      to_update << prop
672796
-    end
672796
-  }
672796
-  # === backward compatibility layer end ===
672796
-
672796
   properties.each { |prop, val|
672796
     val.strip!
672796
     if not current.include?(prop) and val != '' # add
672796
@@ -2236,62 +1916,6 @@ def set_stonith_watchdog_timeout_to_zero(param, request, auth_user)
672796
   end
672796
 end
672796
 
672796
-def remote_enable_sbd(params, request, auth_user)
672796
-  unless allowed_for_local_cluster(auth_user, Permissions::WRITE)
672796
-    return 403, 'Permission denied'
672796
-  end
672796
-
672796
-  arg_list = []
672796
-
672796
-  if ['true', '1', 'on'].include?(params[:ignore_offline_nodes])
672796
-    arg_list << '--skip-offline'
672796
-  end
672796
-
672796
-  params[:watchdog].each do |node, watchdog|
672796
-    unless watchdog.strip.empty?
672796
-      arg_list << "watchdog=#{watchdog.strip}@#{node}"
672796
-    end
672796
-  end
672796
-
672796
-  params[:config].each do |option, value|
672796
-    unless value.empty?
672796
-      arg_list << "#{option}=#{value}"
672796
-    end
672796
-  end
672796
-
672796
-  _, stderr, retcode = run_cmd(
672796
-    auth_user, PCS, 'stonith', 'sbd', 'enable', *arg_list
672796
-  )
672796
-
672796
-  if retcode != 0
672796
-    return [400, "Unable to enable sbd in cluster:\n#{stderr.join('')}"]
672796
-  end
672796
-
672796
-  return [200, 'Sbd has been enabled.']
672796
-end
672796
-
672796
-def remote_disable_sbd(params, request, auth_user)
672796
-  unless allowed_for_local_cluster(auth_user, Permissions::WRITE)
672796
-    return 403, 'Permission denied'
672796
-  end
672796
-
672796
-  arg_list = []
672796
-
672796
-  if ['true', '1', 'on'].include?(params[:ignore_offline_nodes])
672796
-    arg_list << '--skip-offline'
672796
-  end
672796
-
672796
-  _, stderr, retcode = run_cmd(
672796
-    auth_user, PCS, 'stonith', 'sbd', 'disable', *arg_list
672796
-  )
672796
-
672796
-  if retcode != 0
672796
-    return [400, "Unable to disable sbd in cluster:\n#{stderr.join('')}"]
672796
-  end
672796
-
672796
-  return [200, 'Sbd has been disabled.']
672796
-end
672796
-
672796
 def qdevice_net_get_ca_certificate(params, request, auth_user)
672796
   unless allowed_for_local_cluster(auth_user, Permissions::READ)
672796
     return 403, 'Permission denied'
672796
@@ -2697,145 +2321,6 @@ def manage_services(params, request, auth_user)
672796
   end
672796
 end
672796
 
672796
-def _hash_to_argument_list(hash)
672796
-  result = []
672796
-  if hash.kind_of?(Hash)
672796
-    hash.each {|key, value|
672796
-      value = '' if value.nil?
672796
-      result << "#{key}=#{value}"
672796
-    }
672796
-  end
672796
-  return result
672796
-end
672796
-
672796
-def create_alert(params, request, auth_user)
672796
-  unless allowed_for_local_cluster(auth_user, Permissions::WRITE)
672796
-    return 403, 'Permission denied'
672796
-  end
672796
-  path = params[:path]
672796
-  unless path
672796
-    return [400, 'Missing required parameter: path']
672796
-  end
672796
-  alert_id = params[:alert_id]
672796
-  description = params[:description]
672796
-  meta_attr_list = _hash_to_argument_list(params[:meta_attr])
672796
-  instance_attr_list = _hash_to_argument_list(params[:instance_attr])
672796
-  cmd = [PCS, 'alert', 'create', "path=#{path}"]
672796
-  cmd << "id=#{alert_id}" if alert_id and alert_id != ''
672796
-  cmd << "description=#{description}" if description and description != ''
672796
-  cmd += ['options', *instance_attr_list] if instance_attr_list.any?
672796
-  cmd += ['meta', *meta_attr_list] if meta_attr_list.any?
672796
-  output, stderr, retval = run_cmd(auth_user, *cmd)
672796
-  if retval != 0
672796
-    return [400, "Unable to create alert: #{stderr.join("\n")}"]
672796
-  end
672796
-  return [200, 'Alert created']
672796
-end
672796
-
672796
-def update_alert(params, request, auth_user)
672796
-  unless allowed_for_local_cluster(auth_user, Permissions::WRITE)
672796
-    return 403, 'Permission denied'
672796
-  end
672796
-  alert_id = params[:alert_id]
672796
-  unless alert_id
672796
-    return [400, 'Missing required parameter: alert_id']
672796
-  end
672796
-  path = params[:path]
672796
-  description = params[:description]
672796
-  meta_attr_list = _hash_to_argument_list(params[:meta_attr])
672796
-  instance_attr_list = _hash_to_argument_list(params[:instance_attr])
672796
-  cmd = [PCS, 'alert', 'update', alert_id]
672796
-  cmd << "path=#{path}" if path
672796
-  cmd << "description=#{description}" if description
672796
-  cmd += ['options', *instance_attr_list] if instance_attr_list.any?
672796
-  cmd += ['meta', *meta_attr_list] if meta_attr_list.any?
672796
-  output, stderr, retval = run_cmd(auth_user, *cmd)
672796
-  if retval != 0
672796
-    return [400, "Unable to update alert: #{stderr.join("\n")}"]
672796
-  end
672796
-  return [200, 'Alert updated']
672796
-end
672796
-
672796
-def remove_alerts_and_recipients(params, request, auth_user)
672796
-  unless allowed_for_local_cluster(auth_user, Permissions::WRITE)
672796
-    return 403, 'Permission denied'
672796
-  end
672796
-  alert_list = params[:alert_list]
672796
-  recipient_list = params[:recipient_list]
672796
-  if recipient_list.kind_of?(Array) and recipient_list.any?
672796
-    output, stderr, retval = run_cmd(
672796
-      auth_user, PCS, 'alert', 'recipient', 'remove', *recipient_list
672796
-    )
672796
-    if retval != 0
672796
-      return [400, "Unable to remove recipients: #{stderr.join("\n")}"]
672796
-    end
672796
-  end
672796
-  if alert_list.kind_of?(Array) and alert_list.any?
672796
-    output, stderr, retval = run_cmd(
672796
-      auth_user, PCS, 'alert', 'remove', *alert_list
672796
-    )
672796
-    if retval != 0
672796
-      return [400, "Unable to remove alerts: #{stderr.join("\n")}"]
672796
-    end
672796
-  end
672796
-  return [200, 'All removed']
672796
-end
672796
-
672796
-def create_recipient(params, request, auth_user)
672796
-  unless allowed_for_local_cluster(auth_user, Permissions::WRITE)
672796
-    return 403, 'Permission denied'
672796
-  end
672796
-  alert_id = params[:alert_id]
672796
-  if not alert_id or alert_id.strip! == ''
672796
-    return [400, 'Missing required paramter: alert_id']
672796
-  end
672796
-  value = params[:value]
672796
-  if not value or value == ''
672796
-    return [400, 'Missing required paramter: value']
672796
-  end
672796
-  recipient_id = params[:recipient_id]
672796
-  description = params[:description]
672796
-  meta_attr_list = _hash_to_argument_list(params[:meta_attr])
672796
-  instance_attr_list = _hash_to_argument_list(params[:instance_attr])
672796
-  cmd = [PCS, 'alert', 'recipient', 'add', alert_id, "value=#{value}"]
672796
-  cmd << "id=#{recipient_id}" if recipient_id and recipient_id != ''
672796
-  cmd << "description=#{description}" if description and description != ''
672796
-  cmd += ['options', *instance_attr_list] if instance_attr_list.any?
672796
-  cmd += ['meta', *meta_attr_list] if meta_attr_list.any?
672796
-  output, stderr, retval = run_cmd(auth_user, *cmd)
672796
-  if retval != 0
672796
-    return [400, "Unable to create recipient: #{stderr.join("\n")}"]
672796
-  end
672796
-  return [200, 'Recipient created']
672796
-end
672796
-
672796
-def update_recipient(params, request, auth_user)
672796
-  unless allowed_for_local_cluster(auth_user, Permissions::WRITE)
672796
-    return 403, 'Permission denied'
672796
-  end
672796
-  recipient_id = params[:recipient_id]
672796
-  if not recipient_id or recipient_id.strip! == ''
672796
-    return [400, 'Missing required paramter: recipient_id']
672796
-  end
672796
-  value = params[:value]
672796
-  if value and value.strip! == ''
672796
-    return [400, 'Parameter value canot be empty string']
672796
-  end
672796
-  description = params[:description]
672796
-  meta_attr_list = _hash_to_argument_list(params[:meta_attr])
672796
-  instance_attr_list = _hash_to_argument_list(params[:instance_attr])
672796
-  cmd = [PCS, 'alert', 'recipient', 'update', recipient_id]
672796
-  cmd << "value=#{value}" if value
672796
-  cmd << "description=#{description}" if description
672796
-  cmd += ['options', *instance_attr_list] if instance_attr_list.any?
672796
-  cmd += ['meta', *meta_attr_list] if meta_attr_list.any?
672796
-  output, stderr, retval = run_cmd(auth_user, *cmd)
672796
-  if retval != 0
672796
-    return [400, "Unable to update recipient: #{stderr.join("\n")}"]
672796
-  end
672796
-  return [200, 'Recipient updated']
672796
-end
672796
-
672796
 def pcsd_success(msg)
672796
   $logger.info(msg)
672796
   return [200, msg]
672796
diff --git a/pcsd/resource.rb b/pcsd/resource.rb
672796
index e49422f8..27894cc9 100644
672796
--- a/pcsd/resource.rb
672796
+++ b/pcsd/resource.rb
672796
@@ -103,11 +103,8 @@ def get_resource_agent_name_structure(agent_name)
672796
     match = expression.match(agent_name)
672796
     if match
672796
       provider = match.names.include?('provider') ? match[:provider] : nil
672796
-      class_provider = provider.nil? ? match[:standard] : "#{match[:standard]}:#{provider}"
672796
       return {
672796
         :full_name => agent_name,
672796
-        # TODO remove, this is only used by the old web UI
672796
-        :class_provider => class_provider,
672796
         :class => match[:standard],
672796
         :provider => provider,
672796
         :type => match[:type],
672796
diff --git a/pcsd/rserver.rb b/pcsd/rserver.rb
672796
index c37f9df4..e2c5e2a1 100644
672796
--- a/pcsd/rserver.rb
672796
+++ b/pcsd/rserver.rb
672796
@@ -26,7 +26,6 @@ class TornadoCommunicationMiddleware
672796
           session = JSON.parse(Base64.strict_decode64(env["HTTP_X_PCSD_PAYLOAD"]))
672796
           Thread.current[:tornado_username] = session["username"]
672796
           Thread.current[:tornado_groups] = session["groups"]
672796
-          Thread.current[:tornado_is_authenticated] = session["is_authenticated"]
672796
         end
672796
 
672796
         status, headers, body = @app.call(env)
672796
diff --git a/pcsd/test/test_resource.rb b/pcsd/test/test_resource.rb
672796
index 1eb0d3aa..97679eca 100644
672796
--- a/pcsd/test/test_resource.rb
672796
+++ b/pcsd/test/test_resource.rb
672796
@@ -8,7 +8,6 @@ class GetResourceAgentNameStructure < Test::Unit::TestCase
672796
       get_resource_agent_name_structure('standard:provider:type'),
672796
       {
672796
         :full_name => 'standard:provider:type',
672796
-        :class_provider => 'standard:provider',
672796
         :class => 'standard',
672796
         :provider => 'provider',
672796
         :type => 'type',
672796
@@ -21,7 +20,6 @@ class GetResourceAgentNameStructure < Test::Unit::TestCase
672796
       get_resource_agent_name_structure('standard:type'),
672796
       {
672796
         :full_name => 'standard:type',
672796
-        :class_provider => 'standard',
672796
         :class => 'standard',
672796
         :provider => nil,
672796
         :type => 'type',
672796
@@ -34,7 +32,6 @@ class GetResourceAgentNameStructure < Test::Unit::TestCase
672796
       get_resource_agent_name_structure('systemd:service@instance:name'),
672796
       {
672796
         :full_name => 'systemd:service@instance:name',
672796
-        :class_provider => 'systemd',
672796
         :class => 'systemd',
672796
         :provider => nil,
672796
         :type => 'service@instance:name',
672796
@@ -47,7 +44,6 @@ class GetResourceAgentNameStructure < Test::Unit::TestCase
672796
       get_resource_agent_name_structure('service:service@instance:name'),
672796
       {
672796
         :full_name => 'service:service@instance:name',
672796
-        :class_provider => 'service',
672796
         :class => 'service',
672796
         :provider => nil,
672796
         :type => 'service@instance:name',
672796
-- 
672796
2.31.1
672796