From 672796f7d2cece0c5fc1554650fa2950315a6b63 Mon Sep 17 00:00:00 2001 From: CentOS Sources Date: Nov 04 2021 09:25:20 +0000 Subject: import pcs-0.11.1-3.el9_b --- diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..71afafe --- /dev/null +++ b/.gitignore @@ -0,0 +1,22 @@ +SOURCES/backports-3.17.2.gem +SOURCES/dacite-1.6.0.tar.gz +SOURCES/daemons-1.3.1.gem +SOURCES/ethon-0.12.0.gem +SOURCES/eventmachine-1.2.7.gem +SOURCES/ffi-1.13.1.gem +SOURCES/mustermann-1.1.1.gem +SOURCES/open4-1.3.4-1.gem +SOURCES/pcs-0.11.1.alpha.1.tar.gz +SOURCES/pcs-web-ui-0.1.9.tar.gz +SOURCES/pcs-web-ui-node-modules-0.1.9.tar.xz +SOURCES/pyagentx-0.4.pcs.2.tar.gz +SOURCES/rack-2.2.3.gem +SOURCES/rack-protection-2.0.8.1.gem +SOURCES/rack-test-1.1.0.gem +SOURCES/rexml-3.2.5.gem +SOURCES/ruby2_keywords-0.0.2.gem +SOURCES/sinatra-2.0.8.1.gem +SOURCES/thin-1.7.2.gem +SOURCES/tilt-2.0.10.gem +SOURCES/tornado-6.1.0.tar.gz +SOURCES/webrick-1.7.0.gem diff --git a/.pcs.metadata b/.pcs.metadata new file mode 100644 index 0000000..fe99079 --- /dev/null +++ b/.pcs.metadata @@ -0,0 +1,22 @@ +28b63a742124da6c9575a1c5e7d7331ef93600b2 SOURCES/backports-3.17.2.gem +31546c37fbdc6270d5097687619e9c0db6f1c05c SOURCES/dacite-1.6.0.tar.gz +e28c1e78d1a6e34e80f4933b494f1e0501939dd3 SOURCES/daemons-1.3.1.gem +921ef1be44583a7644ee7f20fe5f26f21d018a04 SOURCES/ethon-0.12.0.gem +7a5b2896e210fac9759c786ee4510f265f75b481 SOURCES/eventmachine-1.2.7.gem +cfa25e7a3760c3ec16723cb8263d9b7a52d0eadf SOURCES/ffi-1.13.1.gem +50a4e37904485810cb05e27d75c9783e5a8f3402 SOURCES/mustermann-1.1.1.gem +41a7fe9f8e3e02da5ae76c821b89c5b376a97746 SOURCES/open4-1.3.4-1.gem +ce3598a2d60895cf66487dc0b6715acfc284c769 SOURCES/pcs-0.11.1.alpha.1.tar.gz +c7effa066c968a3e5f01cfbfe8cedeb22664cef5 SOURCES/pcs-web-ui-0.1.9.tar.gz +81b6170592cdea9272699d7ec48f3624d2f36269 SOURCES/pcs-web-ui-node-modules-0.1.9.tar.xz +3176b2f2b332c2b6bf79fe882e83feecf3d3f011 SOURCES/pyagentx-0.4.pcs.2.tar.gz +345b7169d4d2d62176a225510399963bad62b68f SOURCES/rack-2.2.3.gem +1f046e23baca8beece3b38c60382f44aa2b2cb41 SOURCES/rack-protection-2.0.8.1.gem +b80bc5ca38a885e747271675ba91dd3d02136bf1 SOURCES/rack-test-1.1.0.gem +e7f48fa5fb2d92e6cb21d6b1638fe41a5a7c4287 SOURCES/rexml-3.2.5.gem +0be571aacb5d6a212a30af3f322a7000d8af1ef9 SOURCES/ruby2_keywords-0.0.2.gem +04cca7a5d9d641fe076e4e24dc5b6ff31922f4c3 SOURCES/sinatra-2.0.8.1.gem +41395e86322ffd31f3a7aef1f697bda3e1e2d6b9 SOURCES/thin-1.7.2.gem +d265c822a6b228392d899e9eb5114613d65e6967 SOURCES/tilt-2.0.10.gem +c23c617c7a0205e465bebad5b8cdf289ae8402a2 SOURCES/tornado-6.1.0.tar.gz +10ba51035928541b7713415f1f2e3a41114972fc SOURCES/webrick-1.7.0.gem diff --git a/SOURCES/do-not-support-cluster-setup-with-udp-u-transport.patch b/SOURCES/do-not-support-cluster-setup-with-udp-u-transport.patch new file mode 100644 index 0000000..c486138 --- /dev/null +++ b/SOURCES/do-not-support-cluster-setup-with-udp-u-transport.patch @@ -0,0 +1,38 @@ +From e5413e1afa3114673867bc6b3037434bb4109ce9 Mon Sep 17 00:00:00 2001 +From: Ivan Devat +Date: Tue, 20 Nov 2018 15:03:56 +0100 +Subject: [PATCH] do not support cluster setup with udp(u) transport in RHEL9 + +--- + pcs/pcs.8.in | 2 ++ + pcs/usage.py | 1 + + 2 files changed, 3 insertions(+) + +diff --git a/pcs/pcs.8.in b/pcs/pcs.8.in +index 77201a90..b311bee3 100644 +--- a/pcs/pcs.8.in ++++ b/pcs/pcs.8.in +@@ -451,6 +451,8 @@ By default, encryption is enabled with cipher=aes256 and hash=sha256. To disable + + Transports udp and udpu: + .br ++WARNING: These transports are not supported in RHEL 9. 
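.\" An illustrative sketch, not part of the upstream hunk: the default
.\" knet transport remains supported on RHEL 9, e.g.
.\"   pcs cluster setup mycluster node1 node2 transport knet
.\" (the cluster and node names here are assumed placeholders).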
++.br + These transports are limited to one address per node. They do not support traffic encryption nor compression. + .br + Transport options are: ip_version, netmtu +diff --git a/pcs/usage.py b/pcs/usage.py +index ccab8f93..055556c8 100644 +--- a/pcs/usage.py ++++ b/pcs/usage.py +@@ -903,6 +903,7 @@ Commands: + hash=sha256. To disable encryption, set cipher=none and hash=none. + + Transports udp and udpu: ++ WARNING: These transports are not supported in RHEL 9. + These transports are limited to one address per node. They do not + support traffic encryption nor compression. + Transport options are: +-- +2.31.1 + diff --git a/SOURCES/fix-changelog.patch b/SOURCES/fix-changelog.patch new file mode 100644 index 0000000..2645101 --- /dev/null +++ b/SOURCES/fix-changelog.patch @@ -0,0 +1,24 @@ +From 2bf9e3cbcd27405bcea019de6026d6d8400ac1a3 Mon Sep 17 00:00:00 2001 +From: Miroslav Lisik +Date: Thu, 26 Aug 2021 16:46:05 +0200 +Subject: [PATCH 2/2] fix changelog + +--- + CHANGELOG.md | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/CHANGELOG.md b/CHANGELOG.md +index 0a049fc2..8c31cbb3 100644 +--- a/CHANGELOG.md ++++ b/CHANGELOG.md +@@ -1,6 +1,6 @@ + # Change Log + +-## [0.11.1.alpha.1] - 2021-08-26 ++## [Unreleased] + + ### Added + - Explicit confirmation is now required to prevent accidental destroying +-- +2.31.1 + diff --git a/SOURCES/fix-version.patch b/SOURCES/fix-version.patch new file mode 100644 index 0000000..c45474e --- /dev/null +++ b/SOURCES/fix-version.patch @@ -0,0 +1,19 @@ +From aef3d9f6f4e8c0119497f1ff29439c1e96cb6c04 Mon Sep 17 00:00:00 2001 +From: Miroslav Lisik +Date: Thu, 26 Aug 2021 16:36:19 +0200 +Subject: [PATCH] fix version + +--- + .gitarchivever | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/.gitarchivever b/.gitarchivever +index d9441fc..3c5bc79 100644 +--- a/.gitarchivever ++++ b/.gitarchivever +@@ -1 +1 @@ +-ref names: (HEAD -> master, tag: v0.11.1.alpha.1) ++ref names: (HEAD -> master, tag: v0.11.1) +-- +2.31.1 + diff --git a/SOURCES/update.patch b/SOURCES/update.patch new file mode 100644 index 0000000..6064290 --- /dev/null +++ b/SOURCES/update.patch @@ -0,0 +1,3261 @@ +From db91fd68f1baa7b19f06dc8156822430decce4e7 Mon Sep 17 00:00:00 2001 +From: Miroslav Lisik +Date: Thu, 2 Sep 2021 10:29:59 +0200 +Subject: [PATCH 1/2] update + +--- + Makefile.am | 9 +- + configure.ac | 5 + + pcs/config.py | 13 +- + pcs/lib/communication/corosync.py | 8 +- + pcs/utils.py | 4 +- + pcs_test/suite.py | 70 ++ + .../cluster/test_add_nodes_validation.py | 18 +- + .../test_stonith_update_scsi_devices.py | 11 +- + pcs_test/tier0/lib/test_env_corosync.py | 618 ++++++++-------- + pcs_test/tier1/legacy/test_constraints.py | 76 +- + pcs_test/tier1/legacy/test_resource.py | 48 +- + pcs_test/tier1/legacy/test_stonith.py | 71 +- + .../tools/command_env/config_http_corosync.py | 23 +- + pcs_test/tools/fixture_cib.py | 65 ++ + pcsd/Makefile.am | 1 - + pcsd/capabilities.xml | 7 - + pcsd/fenceagent.rb | 59 -- + pcsd/pcs.rb | 15 - + pcsd/pcsd.rb | 671 +----------------- + pcsd/remote.rb | 559 +-------------- + pcsd/resource.rb | 3 - + pcsd/rserver.rb | 1 - + pcsd/test/test_resource.rb | 4 - + 23 files changed, 634 insertions(+), 1725 deletions(-) + delete mode 100644 pcsd/fenceagent.rb + +diff --git a/Makefile.am b/Makefile.am +index 6aede970..34692969 100644 +--- a/Makefile.am ++++ b/Makefile.am +@@ -188,8 +188,13 @@ endif + + pylint: + if DEV_TESTS ++if PARALLEL_PYLINT ++pylint_options = --jobs=0 ++else ++pylint_options = ++endif + export 
PYTHONPATH=${abs_top_builddir}/${PCS_BUNDLED_DIR_LOCAL}/packages && \ +- $(TIME) $(PYTHON) -m pylint --rcfile pylintrc --persistent=n --reports=n --score=n --disable similarities ${PCS_PYTHON_PACKAGES} ++ $(TIME) $(PYTHON) -m pylint --rcfile pylintrc --persistent=n --reports=n --score=n --disable similarities ${pylint_options} ${PCS_PYTHON_PACKAGES} + endif + + +@@ -213,7 +218,7 @@ endif + + tests_tier0: + export PYTHONPATH=${abs_top_builddir}/${PCS_BUNDLED_DIR_LOCAL}/packages && \ +- $(PYTHON) ${abs_builddir}/pcs_test/suite.py $(python_test_options) --tier0 ++ $(PYTHON) ${abs_builddir}/pcs_test/suite.py ${python_test_options} --tier0 + + tests_tier1: + if EXECUTE_TIER1_TESTS +diff --git a/configure.ac b/configure.ac +index f7b9d1ad..75d65616 100644 +--- a/configure.ac ++++ b/configure.ac +@@ -148,6 +148,11 @@ AC_ARG_ENABLE([parallel-tests], + [parallel_tests="yes"]) + AM_CONDITIONAL([PARALLEL_TESTS], [test "x$parallel_tests" = "xyes"]) + ++AC_ARG_ENABLE([parallel-pylint], ++ [AS_HELP_STRING([--enable-parallel-pylint], [Enable running pylint in multiple threads (default: no)])], ++ [parallel_pylint="yes"]) ++AM_CONDITIONAL([PARALLEL_PYLINT], [test "x$parallel_pylint" = "xyes"]) ++ + AC_ARG_ENABLE([local-build], + [AS_HELP_STRING([--enable-local-build], [Download and install all dependencies as user / bundles])], + [local_build="yes"]) +diff --git a/pcs/config.py b/pcs/config.py +index a0290499..a3e7e164 100644 +--- a/pcs/config.py ++++ b/pcs/config.py +@@ -345,12 +345,13 @@ def config_restore_remote(infile_name, infile_obj): + err_msgs.append(output) + continue + _status = json.loads(output) +- if ( +- _status["corosync"] +- or _status["pacemaker"] +- or +- # not supported by older pcsd, do not fail if not present +- _status.get("pacemaker_remote", False) ++ if any( ++ _status["node"]["services"][service_name]["running"] ++ for service_name in ( ++ "corosync", ++ "pacemaker", ++ "pacemaker_remote", ++ ) + ): + err_msgs.append( + "Cluster is currently running on node %s. 
You need to stop " +diff --git a/pcs/lib/communication/corosync.py b/pcs/lib/communication/corosync.py +index fab8e38f..e2a2949c 100644 +--- a/pcs/lib/communication/corosync.py ++++ b/pcs/lib/communication/corosync.py +@@ -28,7 +28,7 @@ class CheckCorosyncOffline( + self._set_skip_offline(skip_offline_targets) + + def _get_request_data(self): +- return RequestData("remote/status") ++ return RequestData("remote/status", [("version", "2")]) + + def _process_response(self, response): + report_item = self._get_response_report(response) +@@ -53,7 +53,7 @@ class CheckCorosyncOffline( + return + try: + status = response.data +- if not json.loads(status)["corosync"]: ++ if not json.loads(status)["node"]["corosync"]: + report_item = ReportItem.info( + reports.messages.CorosyncNotRunningOnNode(node_label), + ) +@@ -94,7 +94,7 @@ class GetCorosyncOnlineTargets( + self._corosync_online_target_list = [] + + def _get_request_data(self): +- return RequestData("remote/status") ++ return RequestData("remote/status", [("version", "2")]) + + def _process_response(self, response): + report_item = self._get_response_report(response) +@@ -103,7 +103,7 @@ class GetCorosyncOnlineTargets( + return + try: + status = response.data +- if json.loads(status)["corosync"]: ++ if json.loads(status)["node"]["corosync"]: + self._corosync_online_target_list.append( + response.request.target + ) +diff --git a/pcs/utils.py b/pcs/utils.py +index ef778b52..7774016e 100644 +--- a/pcs/utils.py ++++ b/pcs/utils.py +@@ -186,7 +186,9 @@ def checkStatus(node): + Commandline options: + * --request-timeout - timeout for HTTP requests + """ +- return sendHTTPRequest(node, "remote/status", None, False, False) ++ return sendHTTPRequest( ++ node, "remote/status", urlencode({"version": "2"}), False, False ++ ) + + + # Check and see if we're authorized (faster than a status check) +diff --git a/pcs_test/suite.py b/pcs_test/suite.py +index 75ab66cd..bd98b8b0 100644 +--- a/pcs_test/suite.py ++++ b/pcs_test/suite.py +@@ -1,6 +1,8 @@ + import importlib + import os + import sys ++from threading import Thread ++import time + import unittest + + try: +@@ -84,6 +86,67 @@ def discover_tests( + return unittest.TestLoader().loadTestsFromNames(explicitly_enumerated_tests) + + ++def tier1_fixtures_needed(test_list): ++ for test_name in tests_from_suite(test_list): ++ if test_name.startswith("pcs_test.tier1.legacy."): ++ return True ++ return False ++ ++ ++def run_tier1_fixtures(run_concurrently=True): ++ # pylint: disable=import-outside-toplevel ++ from pcs_test.tier1.legacy.test_constraints import ( ++ CONSTRAINT_TEST_CIB_FIXTURE, ++ ) ++ from pcs_test.tier1.legacy.test_resource import RESOURCE_TEST_CIB_FIXTURE ++ from pcs_test.tier1.legacy.test_stonith import ( ++ STONITH_LEVEL_TEST_CIB_FIXTURE, ++ ) ++ ++ fixture_instances = [ ++ CONSTRAINT_TEST_CIB_FIXTURE, ++ RESOURCE_TEST_CIB_FIXTURE, ++ STONITH_LEVEL_TEST_CIB_FIXTURE, ++ ] ++ print("Preparing tier1 fixtures...") ++ time_start = time.time() ++ if run_concurrently: ++ thread_list = [] ++ for instance in fixture_instances: ++ thread = Thread(target=instance.set_up) ++ thread.daemon = True ++ thread.start() ++ thread_list.append(thread) ++ timeout_counter = 30 # 30 * 10s = 5min ++ while thread_list: ++ if timeout_counter < 0: ++ raise AssertionError("Fixture threads seem to be stuck :(") ++ for thread in thread_list: ++ thread.join(timeout=10) ++ sys.stdout.write(".") ++ sys.stdout.flush() ++ timeout_counter -= 1 ++ if not thread.is_alive(): ++ thread_list.remove(thread) ++ continue ++ ++ else: ++ for 
instance in fixture_instances: ++ instance.set_up() ++ time_stop = time.time() ++ time_taken = time_stop - time_start ++ sys.stdout.write("Tier1 fixtures prepared in %.3fs\n" % (time_taken)) ++ sys.stdout.flush() ++ ++ def cleanup(): ++ print("Cleaning tier1 fixtures...", end=" ") ++ for instance in fixture_instances: ++ instance.clean_up() ++ print("done") ++ ++ return cleanup ++ ++ + def main(): + # pylint: disable=import-outside-toplevel + if "BUNDLED_LIB_LOCATION" in os.environ: +@@ -141,6 +204,11 @@ def main(): + sys.exit() + + tests_to_run = discovered_tests ++ tier1_fixtures_cleanup = None ++ if tier1_fixtures_needed(tests_to_run): ++ tier1_fixtures_cleanup = run_tier1_fixtures( ++ run_concurrently=run_concurrently ++ ) + if run_concurrently: + tests_to_run = ConcurrentTestSuite( + discovered_tests, +@@ -174,6 +242,8 @@ def main(): + verbosity=2 if "-v" in sys.argv else 1, resultclass=ResultClass + ) + test_result = test_runner.run(tests_to_run) ++ if tier1_fixtures_cleanup: ++ tier1_fixtures_cleanup() + if not test_result.wasSuccessful(): + sys.exit(1) + +diff --git a/pcs_test/tier0/lib/commands/cluster/test_add_nodes_validation.py b/pcs_test/tier0/lib/commands/cluster/test_add_nodes_validation.py +index c66a5dff..69cdeed2 100644 +--- a/pcs_test/tier0/lib/commands/cluster/test_add_nodes_validation.py ++++ b/pcs_test/tier0/lib/commands/cluster/test_add_nodes_validation.py +@@ -14,6 +14,9 @@ from pcs_test.tier0.lib.commands.cluster.test_add_nodes import ( + ) + from pcs_test.tools import fixture + from pcs_test.tools.command_env import get_env_tools ++from pcs_test.tools.command_env.config_http_corosync import ( ++ corosync_running_check_response, ++) + from pcs_test.tools.custom_mock import patch_getaddrinfo + + from pcs import settings +@@ -1170,7 +1173,10 @@ class ClusterStatus(TestCase): + .local.read_sbd_config(name_sufix="_2") + .http.corosync.check_corosync_offline( + communication_list=[ +- {"label": "node1", "output": '{"corosync":true}'}, ++ { ++ "label": "node1", ++ "output": corosync_running_check_response(True), ++ }, + {"label": "node2", "output": "an error"}, + { + "label": "node3", +@@ -1178,8 +1184,14 @@ class ClusterStatus(TestCase): + "errno": 7, + "error_msg": "an error", + }, +- {"label": "node4", "output": '{"corosync":true}'}, +- {"label": "node5", "output": '{"corosync":false}'}, ++ { ++ "label": "node4", ++ "output": corosync_running_check_response(True), ++ }, ++ { ++ "label": "node5", ++ "output": corosync_running_check_response(False), ++ }, + ] + ) + .local.get_host_info(new_nodes) +diff --git a/pcs_test/tier0/lib/commands/test_stonith_update_scsi_devices.py b/pcs_test/tier0/lib/commands/test_stonith_update_scsi_devices.py +index 3bc51325..593757d8 100644 +--- a/pcs_test/tier0/lib/commands/test_stonith_update_scsi_devices.py ++++ b/pcs_test/tier0/lib/commands/test_stonith_update_scsi_devices.py +@@ -4,6 +4,9 @@ from unittest import mock, TestCase + + from pcs_test.tools import fixture + from pcs_test.tools.command_env import get_env_tools ++from pcs_test.tools.command_env.config_http_corosync import ( ++ corosync_running_check_response, ++) + from pcs_test.tools.misc import get_test_resource as rc + + from pcs import settings +@@ -1013,7 +1016,7 @@ class TestUpdateScsiDevicesFailures(TestCase): + communication_list=[ + dict( + label=self.existing_nodes[0], +- output='{"corosync":true}', ++ output=corosync_running_check_response(True), + ), + ] + + [ +@@ -1052,11 +1055,11 @@ class TestUpdateScsiDevicesFailures(TestCase): + communication_list=[ + dict( + 
label=self.existing_nodes[0], +- output='{"corosync":true}', ++ output=corosync_running_check_response(True), + ), + dict( + label=self.existing_nodes[1], +- output='{"corosync":false}', ++ output=corosync_running_check_response(False), + ), + dict( + label=self.existing_nodes[2], +@@ -1122,7 +1125,7 @@ class TestUpdateScsiDevicesFailures(TestCase): + ), + dict( + label=self.existing_nodes[2], +- output='{"corosync":false}', ++ output=corosync_running_check_response(False), + ), + ] + ) +diff --git a/pcs_test/tier0/lib/test_env_corosync.py b/pcs_test/tier0/lib/test_env_corosync.py +index dafc63a0..7063ee80 100644 +--- a/pcs_test/tier0/lib/test_env_corosync.py ++++ b/pcs_test/tier0/lib/test_env_corosync.py +@@ -14,6 +14,9 @@ from pcs.lib.corosync.config_parser import ( + from pcs_test.tools import fixture + from pcs_test.tools.assertions import assert_raise_library_error + from pcs_test.tools.command_env import get_env_tools ++from pcs_test.tools.command_env.config_http_corosync import ( ++ corosync_running_check_response, ++) + + + class PushCorosyncConfLiveBase(TestCase): +@@ -92,12 +95,11 @@ class PushCorosyncConfLiveNoQdeviceTest(PushCorosyncConfLiveBase): + ) + + def test_dont_need_stopped_cluster(self): +- ( +- self.config.http.corosync.set_corosync_conf( +- self.corosync_conf_text, node_labels=self.node_labels +- ).http.corosync.reload_corosync_conf( +- node_labels=self.node_labels[:1] +- ) ++ self.config.http.corosync.set_corosync_conf( ++ self.corosync_conf_text, node_labels=self.node_labels ++ ) ++ self.config.http.corosync.reload_corosync_conf( ++ node_labels=self.node_labels[:1] + ) + self.env_assistant.get_env().push_corosync_conf( + self.corosync_conf_facade +@@ -114,26 +116,19 @@ class PushCorosyncConfLiveNoQdeviceTest(PushCorosyncConfLiveBase): + node="node-2", + ), + fixture.info( +- report_codes.COROSYNC_CONFIG_RELOADED, node="node-1" ++ report_codes.COROSYNC_CONFIG_RELOADED, ++ node="node-1", + ), + ] + ) + + def test_dont_need_stopped_cluster_error(self): +- ( +- self.config.http.corosync.set_corosync_conf( +- self.corosync_conf_text, +- communication_list=[ +- { +- "label": "node-1", +- }, +- { +- "label": "node-2", +- "response_code": 400, +- "output": "Failed", +- }, +- ], +- ) ++ self.config.http.corosync.set_corosync_conf( ++ self.corosync_conf_text, ++ communication_list=[ ++ {"label": "node-1"}, ++ {"label": "node-2", "response_code": 400, "output": "Failed"}, ++ ], + ) + env = self.env_assistant.get_env() + self.env_assistant.assert_raise_library_error( +@@ -162,35 +157,28 @@ class PushCorosyncConfLiveNoQdeviceTest(PushCorosyncConfLiveBase): + ) + + def test_dont_need_stopped_cluster_error_skip_offline(self): +- ( +- self.config.http.corosync.set_corosync_conf( +- self.corosync_conf_text, +- communication_list=[ ++ self.config.http.corosync.set_corosync_conf( ++ self.corosync_conf_text, ++ communication_list=[ ++ { ++ "label": "node-1", ++ "response_code": 400, ++ "output": "Failed", ++ }, ++ {"label": "node-2"}, ++ ], ++ ) ++ self.config.http.corosync.reload_corosync_conf( ++ communication_list=[ ++ [ + { +- "label": "node-1", ++ "label": self.node_labels[0], + "response_code": 400, + "output": "Failed", + }, +- { +- "label": "node-2", +- }, + ], +- ).http.corosync.reload_corosync_conf( +- communication_list=[ +- [ +- { +- "label": self.node_labels[0], +- "response_code": 400, +- "output": "Failed", +- }, +- ], +- [ +- { +- "label": self.node_labels[1], +- }, +- ], +- ] +- ) ++ [{"label": self.node_labels[1]}], ++ ] + ) + 
self.env_assistant.get_env().push_corosync_conf( + self.corosync_conf_facade, skip_offline_nodes=True +@@ -219,33 +207,29 @@ class PushCorosyncConfLiveNoQdeviceTest(PushCorosyncConfLiveBase): + reason="Failed", + ), + fixture.info( +- report_codes.COROSYNC_CONFIG_RELOADED, node="node-2" ++ report_codes.COROSYNC_CONFIG_RELOADED, ++ node="node-2", + ), + ] + ) + + def test_reload_on_another_node(self): +- ( +- self.config.http.corosync.set_corosync_conf( +- self.corosync_conf_text, node_labels=self.node_labels +- ).http.corosync.reload_corosync_conf( +- communication_list=[ +- [ +- { +- "label": self.node_labels[0], +- "response_code": 200, +- "output": json.dumps( +- dict(code="not_running", message="not running") +- ), +- }, +- ], +- [ +- { +- "label": self.node_labels[1], +- }, +- ], +- ] +- ) ++ self.config.http.corosync.set_corosync_conf( ++ self.corosync_conf_text, node_labels=self.node_labels ++ ) ++ self.config.http.corosync.reload_corosync_conf( ++ communication_list=[ ++ [ ++ { ++ "label": self.node_labels[0], ++ "response_code": 200, ++ "output": json.dumps( ++ dict(code="not_running", message="not running") ++ ), ++ }, ++ ], ++ [{"label": self.node_labels[1]}], ++ ] + ) + self.env_assistant.get_env().push_corosync_conf( + self.corosync_conf_facade +@@ -266,35 +250,35 @@ class PushCorosyncConfLiveNoQdeviceTest(PushCorosyncConfLiveBase): + node="node-1", + ), + fixture.info( +- report_codes.COROSYNC_CONFIG_RELOADED, node="node-2" ++ report_codes.COROSYNC_CONFIG_RELOADED, ++ node="node-2", + ), + ] + ) + + def test_reload_not_successful(self): +- ( +- self.config.http.corosync.set_corosync_conf( +- self.corosync_conf_text, node_labels=self.node_labels +- ).http.corosync.reload_corosync_conf( +- communication_list=[ +- [ +- { +- "label": self.node_labels[0], +- "response_code": 200, +- "output": json.dumps( +- dict(code="not_running", message="not running") +- ), +- }, +- ], +- [ +- { +- "label": self.node_labels[1], +- "response_code": 200, +- "output": "not a json", +- }, +- ], +- ] +- ) ++ self.config.http.corosync.set_corosync_conf( ++ self.corosync_conf_text, node_labels=self.node_labels ++ ) ++ self.config.http.corosync.reload_corosync_conf( ++ communication_list=[ ++ [ ++ { ++ "label": self.node_labels[0], ++ "response_code": 200, ++ "output": json.dumps( ++ dict(code="not_running", message="not running") ++ ), ++ }, ++ ], ++ [ ++ { ++ "label": self.node_labels[1], ++ "response_code": 200, ++ "output": "not a json", ++ }, ++ ], ++ ] + ) + self.env_assistant.assert_raise_library_error( + lambda: self.env_assistant.get_env().push_corosync_conf( +@@ -318,7 +302,8 @@ class PushCorosyncConfLiveNoQdeviceTest(PushCorosyncConfLiveBase): + node="node-1", + ), + fixture.warn( +- report_codes.INVALID_RESPONSE_FORMAT, node="node-2" ++ report_codes.INVALID_RESPONSE_FORMAT, ++ node="node-2", + ), + fixture.error( + report_codes.UNABLE_TO_PERFORM_OPERATION_ON_ANY_NODE +@@ -327,23 +312,22 @@ class PushCorosyncConfLiveNoQdeviceTest(PushCorosyncConfLiveBase): + ) + + def test_reload_corosync_not_running_anywhere(self): +- ( +- self.config.http.corosync.set_corosync_conf( +- self.corosync_conf_text, node_labels=self.node_labels +- ).http.corosync.reload_corosync_conf( +- communication_list=[ +- [ +- { +- "label": node, +- "response_code": 200, +- "output": json.dumps( +- dict(code="not_running", message="not running") +- ), +- }, +- ] +- for node in self.node_labels ++ self.config.http.corosync.set_corosync_conf( ++ self.corosync_conf_text, node_labels=self.node_labels ++ ) ++ 
self.config.http.corosync.reload_corosync_conf( ++ communication_list=[ ++ [ ++ { ++ "label": node, ++ "response_code": 200, ++ "output": json.dumps( ++ dict(code="not_running", message="not running") ++ ), ++ }, + ] +- ) ++ for node in self.node_labels ++ ] + ) + self.env_assistant.get_env().push_corosync_conf( + self.corosync_conf_facade +@@ -372,12 +356,11 @@ class PushCorosyncConfLiveNoQdeviceTest(PushCorosyncConfLiveBase): + + def test_need_stopped_cluster(self): + self.corosync_conf_facade.need_stopped_cluster = True +- ( +- self.config.http.corosync.check_corosync_offline( +- node_labels=self.node_labels +- ).http.corosync.set_corosync_conf( +- self.corosync_conf_text, node_labels=self.node_labels +- ) ++ self.config.http.corosync.check_corosync_offline( ++ node_labels=self.node_labels ++ ) ++ self.config.http.corosync.set_corosync_conf( ++ self.corosync_conf_text, node_labels=self.node_labels + ) + self.env_assistant.get_env().push_corosync_conf( + self.corosync_conf_facade +@@ -407,21 +390,14 @@ class PushCorosyncConfLiveNoQdeviceTest(PushCorosyncConfLiveBase): + + def test_need_stopped_cluster_not_stopped(self): + self.corosync_conf_facade.need_stopped_cluster = True +- ( +- self.config.http.corosync.check_corosync_offline( +- communication_list=[ +- { +- "label": self.node_labels[0], +- "output": '{"corosync":true}', +- } +- ] +- + [ +- { +- "label": node, +- } +- for node in self.node_labels[1:] +- ] +- ) ++ self.config.http.corosync.check_corosync_offline( ++ communication_list=[ ++ { ++ "label": self.node_labels[0], ++ "output": corosync_running_check_response(True), ++ } ++ ] ++ + [{"label": node} for node in self.node_labels[1:]] + ) + env = self.env_assistant.get_env() + self.env_assistant.assert_raise_library_error( +@@ -445,18 +421,14 @@ class PushCorosyncConfLiveNoQdeviceTest(PushCorosyncConfLiveBase): + # If we know for sure that corosync is running, skip_offline doesn't + # matter. 
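# A minimal sketch of the version-2 "remote/status" payload that these
# stubs now return via corosync_running_check_response() (added below in
# pcs_test/tools/command_env/config_http_corosync.py). The corosync flag
# moves under a "node" key:
#
#   {"node": {"corosync": true,
#             "services": {"corosync": {"installed": true,
#                                       "enabled": false,
#                                       "running": true}}}}
#
# The removed '{"corosync":true}' literals encoded the legacy top-level
# form of the same flag.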
+ self.corosync_conf_facade.need_stopped_cluster = True +- ( +- self.config.http.corosync.check_corosync_offline( +- communication_list=[ +- dict( +- label="node-1", +- output='{"corosync":true}', +- ), +- dict( +- label="node-2", +- ), +- ] +- ) ++ self.config.http.corosync.check_corosync_offline( ++ communication_list=[ ++ dict( ++ label="node-1", ++ output=corosync_running_check_response(True), ++ ), ++ dict(label="node-2"), ++ ] + ) + env = self.env_assistant.get_env() + self.env_assistant.assert_raise_library_error( +@@ -481,19 +453,17 @@ class PushCorosyncConfLiveNoQdeviceTest(PushCorosyncConfLiveBase): + + def test_need_stopped_cluster_json_error(self): + self.corosync_conf_facade.need_stopped_cluster = True +- ( +- self.config.http.corosync.check_corosync_offline( +- communication_list=[ +- dict(label="node-1", output="{"), # not valid json +- dict( +- label="node-2", +- # The expected key (/corosync) is missing, we don't +- # care about version 2 status key +- # (/services/corosync/running) +- output='{"services":{"corosync":{"running":true}}}', +- ), +- ] +- ) ++ self.config.http.corosync.check_corosync_offline( ++ communication_list=[ ++ dict(label="node-1", output="{"), # not valid json ++ dict( ++ label="node-2", ++ # The expected key (/corosync) is missing, tested code ++ # doesn't care about a new key added in version 2 status ++ # (/services/corosync/running) ++ output='{"services":{"corosync":{"running":true}}}', ++ ), ++ ] + ) + env = self.env_assistant.get_env() + self.env_assistant.assert_raise_library_error( +@@ -517,19 +487,15 @@ class PushCorosyncConfLiveNoQdeviceTest(PushCorosyncConfLiveBase): + + def test_need_stopped_cluster_comunnication_failure(self): + self.corosync_conf_facade.need_stopped_cluster = True +- ( +- self.config.http.corosync.check_corosync_offline( +- communication_list=[ +- dict( +- label="node-1", +- ), +- dict( +- label="node-2", +- response_code=401, +- output="""{"notauthorized":"true"}""", +- ), +- ] +- ) ++ self.config.http.corosync.check_corosync_offline( ++ communication_list=[ ++ dict(label="node-1"), ++ dict( ++ label="node-2", ++ response_code=401, ++ output='{"notauthorized":"true"}', ++ ), ++ ] + ) + env = self.env_assistant.get_env() + self.env_assistant.assert_raise_library_error( +@@ -560,29 +526,26 @@ class PushCorosyncConfLiveNoQdeviceTest(PushCorosyncConfLiveBase): + def test_need_stopped_cluster_comunnication_failures_skip_offline(self): + # If we don't know if corosync is running, skip_offline matters. 
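# Sketch of the client-side change these stubs exercise (taken from the
# pcs/lib/communication/corosync.py hunk above): the status request now
# carries the ("version", "2") parameter and CheckCorosyncOffline reads
#
#   json.loads(status)["node"]["corosync"]
#
# instead of the legacy json.loads(status)["corosync"].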
+ self.corosync_conf_facade.need_stopped_cluster = True +- ( +- self.config.http.corosync.check_corosync_offline( +- communication_list=[ +- dict( +- label="node-1", +- response_code=401, +- output="""{"notauthorized":"true"}""", +- ), +- dict(label="node-2", output="{"), # not valid json +- ] +- ).http.corosync.set_corosync_conf( +- self.corosync_conf_text, +- communication_list=[ +- dict( +- label="node-1", +- response_code=401, +- output="""{"notauthorized":"true"}""", +- ), +- dict( +- label="node-2", +- ), +- ], +- ) ++ self.config.http.corosync.check_corosync_offline( ++ communication_list=[ ++ dict( ++ label="node-1", ++ response_code=401, ++ output='{"notauthorized":"true"}', ++ ), ++ dict(label="node-2", output="{"), # not valid json ++ ] ++ ) ++ self.config.http.corosync.set_corosync_conf( ++ self.corosync_conf_text, ++ communication_list=[ ++ dict( ++ label="node-1", ++ response_code=401, ++ output='{"notauthorized":"true"}', ++ ), ++ dict(label="node-2"), ++ ], + ) + self.env_assistant.get_env().push_corosync_conf( + self.corosync_conf_facade, skip_offline_nodes=True +@@ -662,15 +625,17 @@ class PushCorosyncConfLiveWithQdeviceTest(PushCorosyncConfLiveBase): + + def test_qdevice_reload(self): + self.corosync_conf_facade.need_qdevice_reload = True +- ( +- self.config.http.corosync.set_corosync_conf( +- self.corosync_conf_text, node_labels=self.node_labels +- ) +- .http.corosync.reload_corosync_conf( +- node_labels=self.node_labels[:1] +- ) +- .http.corosync.qdevice_client_stop(node_labels=self.node_labels) +- .http.corosync.qdevice_client_start(node_labels=self.node_labels) ++ self.config.http.corosync.set_corosync_conf( ++ self.corosync_conf_text, node_labels=self.node_labels ++ ) ++ self.config.http.corosync.reload_corosync_conf( ++ node_labels=self.node_labels[:1] ++ ) ++ self.config.http.corosync.qdevice_client_stop( ++ node_labels=self.node_labels ++ ) ++ self.config.http.corosync.qdevice_client_start( ++ node_labels=self.node_labels + ) + + self.env_assistant.get_env().push_corosync_conf( +@@ -689,7 +654,8 @@ class PushCorosyncConfLiveWithQdeviceTest(PushCorosyncConfLiveBase): + node="node-2", + ), + fixture.info( +- report_codes.COROSYNC_CONFIG_RELOADED, node="node-1" ++ report_codes.COROSYNC_CONFIG_RELOADED, ++ node="node-1", + ), + fixture.info(report_codes.QDEVICE_CLIENT_RELOAD_STARTED), + fixture.info( +@@ -725,34 +691,34 @@ class PushCorosyncConfLiveWithQdeviceTest(PushCorosyncConfLiveBase): + + def test_qdevice_reload_corosync_stopped(self): + self.corosync_conf_facade.need_qdevice_reload = True +- ( +- self.config.http.corosync.set_corosync_conf( +- self.corosync_conf_text, node_labels=self.node_labels +- ) +- .http.corosync.reload_corosync_conf( +- communication_list=[ +- [ +- { +- "label": label, +- "response_code": 200, +- "output": json.dumps( +- dict(code="not_running", message="") +- ), +- }, +- ] +- for label in self.node_labels +- ] +- ) +- .http.corosync.qdevice_client_stop(node_labels=self.node_labels) +- .http.corosync.qdevice_client_start( +- communication_list=[ ++ self.config.http.corosync.set_corosync_conf( ++ self.corosync_conf_text, node_labels=self.node_labels ++ ) ++ self.config.http.corosync.reload_corosync_conf( ++ communication_list=[ ++ [ + { + "label": label, +- "output": "corosync is not running, skipping", +- } +- for label in self.node_labels ++ "response_code": 200, ++ "output": json.dumps( ++ dict(code="not_running", message="") ++ ), ++ }, + ] +- ) ++ for label in self.node_labels ++ ] ++ ) ++ 
self.config.http.corosync.qdevice_client_stop( ++ node_labels=self.node_labels ++ ) ++ self.config.http.corosync.qdevice_client_start( ++ communication_list=[ ++ { ++ "label": label, ++ "output": "corosync is not running, skipping", ++ } ++ for label in self.node_labels ++ ] + ) + + self.env_assistant.get_env().push_corosync_conf( +@@ -816,38 +782,28 @@ class PushCorosyncConfLiveWithQdeviceTest(PushCorosyncConfLiveBase): + # This also tests that failing to stop qdevice on a node doesn't prevent + # starting qdevice on the same node. + self.corosync_conf_facade.need_qdevice_reload = True +- ( +- self.config.http.corosync.set_corosync_conf( +- self.corosync_conf_text, node_labels=self.node_labels +- ) +- .http.corosync.reload_corosync_conf( +- node_labels=self.node_labels[:1] +- ) +- .http.corosync.qdevice_client_stop( +- communication_list=[ +- dict( +- label="node-1", +- ), +- dict( +- label="node-2", +- response_code=400, +- output="error", +- ), +- ] +- ) +- .http.corosync.qdevice_client_start( +- communication_list=[ +- dict( +- label="node-1", +- errno=8, +- error_msg="failure", +- was_connected=False, +- ), +- dict( +- label="node-2", +- ), +- ] +- ) ++ self.config.http.corosync.set_corosync_conf( ++ self.corosync_conf_text, node_labels=self.node_labels ++ ) ++ self.config.http.corosync.reload_corosync_conf( ++ node_labels=self.node_labels[:1] ++ ) ++ self.config.http.corosync.qdevice_client_stop( ++ communication_list=[ ++ dict(label="node-1"), ++ dict(label="node-2", response_code=400, output="error"), ++ ] ++ ) ++ self.config.http.corosync.qdevice_client_start( ++ communication_list=[ ++ dict( ++ label="node-1", ++ errno=8, ++ error_msg="failure", ++ was_connected=False, ++ ), ++ dict(label="node-2"), ++ ] + ) + + env = self.env_assistant.get_env() +@@ -867,7 +823,8 @@ class PushCorosyncConfLiveWithQdeviceTest(PushCorosyncConfLiveBase): + node="node-2", + ), + fixture.info( +- report_codes.COROSYNC_CONFIG_RELOADED, node="node-1" ++ report_codes.COROSYNC_CONFIG_RELOADED, ++ node="node-1", + ), + fixture.info(report_codes.QDEVICE_CLIENT_RELOAD_STARTED), + fixture.info( +@@ -903,62 +860,46 @@ class PushCorosyncConfLiveWithQdeviceTest(PushCorosyncConfLiveBase): + + def test_qdevice_reload_failures_skip_offline(self): + self.corosync_conf_facade.need_qdevice_reload = True +- ( +- self.config.http.corosync.set_corosync_conf( +- self.corosync_conf_text, +- communication_list=[ +- dict( +- label="node-1", +- ), +- dict( +- label="node-2", +- errno=8, +- error_msg="failure", +- was_connected=False, +- ), ++ self.config.http.corosync.set_corosync_conf( ++ self.corosync_conf_text, ++ communication_list=[ ++ dict(label="node-1"), ++ dict( ++ label="node-2", ++ errno=8, ++ error_msg="failure", ++ was_connected=False, ++ ), ++ ], ++ ) ++ self.config.http.corosync.reload_corosync_conf( ++ communication_list=[ ++ [ ++ { ++ "label": self.node_labels[0], ++ "response_code": 400, ++ "output": "Failed", ++ }, + ], +- ) +- .http.corosync.reload_corosync_conf( +- communication_list=[ +- [ +- { +- "label": self.node_labels[0], +- "response_code": 400, +- "output": "Failed", +- }, +- ], +- [ +- { +- "label": self.node_labels[1], +- }, +- ], +- ] +- ) +- .http.corosync.qdevice_client_stop( +- communication_list=[ +- dict( +- label="node-1", +- ), +- dict( +- label="node-2", +- response_code=400, +- output="error", +- ), +- ] +- ) +- .http.corosync.qdevice_client_start( +- communication_list=[ +- dict( +- label="node-1", +- errno=8, +- error_msg="failure", +- was_connected=False, +- ), +- dict( +- 
label="node-2", +- ), +- ] +- ) ++ [{"label": self.node_labels[1]}], ++ ] ++ ) ++ self.config.http.corosync.qdevice_client_stop( ++ communication_list=[ ++ dict(label="node-1"), ++ dict(label="node-2", response_code=400, output="error"), ++ ] ++ ) ++ self.config.http.corosync.qdevice_client_start( ++ communication_list=[ ++ dict( ++ label="node-1", ++ errno=8, ++ error_msg="failure", ++ was_connected=False, ++ ), ++ dict(label="node-2"), ++ ] + ) + + env = self.env_assistant.get_env() +@@ -990,7 +931,8 @@ class PushCorosyncConfLiveWithQdeviceTest(PushCorosyncConfLiveBase): + reason="Failed", + ), + fixture.info( +- report_codes.COROSYNC_CONFIG_RELOADED, node="node-2" ++ report_codes.COROSYNC_CONFIG_RELOADED, ++ node="node-2", + ), + fixture.info(report_codes.QDEVICE_CLIENT_RELOAD_STARTED), + fixture.info( +@@ -1024,29 +966,28 @@ class PushCorosyncConfLiveWithQdeviceTest(PushCorosyncConfLiveBase): + + def test_reload_not_successful(self): + self.corosync_conf_facade.need_qdevice_reload = True +- ( +- self.config.http.corosync.set_corosync_conf( +- self.corosync_conf_text, node_labels=self.node_labels +- ).http.corosync.reload_corosync_conf( +- communication_list=[ +- [ +- { +- "label": self.node_labels[0], +- "response_code": 200, +- "output": json.dumps( +- dict(code="not_running", message="not running") +- ), +- }, +- ], +- [ +- { +- "label": self.node_labels[1], +- "response_code": 200, +- "output": "not a json", +- }, +- ], +- ] +- ) ++ self.config.http.corosync.set_corosync_conf( ++ self.corosync_conf_text, node_labels=self.node_labels ++ ) ++ self.config.http.corosync.reload_corosync_conf( ++ communication_list=[ ++ [ ++ { ++ "label": self.node_labels[0], ++ "response_code": 200, ++ "output": json.dumps( ++ dict(code="not_running", message="not running") ++ ), ++ }, ++ ], ++ [ ++ { ++ "label": self.node_labels[1], ++ "response_code": 200, ++ "output": "not a json", ++ }, ++ ], ++ ] + ) + self.env_assistant.assert_raise_library_error( + lambda: self.env_assistant.get_env().push_corosync_conf( +@@ -1070,7 +1011,8 @@ class PushCorosyncConfLiveWithQdeviceTest(PushCorosyncConfLiveBase): + node="node-1", + ), + fixture.warn( +- report_codes.INVALID_RESPONSE_FORMAT, node="node-2" ++ report_codes.INVALID_RESPONSE_FORMAT, ++ node="node-2", + ), + fixture.error( + report_codes.UNABLE_TO_PERFORM_OPERATION_ON_ANY_NODE +diff --git a/pcs_test/tier1/legacy/test_constraints.py b/pcs_test/tier1/legacy/test_constraints.py +index 36924925..49b413a8 100644 +--- a/pcs_test/tier1/legacy/test_constraints.py ++++ b/pcs_test/tier1/legacy/test_constraints.py +@@ -13,9 +13,11 @@ from pcs_test.tools.assertions import ( + from pcs_test.tools.bin_mock import get_mock_settings + from pcs_test.tools.cib import get_assert_pcs_effect_mixin + from pcs_test.tools.fixture_cib import ( ++ CachedCibFixture, + fixture_master_xml, + fixture_to_cib, + wrap_element_by_master, ++ wrap_element_by_master_file, + ) + from pcs_test.tools.misc import ( + get_test_resource as rc, +@@ -23,7 +25,6 @@ from pcs_test.tools.misc import ( + skip_unless_crm_rule, + outdent, + ParametrizedTestMetaClass, +- write_data_to_tmpfile, + write_file_to_tmpfile, + ) + from pcs_test.tools.pcs_runner import pcs, PcsRunner +@@ -54,70 +55,63 @@ empty_cib = rc("cib-empty-3.7.xml") + large_cib = rc("cib-large.xml") + + +-@skip_unless_crm_rule() +-class ConstraintTest(unittest.TestCase): +- def setUp(self): +- self.temp_cib = get_tmp_file("tier1_constraints") +- write_file_to_tmpfile(empty_cib, self.temp_cib) +- self.temp_corosync_conf = None +- +- def 
tearDown(self): +- self.temp_cib.close() +- if self.temp_corosync_conf: +- self.temp_corosync_conf.close() +- +- def fixture_resources(self): +- write_data_to_tmpfile(self.fixture_cib_cache(), self.temp_cib) +- +- def fixture_cib_cache(self): +- if not hasattr(self.__class__, "cib_cache"): +- self.__class__.cib_cache = self.fixture_cib() +- return self.__class__.cib_cache +- +- def fixture_cib(self): +- write_file_to_tmpfile(empty_cib, self.temp_cib) +- self.setupClusterA() +- self.temp_cib.flush() +- self.temp_cib.seek(0) +- cib_content = self.temp_cib.read() +- self.temp_cib.seek(0) +- write_file_to_tmpfile(empty_cib, self.temp_cib) +- return cib_content +- +- # Sets up a cluster with Resources, groups, master/slave resource and clones +- def setupClusterA(self): ++class ConstraintTestCibFixture(CachedCibFixture): ++ def _setup_cib(self): + line = "resource create D1 ocf:heartbeat:Dummy".split() +- output, returnVal = pcs(self.temp_cib.name, line) ++ output, returnVal = pcs(self.cache_path, line) + assert returnVal == 0 and output == "" + + line = "resource create D2 ocf:heartbeat:Dummy".split() +- output, returnVal = pcs(self.temp_cib.name, line) ++ output, returnVal = pcs(self.cache_path, line) + assert returnVal == 0 and output == "" + + line = "resource create D3 ocf:heartbeat:Dummy".split() +- output, returnVal = pcs(self.temp_cib.name, line) ++ output, returnVal = pcs(self.cache_path, line) + assert returnVal == 0 and output == "" + + line = "resource create D4 ocf:heartbeat:Dummy".split() +- output, returnVal = pcs(self.temp_cib.name, line) ++ output, returnVal = pcs(self.cache_path, line) + assert returnVal == 0 and output == "" + + line = "resource create D5 ocf:heartbeat:Dummy".split() +- output, returnVal = pcs(self.temp_cib.name, line) ++ output, returnVal = pcs(self.cache_path, line) + assert returnVal == 0 and output == "" + + line = "resource create D6 ocf:heartbeat:Dummy".split() +- output, returnVal = pcs(self.temp_cib.name, line) ++ output, returnVal = pcs(self.cache_path, line) + assert returnVal == 0 and output == "" + + line = "resource clone D3".split() +- output, returnVal = pcs(self.temp_cib.name, line) ++ output, returnVal = pcs(self.cache_path, line) + assert returnVal == 0 and output == "" + + # pcs no longer allows turning resources into masters but supports + # existing ones. In order to test it, we need to put a master in the + # CIB without pcs. 
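# A short sketch of the file-based helper used below (it is defined later
# in this patch, in pcs_test/tools/fixture_cib.py): it round-trips the
# cached CIB through a temp file so the existing element-wrapping helper
# can operate on a file path instead of an open file object.
#
#   def wrap_element_by_master_file(filepath, resource_id, master_id=None):
#       cib_tmp = get_tmp_file("wrap_by_master")
#       write_file_to_tmpfile(filepath, cib_tmp)
#       wrap_element_by_master(cib_tmp, resource_id, master_id=master_id)
#       cib_tmp.seek(0)
#       with open(filepath, "w") as target:
#           target.write(cib_tmp.read())
#       cib_tmp.close()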
+- wrap_element_by_master(self.temp_cib, "D4", master_id="Master") ++ wrap_element_by_master_file(self.cache_path, "D4", master_id="Master") ++ ++ ++CONSTRAINT_TEST_CIB_FIXTURE = ConstraintTestCibFixture( ++ "fixture_tier1_constraints", empty_cib ++) ++ ++ ++@skip_unless_crm_rule() ++class ConstraintTest(unittest.TestCase): ++ def setUp(self): ++ self.temp_cib = get_tmp_file("tier1_constraints") ++ write_file_to_tmpfile(empty_cib, self.temp_cib) ++ self.temp_corosync_conf = None ++ ++ def tearDown(self): ++ self.temp_cib.close() ++ if self.temp_corosync_conf: ++ self.temp_corosync_conf.close() ++ ++ def fixture_resources(self): ++ write_file_to_tmpfile( ++ CONSTRAINT_TEST_CIB_FIXTURE.cache_path, self.temp_cib ++ ) + + def testConstraintRules(self): + self.fixture_resources() +diff --git a/pcs_test/tier1/legacy/test_resource.py b/pcs_test/tier1/legacy/test_resource.py +index 8b043260..ecf0d23d 100644 +--- a/pcs_test/tier1/legacy/test_resource.py ++++ b/pcs_test/tier1/legacy/test_resource.py +@@ -12,8 +12,10 @@ from pcs_test.tools.assertions import ( + from pcs_test.tools.bin_mock import get_mock_settings + from pcs_test.tools.cib import get_assert_pcs_effect_mixin + from pcs_test.tools.fixture_cib import ( ++ CachedCibFixture, + fixture_master_xml, + fixture_to_cib, ++ wrap_element_by_master_file, + wrap_element_by_master, + ) + from pcs_test.tools.misc import ( +@@ -154,21 +156,8 @@ class ResourceDescribe(TestCase, AssertPcsMixin): + ) + + +-class Resource(TestCase, AssertPcsMixin): +- def setUp(self): +- self.temp_cib = get_tmp_file("tier1_resource") +- self.temp_large_cib = get_tmp_file("tier1_resource_large") +- write_file_to_tmpfile(empty_cib, self.temp_cib) +- write_file_to_tmpfile(large_cib, self.temp_large_cib) +- self.pcs_runner = PcsRunner(self.temp_cib.name) +- self.pcs_runner.mock_settings = get_mock_settings("crm_resource_binary") +- +- def tearDown(self): +- self.temp_cib.close() +- self.temp_large_cib.close() +- +- # Setups up a cluster with Resources, groups, master/slave resource & clones +- def setupClusterA(self): ++class ResourceTestCibFixture(CachedCibFixture): ++ def _setup_cib(self): + self.assert_pcs_success( + ( + "resource create --no-default-ops ClusterIP ocf:heartbeat:IPaddr2" +@@ -215,7 +204,34 @@ class Resource(TestCase, AssertPcsMixin): + # pcs no longer allows turning resources into masters but supports + # existing ones. In order to test it, we need to put a master in the + # CIB without pcs. 
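# Sketch of the caching pattern this hunk switches to (based on the
# CachedCibFixture base class added in pcs_test/tools/fixture_cib.py):
# the pcs-driven CIB setup runs once per test run, and each test only
# copies the cached file into its own temp CIB:
#
#   RESOURCE_TEST_CIB_FIXTURE = ResourceTestCibFixture(
#       "fixture_tier1_resource", empty_cib
#   )
#   # in setupClusterA():
#   write_file_to_tmpfile(RESOURCE_TEST_CIB_FIXTURE.cache_path, self.temp_cib)
#
# suite.py's run_tier1_fixtures() calls set_up() on the fixture instances
# (optionally in daemon threads) and clean_up() after the tests finish.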
+- wrap_element_by_master(self.temp_cib, "ClusterIP5", master_id="Master") ++ wrap_element_by_master_file( ++ self.cache_path, "ClusterIP5", master_id="Master" ++ ) ++ ++ ++RESOURCE_TEST_CIB_FIXTURE = ResourceTestCibFixture( ++ "fixture_tier1_resource", empty_cib ++) ++ ++ ++class Resource(TestCase, AssertPcsMixin): ++ def setUp(self): ++ self.temp_cib = get_tmp_file("tier1_resource") ++ self.temp_large_cib = get_tmp_file("tier1_resource_large") ++ write_file_to_tmpfile(empty_cib, self.temp_cib) ++ write_file_to_tmpfile(large_cib, self.temp_large_cib) ++ self.pcs_runner = PcsRunner(self.temp_cib.name) ++ self.pcs_runner.mock_settings = get_mock_settings("crm_resource_binary") ++ ++ def tearDown(self): ++ self.temp_cib.close() ++ self.temp_large_cib.close() ++ ++ # Setups up a cluster with Resources, groups, master/slave resource & clones ++ def setupClusterA(self): ++ write_file_to_tmpfile( ++ RESOURCE_TEST_CIB_FIXTURE.cache_path, self.temp_cib ++ ) + + def testCaseInsensitive(self): + o, r = pcs( +diff --git a/pcs_test/tier1/legacy/test_stonith.py b/pcs_test/tier1/legacy/test_stonith.py +index b3def2d4..f6b93f01 100644 +--- a/pcs_test/tier1/legacy/test_stonith.py ++++ b/pcs_test/tier1/legacy/test_stonith.py +@@ -8,6 +8,7 @@ from pcs.common.str_tools import indent + from pcs_test.tier1.cib_resource.common import ResourceTest + from pcs_test.tools.assertions import AssertPcsMixin + from pcs_test.tools.bin_mock import get_mock_settings ++from pcs_test.tools.fixture_cib import CachedCibFixture + from pcs_test.tools.misc import ( + get_test_resource as rc, + get_tmp_file, +@@ -840,6 +841,46 @@ _fixture_stonith_level_cache = None + _fixture_stonith_level_cache_lock = Lock() + + ++class StonithLevelTestCibFixture(CachedCibFixture): ++ def _fixture_stonith_resource(self, name): ++ self.assert_pcs_success( ++ [ ++ "stonith", ++ "create", ++ name, ++ "fence_apc", ++ "pcmk_host_list=rh7-1 rh7-2", ++ "ip=i", ++ "username=u", ++ ] ++ ) ++ ++ def _setup_cib(self): ++ self._fixture_stonith_resource("F1") ++ self._fixture_stonith_resource("F2") ++ self._fixture_stonith_resource("F3") ++ ++ self.assert_pcs_success("stonith level add 1 rh7-1 F1".split()) ++ self.assert_pcs_success("stonith level add 2 rh7-1 F2".split()) ++ self.assert_pcs_success("stonith level add 2 rh7-2 F1".split()) ++ self.assert_pcs_success("stonith level add 1 rh7-2 F2".split()) ++ self.assert_pcs_success("stonith level add 4 regexp%rh7-\\d F3".split()) ++ self.assert_pcs_success( ++ "stonith level add 3 regexp%rh7-\\d F2 F1".split() ++ ) ++ self.assert_pcs_success( ++ "stonith level add 5 attrib%fencewith=levels1 F3 F2".split() ++ ) ++ self.assert_pcs_success( ++ "stonith level add 6 attrib%fencewith=levels2 F3 F1".split() ++ ) ++ ++ ++STONITH_LEVEL_TEST_CIB_FIXTURE = StonithLevelTestCibFixture( ++ "fixture_tier1_stonith_level_tests", rc("cib-empty-withnodes.xml") ++) ++ ++ + class LevelTestsBase(TestCase, AssertPcsMixin): + def setUp(self): + self.temp_cib = get_tmp_file("tier1_test_stonith_level") +@@ -877,26 +918,11 @@ class LevelTestsBase(TestCase, AssertPcsMixin): + _fixture_stonith_level_cache = self.fixture_cib_config() + return _fixture_stonith_level_cache + +- def fixture_cib_config(self): +- self.fixture_stonith_resource("F1") +- self.fixture_stonith_resource("F2") +- self.fixture_stonith_resource("F3") +- +- self.assert_pcs_success("stonith level add 1 rh7-1 F1".split()) +- self.assert_pcs_success("stonith level add 2 rh7-1 F2".split()) +- self.assert_pcs_success("stonith level add 2 rh7-2 F1".split()) +- 
self.assert_pcs_success("stonith level add 1 rh7-2 F2".split()) +- self.assert_pcs_success("stonith level add 4 regexp%rh7-\\d F3".split()) +- self.assert_pcs_success( +- "stonith level add 3 regexp%rh7-\\d F2 F1".split() +- ) +- self.assert_pcs_success( +- "stonith level add 5 attrib%fencewith=levels1 F3 F2".split() +- ) +- self.assert_pcs_success( +- "stonith level add 6 attrib%fencewith=levels2 F3 F1".split() +- ) +- ++ @staticmethod ++ def fixture_cib_config(): ++ cib_content = "" ++ with open(STONITH_LEVEL_TEST_CIB_FIXTURE.cache_path, "r") as cib_file: ++ cib_content = cib_file.read() + config = outdent( + """\ + Target: rh7-1 +@@ -914,12 +940,7 @@ class LevelTestsBase(TestCase, AssertPcsMixin): + Level 6 - F3,F1 + """ + ) +- + config_lines = config.splitlines() +- self.temp_cib.flush() +- self.temp_cib.seek(0) +- cib_content = self.temp_cib.read() +- self.temp_cib.seek(0) + return cib_content, config, config_lines + + +diff --git a/pcs_test/tools/command_env/config_http_corosync.py b/pcs_test/tools/command_env/config_http_corosync.py +index cdaf65ff..7f84f406 100644 +--- a/pcs_test/tools/command_env/config_http_corosync.py ++++ b/pcs_test/tools/command_env/config_http_corosync.py +@@ -6,6 +6,23 @@ from pcs_test.tools.command_env.mock_node_communicator import ( + ) + + ++def corosync_running_check_response(running): ++ return json.dumps( ++ { ++ "node": { ++ "corosync": running, ++ "services": { ++ "corosync": { ++ "installed": True, ++ "enabled": not running, ++ "running": running, ++ } ++ }, ++ } ++ } ++ ) ++ ++ + class CorosyncShortcuts: + def __init__(self, calls): + self.__calls = calls +@@ -29,7 +46,8 @@ class CorosyncShortcuts: + node_labels, + communication_list, + action="remote/status", +- output='{"corosync":false}', ++ param_list=[("version", "2")], ++ output=corosync_running_check_response(False), + ) + + def get_corosync_online_targets( +@@ -51,7 +69,8 @@ class CorosyncShortcuts: + node_labels, + communication_list, + action="remote/status", +- output='{"corosync":true}', ++ param_list=[("version", "2")], ++ output=corosync_running_check_response(True), + ) + + def get_corosync_conf( +diff --git a/pcs_test/tools/fixture_cib.py b/pcs_test/tools/fixture_cib.py +index 730b0e33..602491c8 100644 +--- a/pcs_test/tools/fixture_cib.py ++++ b/pcs_test/tools/fixture_cib.py +@@ -3,7 +3,14 @@ import os + from unittest import mock + from lxml import etree + ++from pcs_test.tools.assertions import AssertPcsMixin + from pcs_test.tools.custom_mock import MockLibraryReportProcessor ++from pcs_test.tools.misc import ( ++ get_test_resource, ++ get_tmp_file, ++ write_file_to_tmpfile, ++) ++from pcs_test.tools.pcs_runner import PcsRunner + from pcs_test.tools.xml import etree_to_str + + from pcs import settings +@@ -12,6 +19,54 @@ from pcs.lib.external import CommandRunner + # pylint: disable=line-too-long + + ++class CachedCibFixture(AssertPcsMixin): ++ def __init__(self, cache_name, empty_cib_path): ++ self._empty_cib_path = empty_cib_path ++ self._cache_name = cache_name ++ self._cache_path = None ++ self._pcs_runner = None ++ ++ def _setup_cib(self): ++ raise NotImplementedError() ++ ++ def set_up(self): ++ fixture_dir = get_test_resource("temp_fixtures") ++ os.makedirs(fixture_dir, exist_ok=True) ++ self._cache_path = os.path.join(fixture_dir, self._cache_name) ++ self._pcs_runner = PcsRunner(self._cache_path) ++ ++ with open(self._empty_cib_path, "r") as template_file, open( ++ self.cache_path, "w" ++ ) as cache_file: ++ cache_file.write(template_file.read()) ++ self._setup_cib() ++ ++ 
def clean_up(self): ++ if os.path.isfile(self.cache_path): ++ os.unlink(self.cache_path) ++ ++ @property ++ def cache_path(self): ++ if self._cache_path is None: ++ raise AssertionError("Cache has not been initiialized") ++ return self._cache_path ++ ++ # methods for supporting assert_pcs_success ++ @property ++ def pcs_runner(self): ++ if self._pcs_runner is None: ++ raise AssertionError("Cache has not been initialized") ++ return self._pcs_runner ++ ++ def assertEqual(self, first, second, msg=None): ++ # pylint: disable=invalid-name ++ # pylint: disable=no-self-use ++ if first != second: ++ raise AssertionError( ++ f"{msg}\n{first} != {second}" if msg else f"{first} != {second}" ++ ) ++ ++ + def wrap_element_by_master(cib_file, resource_id, master_id=None): + cib_file.seek(0) + cib_tree = etree.parse(cib_file, etree.XMLParser(huge_tree=True)).getroot() +@@ -49,6 +104,16 @@ def wrap_element_by_master(cib_file, resource_id, master_id=None): + ) + + ++def wrap_element_by_master_file(filepath, resource_id, master_id=None): ++ cib_tmp = get_tmp_file("wrap_by_master") ++ write_file_to_tmpfile(filepath, cib_tmp) ++ wrap_element_by_master(cib_tmp, resource_id, master_id=master_id) ++ cib_tmp.seek(0) ++ with open(filepath, "w") as target: ++ target.write(cib_tmp.read()) ++ cib_tmp.close() ++ ++ + def fixture_master_xml(name, all_ops=True, meta_dict=None): + default_ops = f""" + + +- +- +- Restart one host machine or the local host machine if no host specified. +- +- daemon urls: node_restart +- +- + + + +diff --git a/pcsd/fenceagent.rb b/pcsd/fenceagent.rb +deleted file mode 100644 +index 4a3ba07d..00000000 +--- a/pcsd/fenceagent.rb ++++ /dev/null +@@ -1,59 +0,0 @@ +-def getFenceAgents(auth_user) +- fence_agent_list = {} +- stdout, stderr, retval = run_cmd( +- auth_user, PCS, "stonith", "list", "--nodesc" +- ) +- if retval != 0 +- $logger.error("Error running 'pcs stonith list --nodesc") +- $logger.error(stdout + stderr) +- return {} +- end +- +- agents = stdout +- agents.each { |a| +- fa = FenceAgent.new +- fa.name = a.chomp +- fence_agent_list[fa.name] = fa +- } +- return fence_agent_list +-end +- +-class FenceAgent +- attr_accessor :name, :resource_class, :required_options, :optional_options, :advanced_options, :info +- def initialize(name=nil, required_options={}, optional_options={}, resource_class=nil, advanced_options={}) +- @name = name +- @required_options = {} +- @optional_options = {} +- @required_options = required_options +- @optional_options = optional_options +- @advanced_options = advanced_options +- @resource_class = nil +- end +- +- def type +- name +- end +- +- def to_json(options = {}) +- JSON.generate({ +- :full_name => "stonith:#{name}", +- :class => 'stonith', +- :provider => nil, +- :type => name, +- }) +- end +- +- def long_desc +- if info && info.length >= 2 +- return info[1] +- end +- return "" +- end +- +- def short_desc +- if info && info.length >= 1 +- return info[0] +- end +- return "" +- end +-end +diff --git a/pcsd/pcs.rb b/pcsd/pcs.rb +index 9e26c607..1507bdf5 100644 +--- a/pcsd/pcs.rb ++++ b/pcsd/pcs.rb +@@ -1514,21 +1514,6 @@ def allowed_for_superuser(auth_user) + return true + end + +-def get_default_overview_node_list(clustername) +- nodes = get_cluster_nodes clustername +- node_list = [] +- nodes.each { |node| +- node_list << { +- 'error_list' => [], +- 'warning_list' => [], +- 'status' => 'unknown', +- 'quorum' => false, +- 'name' => node +- } +- } +- return node_list +-end +- + def enable_service(service) + result = run_pcs_internal( + 
PCSAuth.getSuperuserAuth(), +diff --git a/pcsd/pcsd.rb b/pcsd/pcsd.rb +index bf91e906..3297fc5e 100644 +--- a/pcsd/pcsd.rb ++++ b/pcsd/pcsd.rb +@@ -11,7 +11,6 @@ require 'cgi' + require 'bootstrap.rb' + require 'resource.rb' + require 'remote.rb' +-require 'fenceagent.rb' + require 'cluster.rb' + require 'config.rb' + require 'pcs.rb' +@@ -54,14 +53,14 @@ end + before do + # nobody is logged in yet + @auth_user = nil +- @tornado_session_username = Thread.current[:tornado_username] +- @tornado_session_groups = Thread.current[:tornado_groups] +- @tornado_is_authenticated = Thread.current[:tornado_is_authenticated] + + if(request.path.start_with?('/remote/') and request.path != "/remote/auth") or request.path == '/run_pcs' or request.path.start_with?('/api/') + # Sets @auth_user to a hash containing info about logged in user or halts + # the request processing if login credentials are incorrect. +- protect_by_token! ++ @auth_user = PCSAuth.loginByToken(request.cookies) ++ unless @auth_user ++ halt [401, '{"notauthorized":"true"}'] ++ end + else + # Set a sane default: nobody is logged in, but we do not need to check both + # for nil and empty username (if auth_user and auth_user[:username]) +@@ -120,37 +119,6 @@ def run_cfgsync + end + end + +-helpers do +- def is_ajax? +- return request.env['HTTP_X_REQUESTED_WITH'] == 'XMLHttpRequest' +- end +- +- def protect_by_token! +- @auth_user = PCSAuth.loginByToken(request.cookies) +- unless @auth_user +- halt [401, '{"notauthorized":"true"}'] +- end +- end +- +- def getParamList(params) +- param_line = [] +- meta_options = [] +- params.each { |param, val| +- if param.start_with?("_res_paramne_") or (param.start_with?("_res_paramempty_") and val != "") +- myparam = param.sub(/^_res_paramne_/,"").sub(/^_res_paramempty_/,"") +- param_line << "#{myparam}=#{val}" +- end +- if param == "disabled" +- meta_options << 'meta' << 'target-role=Stopped' +- end +- if param == "force" and val +- param_line << "--force" +- end +- } +- return param_line + meta_options +- end +-end +- + get '/remote/?:command?' do + return remote(params, request, @auth_user) + end +@@ -675,10 +643,6 @@ post '/manage/auth_gui_against_nodes' do + ] + end + +-get '/clusters_overview' do +- clusters_overview(params, request, getAuthUser()) +-end +- + get '/imported-cluster-list' do + imported_cluster_list(params, request, getAuthUser()) + end +@@ -693,190 +657,11 @@ post '/managec/:cluster/permissions_save/?' 
do + ) + end + +-get '/managec/:cluster/status_all' do +- auth_user = getAuthUser() +- status_all(params, request, auth_user, get_cluster_nodes(params[:cluster])) +-end +- + get '/managec/:cluster/cluster_status' do + auth_user = getAuthUser() + cluster_status_gui(auth_user, params[:cluster]) + end + +-get '/managec/:cluster/cluster_properties' do +- auth_user = getAuthUser() +- cluster = params[:cluster] +- unless cluster +- return 200, {} +- end +- code, out = send_cluster_request_with_token(auth_user, cluster, 'get_cib') +- if code == 403 +- return [403, 'Permission denied'] +- elsif code != 200 +- return [400, 'getting CIB failed'] +- end +- begin +- properties = getAllSettings(nil, REXML::Document.new(out)) +- code, out = send_cluster_request_with_token( +- auth_user, cluster, 'get_cluster_properties_definition' +- ) +- +- if code == 403 +- return [403, 'Permission denied'] +- elsif code == 404 +- definition = { +- 'batch-limit' => { +- 'name' => 'batch-limit', +- 'source' => 'pacemaker-schedulerd', +- 'default' => '0', +- 'type' => 'integer', +- 'shortdesc' => 'The number of jobs that pacemaker is allowed to execute in parallel.', +- 'longdesc' => 'The "correct" value will depend on the speed and load of your network and cluster nodes.', +- 'readable_name' => 'Batch Limit', +- 'advanced' => false +- }, +- 'no-quorum-policy' => { +- 'name' => 'no-quorum-policy', +- 'source' => 'pacemaker-schedulerd', +- 'default' => 'stop', +- 'type' => 'enum', +- 'enum' => ['stop', 'freeze', 'ignore', 'suicide'], +- 'shortdesc' => 'What to do when the cluster does not have quorum.', +- 'longdesc' => 'Allowed values: +- * ignore - continue all resource management +- * freeze - continue resource management, but don\'t recover resources from nodes not in the affected partition +- * stop - stop all resources in the affected cluster partition +- * suicide - fence all nodes in the affected cluster partition', +- 'readable_name' => 'No Quorum Policy', +- 'advanced' => false +- }, +- 'symmetric-cluster' => { +- 'name' => 'symmetric-cluster', +- 'source' => 'pacemaker-schedulerd', +- 'default' => 'true', +- 'type' => 'boolean', +- 'shortdesc' => 'All resources can run anywhere by default.', +- 'longdesc' => 'All resources can run anywhere by default.', +- 'readable_name' => 'Symmetric', +- 'advanced' => false +- }, +- 'stonith-enabled' => { +- 'name' => 'stonith-enabled', +- 'source' => 'pacemaker-schedulerd', +- 'default' => 'true', +- 'type' => 'boolean', +- 'shortdesc' => 'Failed nodes are STONITH\'d', +- 'longdesc' => 'Failed nodes are STONITH\'d', +- 'readable_name' => 'Stonith Enabled', +- 'advanced' => false +- }, +- 'stonith-action' => { +- 'name' => 'stonith-action', +- 'source' => 'pacemaker-schedulerd', +- 'default' => 'reboot', +- 'type' => 'enum', +- 'enum' => ['reboot', 'poweroff', 'off'], +- 'shortdesc' => 'Action to send to STONITH device', +- 'longdesc' => 'Action to send to STONITH device Allowed values: reboot, poweroff, off', +- 'readable_name' => 'Stonith Action', +- 'advanced' => false +- }, +- 'cluster-delay' => { +- 'name' => 'cluster-delay', +- 'source' => 'pacemaker-schedulerd', +- 'default' => '60s', +- 'type' => 'time', +- 'shortdesc' => 'Round trip delay over the network (excluding action execution)', +- 'longdesc' => 'The "correct" value will depend on the speed and load of your network and cluster nodes.', +- 'readable_name' => 'Cluster Delay', +- 'advanced' => false +- }, +- 'stop-orphan-resources' => { +- 'name' => 'stop-orphan-resources', +- 'source' => 
'pacemaker-schedulerd', +- 'default' => 'true', +- 'type' => 'boolean', +- 'shortdesc' => 'Should deleted resources be stopped', +- 'longdesc' => 'Should deleted resources be stopped', +- 'readable_name' => 'Stop Orphan Resources', +- 'advanced' => false +- }, +- 'stop-orphan-actions' => { +- 'name' => 'stop-orphan-actions', +- 'source' => 'pacemaker-schedulerd', +- 'default' => 'true', +- 'type' => 'boolean', +- 'shortdesc' => 'Should deleted actions be cancelled', +- 'longdesc' => 'Should deleted actions be cancelled', +- 'readable_name' => 'Stop Orphan Actions', +- 'advanced' => false +- }, +- 'start-failure-is-fatal' => { +- 'name' => 'start-failure-is-fatal', +- 'source' => 'pacemaker-schedulerd', +- 'default' => 'true', +- 'type' => 'boolean', +- 'shortdesc' => 'Always treat start failures as fatal', +- 'longdesc' => 'This was the old default. However when set to FALSE, the cluster will instead use the resource\'s failcount and value for resource-failure-stickiness', +- 'readable_name' => 'Start Failure is Fatal', +- 'advanced' => false +- }, +- 'pe-error-series-max' => { +- 'name' => 'pe-error-series-max', +- 'source' => 'pacemaker-schedulerd', +- 'default' => '-1', +- 'type' => 'integer', +- 'shortdesc' => 'The number of PE inputs resulting in ERRORs to save', +- 'longdesc' => 'Zero to disable, -1 to store unlimited.', +- 'readable_name' => 'PE Error Storage', +- 'advanced' => false +- }, +- 'pe-warn-series-max' => { +- 'name' => 'pe-warn-series-max', +- 'source' => 'pacemaker-schedulerd', +- 'default' => '5000', +- 'type' => 'integer', +- 'shortdesc' => 'The number of PE inputs resulting in WARNINGs to save', +- 'longdesc' => 'Zero to disable, -1 to store unlimited.', +- 'readable_name' => 'PE Warning Storage', +- 'advanced' => false +- }, +- 'pe-input-series-max' => { +- 'name' => 'pe-input-series-max', +- 'source' => 'pacemaker-schedulerd', +- 'default' => '4000', +- 'type' => 'integer', +- 'shortdesc' => 'The number of other PE inputs to save', +- 'longdesc' => 'Zero to disable, -1 to store unlimited.', +- 'readable_name' => 'PE Input Storage', +- 'advanced' => false +- }, +- 'enable-acl' => { +- 'name' => 'enable-acl', +- 'source' => 'pacemaker-based', +- 'default' => 'false', +- 'type' => 'boolean', +- 'shortdesc' => 'Enable CIB ACL', +- 'longdesc' => 'Should pacemaker use ACLs to determine access to cluster', +- 'readable_name' => 'Enable ACLs', +- 'advanced' => false +- }, +- } +- elsif code != 200 +- return [400, 'getting properties definition failed'] +- else +- definition = JSON.parse(out) +- end +- +- definition.each { |name, prop| +- prop['value'] = properties[name] +- } +- return [200, JSON.generate(definition)] +- rescue +- return [400, 'unable to get cluster properties'] +- end +-end +- + get '/managec/:cluster/get_resource_agent_metadata' do + auth_user = getAuthUser() + cluster = params[:cluster] +@@ -888,69 +673,7 @@ get '/managec/:cluster/get_resource_agent_metadata' do + false, + {:resource_agent => resource_agent} + ) +- if code != 404 +- return [code, out] +- end +- +- code, out = send_cluster_request_with_token( +- auth_user, +- cluster, +- 'resource_metadata', +- false, +- { +- :resourcename => resource_agent, +- :new => true +- } +- ) +- if code != 200 +- return [400, 'Unable to get meta-data of specified resource agent.'] +- end +- desc_regex = Regexp.new( +- '[^"]*)"' +- ) +- parameters_regex = Regexp.new( +- ']*>(?[\s\S]*)' + +- '
Optional Arguments:
(?[\S\s]*)' + +- '' +- ) +- parameter_regex = Regexp.new( +- ']*>[\s]*\s*' + +- '(?[^<\s]*)\s*\s*\s*' + +- ' resource_agent, +- :shortdesc => html2plain(desc[:short]), +- :longdesc => html2plain(desc[:long]), +- :parameters => [] +- } +- +- parameters = parameters_regex.match(out) +- parameters[:required].scan(parameter_regex) { |match| +- result[:parameters] << { +- :name => html2plain(match[1]), +- :longdesc => html2plain(match[0]), +- :shortdesc => html2plain(match[2]), +- :type => 'string', +- :required => true +- } +- } +- parameters[:optional].scan(parameter_regex) { |match| +- result[:parameters] << { +- :name => html2plain(match[1]), +- :longdesc => html2plain(match[0]), +- :shortdesc => html2plain(match[2]), +- :type => 'string', +- :required => false +- } +- } +- return [200, JSON.generate(result)] ++ return [code, out] + end + + get '/managec/:cluster/get_fence_agent_metadata' do +@@ -964,90 +687,7 @@ get '/managec/:cluster/get_fence_agent_metadata' do + false, + {:fence_agent => fence_agent} + ) +- if code != 404 +- return [code, out] +- end +- +- code, out = send_cluster_request_with_token( +- auth_user, +- cluster, +- 'fence_device_metadata', +- false, +- { +- :resourcename => fence_agent.sub('stonith:', ''), +- :new => true +- } +- ) +- if code != 200 +- return [400, 'Unable to get meta-data of specified fence agent.'] +- end +- desc_regex = Regexp.new( +- '[^"]*)"' +- ) +- parameters_regex = Regexp.new( +- ']*>(?[\s\S]*)' + +- '
Optional Arguments:
(?[\S\s]*)' + +- '
Advanced Arguments:
(?[\S\s]*)' + +- '' +- ) +- required_parameter_regex = Regexp.new( +- ']*>[\s]*' + +- '\s* (?[^<\s]*)\s*\s*\s*' + +- '\s* (?[^<\s]*)\s*\s*\s*' + +- ' fence_agent, +- :shortdesc => '', +- :longdesc => '', +- :parameters => [] +- } +- +- # pcsd in version 0.9.137 (and older) does not provide description for +- # fence agents +- desc = desc_regex.match(out) +- if desc +- result[:shortdesc] = html2plain(desc[:short]) +- result[:longdesc] = html2plain(desc[:long]) +- end +- +- parameters = parameters_regex.match(out) +- parameters[:required].scan(required_parameter_regex) { |match| +- result[:parameters] << { +- :name => html2plain(match[1]), +- :longdesc => html2plain(match[0]), +- :shortdesc => html2plain(match[2]), +- :type => 'string', +- :required => true, +- :advanced => false +- } +- } +- parameters[:optional].scan(other_parameter_regex) { |match| +- result[:parameters] << { +- :name => html2plain(match[0]), +- :longdesc => '', +- :shortdesc => html2plain(match[1]), +- :type => 'string', +- :required => false, +- :advanced => false +- } +- } +- parameters[:advanced].scan(other_parameter_regex) { |match| +- result[:parameters] << { +- :name => html2plain(match[0]), +- :longdesc => '', +- :shortdesc => html2plain(match[1]), +- :type => 'string', +- :required => false, +- :advanced => true +- } +- } +- return [200, JSON.generate(result)] ++ return [code, out] + end + + post '/managec/:cluster/fix_auth_of_cluster' do +@@ -1123,7 +763,6 @@ def pcs_compatibility_layer_known_hosts_add( + known_hosts = get_known_hosts().select { |name, obj| + host_list.include?(name) + } +- # try the new endpoint provided by pcs-0.10 + known_hosts_request_data = {} + known_hosts.each { |host_name, host_obj| + known_hosts_request_data[host_name] = { +@@ -1149,50 +788,14 @@ def pcs_compatibility_layer_known_hosts_add( + ) + end + +- # a remote host supports the endpoint; success +- if retval == 200 +- return 'success' +- end +- +- # a remote host supports the endpoint; error +- if retval != 404 +- return 'error' +- end +- +- # a remote host does not support the endpoint +- # fallback to the old endpoint provided by pcs-0.9 since 0.9.140 +- request_data = {} +- known_hosts.each { |host_name, host_obj| +- addr = host_obj.first_dest()['addr'] +- port = host_obj.first_dest()['port'] +- request_data["node:#{host_name}"] = host_obj.token +- request_data["port:#{host_name}"] = port +- request_data["node:#{addr}"] = host_obj.token +- request_data["port:#{addr}"] = port +- } +- if is_cluster_request +- retval, _out = send_cluster_request_with_token( +- auth_user, target, '/save_tokens', true, request_data +- ) +- else +- retval, _out = send_request_with_token( +- auth_user, target, '/save_tokens', true, request_data +- ) +- end +- +- # a remote host supports the endpoint; success + if retval == 200 + return 'success' + end + +- # a remote host supports the endpoint; error +- if retval != 404 +- return 'error' ++ if retval == 404 ++ return 'not_supported' + end +- +- # a remote host does not support any of the endpoints +- # there's nothing we can do about it +- return 'not_supported' ++ return 'error' + end + + def pcs_compatibility_layer_get_cluster_known_hosts(cluster_name, target_node) +@@ -1200,11 +803,9 @@ def pcs_compatibility_layer_get_cluster_known_hosts(cluster_name, target_node) + known_hosts = [] + auth_user = PCSAuth.getSuperuserAuth() + +- # try the new endpoint provided by pcs-0.10 + retval, out = send_request_with_token( + auth_user, target_node, '/get_cluster_known_hosts' + ) +- # a remote host supports 
/get_cluster_known_hosts; data downloaded + if retval == 200 + begin + JSON.parse(out).each { |name, data| +@@ -1222,159 +823,21 @@ def pcs_compatibility_layer_get_cluster_known_hosts(cluster_name, target_node) + "cannot get authentication info from cluster '#{cluster_name}'" + ) + end +- return known_hosts, warning_messages +- end +- +- # a remote host supports /get_cluster_known_hosts; an error occured +- if retval != 404 ++ elsif retval == 404 + warning_messages << ( + "Unable to automatically authenticate against cluster nodes: " + +- "cannot get authentication info from cluster '#{cluster_name}'" ++ "cluster '#{cluster_name}' is running an old version of pcs/pcsd" + ) +- return known_hosts, warning_messages +- end +- +- # a remote host does not support /get_cluster_known_hosts +- # fallback to the old endpoint provided by pcs-0.9 since 0.9.140 +- retval, out = send_request_with_token( +- auth_user, target_node, '/get_cluster_tokens', false, {'with_ports' => '1'} +- ) +- +- # a remote host supports /get_cluster_tokens; data downloaded +- if retval == 200 +- begin +- data = JSON.parse(out) +- expected_keys = ['tokens', 'ports'] +- if expected_keys.all? {|i| data.has_key?(i) and data[i].class == Hash} +- # new format +- new_tokens = data["tokens"] || {} +- new_ports = data["ports"] || {} +- else +- # old format +- new_tokens = data +- new_ports = {} +- end +- new_tokens.each { |name_addr, token| +- known_hosts << PcsKnownHost.new( +- name_addr, +- token, +- [ +- { +- 'addr' => name_addr, +- 'port' => (new_ports[name_addr] || PCSD_DEFAULT_PORT), +- } +- ] +- ) +- } +- rescue => e +- $logger.error "Unable to parse the response of /get_cluster_tokens: #{e}" +- known_hosts = [] +- warning_messages << ( +- "Unable to automatically authenticate against cluster nodes: " + +- "cannot get authentication info from cluster '#{cluster_name}'" +- ) +- end +- return known_hosts, warning_messages +- end +- +- # a remote host supports /get_cluster_tokens; an error occured +- if retval != 404 ++ else + warning_messages << ( + "Unable to automatically authenticate against cluster nodes: " + + "cannot get authentication info from cluster '#{cluster_name}'" + ) +- return known_hosts, warning_messages + end + +- # a remote host does not support /get_cluster_tokens +- # there's nothing we can do about it +- warning_messages << ( +- "Unable to automatically authenticate against cluster nodes: " + +- "cluster '#{cluster_name}' is running an old version of pcs/pcsd" +- ) + return known_hosts, warning_messages + end + +-def pcs_0_9_142_resource_change_group(auth_user, params) +- parameters = { +- :resource_id => params[:resource_id], +- :resource_group => '', +- :_orig_resource_group => '', +- } +- parameters[:resource_group] = params[:group_id] if params[:group_id] +- if params[:old_group_id] +- parameters[:_orig_resource_group] = params[:old_group_id] +- end +- return send_cluster_request_with_token( +- auth_user, params[:cluster], 'update_resource', true, parameters +- ) +-end +- +-def pcs_0_9_142_resource_clone(auth_user, params) +- parameters = { +- :resource_id => params[:resource_id], +- :resource_clone => true, +- :_orig_resource_clone => 'false', +- } +- return send_cluster_request_with_token( +- auth_user, params[:cluster], 'update_resource', true, parameters +- ) +-end +- +-def pcs_0_9_142_resource_unclone(auth_user, params) +- parameters = { +- :resource_id => params[:resource_id], +- :resource_clone => nil, +- :_orig_resource_clone => 'true', +- } +- return send_cluster_request_with_token( +- 
auth_user, params[:cluster], 'update_resource', true, parameters +- ) +-end +- +-def pcs_0_9_142_resource_master(auth_user, params) +- parameters = { +- :resource_id => params[:resource_id], +- :resource_ms => true, +- :_orig_resource_ms => 'false', +- } +- return send_cluster_request_with_token( +- auth_user, params[:cluster], 'update_resource', true, parameters +- ) +-end +- +-# There is a bug in pcs-0.9.138 and older in processing the standby and +-# unstandby request. JS of that pcsd always sent nodename in "node" +-# parameter, which caused pcsd daemon to run the standby command locally with +-# param["node"] as node name. This worked fine if the local cluster was +-# managed from JS, as pacemaker simply put the requested node into standby. +-# However it didn't work for managing non-local clusters, as the command was +-# run on the local cluster everytime. Pcsd daemon would send the request to a +-# remote cluster if the param["name"] variable was set, and that never +-# happened. That however wouldn't work either, as then the required parameter +-# "node" wasn't sent in the request causing an exception on the receiving +-# node. This is fixed in commit 053f63ca109d9ef9e7f0416e90aab8e140480f5b +-# +-# In order to be able to put nodes running pcs-0.9.138 into standby, the +-# nodename must be sent in "node" param, and the "name" must not be sent. +-def pcs_0_9_138_node_standby(auth_user, params) +- translated_params = { +- 'node' => params[:name], +- } +- return send_cluster_request_with_token( +- auth_user, params[:cluster], 'node_standby', true, translated_params +- ) +-end +- +-def pcs_0_9_138_node_unstandby(auth_user, params) +- translated_params = { +- 'node' => params[:name], +- } +- return send_cluster_request_with_token( +- auth_user, params[:cluster], 'node_unstandby', true, translated_params +- ) +-end +- + def pcs_0_10_6_get_avail_resource_agents(code, out) + if code != 200 + return code, out +@@ -1421,99 +884,9 @@ post '/managec/:cluster/?*' do + if params[:cluster] + request = "/" + params[:splat].join("/") + +- # backward compatibility layer BEGIN +- translate_for_version = { +- '/node_standby' => [ +- [[0, 9, 138], method(:pcs_0_9_138_node_standby)], +- ], +- '/node_unstandby' => [ +- [[0, 9, 138], method(:pcs_0_9_138_node_unstandby)], +- ], +- } +- if translate_for_version.key?(request) +- target_pcsd_version = [0, 0, 0] +- version_code, version_out = send_cluster_request_with_token( +- auth_user, params[:cluster], 'get_sw_versions' +- ) +- if version_code == 200 +- begin +- versions = JSON.parse(version_out) +- target_pcsd_version = versions['pcs'] if versions['pcs'] +- rescue JSON::ParserError +- end +- end +- translate_function = nil +- translate_for_version[request].each { |pair| +- if (target_pcsd_version <=> pair[0]) != 1 # target <= pair +- translate_function = pair[1] +- break +- end +- } +- end +- # backward compatibility layer END +- +- if translate_function +- code, out = translate_function.call(auth_user, params) +- else +- code, out = send_cluster_request_with_token( +- auth_user, params[:cluster], request, true, params, true, raw_data +- ) +- end +- +- # backward compatibility layer BEGIN +- if code == 404 +- case request +- # supported since pcs-0.9.143 (tree view of resources) +- when '/resource_change_group', 'resource_change_group' +- code, out = pcs_0_9_142_resource_change_group(auth_user, params) +- # supported since pcs-0.9.143 (tree view of resources) +- when '/resource_clone', 'resource_clone' +- code, out = pcs_0_9_142_resource_clone(auth_user, 
params) +- # supported since pcs-0.9.143 (tree view of resources) +- when '/resource_unclone', 'resource_unclone' +- code, out = pcs_0_9_142_resource_unclone(auth_user, params) +- # supported since pcs-0.9.143 (tree view of resources) +- when '/resource_master', 'resource_master' +- # defaults to true for old pcsds without capabilities defined +- supports_resource_master = true +- capabilities_code, capabilities_out = send_cluster_request_with_token( +- auth_user, params[:cluster], 'capabilities' +- ) +- if capabilities_code == 200 +- begin +- capabilities_json = JSON.parse(capabilities_out) +- supports_resource_master = capabilities_json[:pcsd_capabilities].include?( +- 'pcmk.resource.master' +- ) +- rescue JSON::ParserError +- end +- end +- if supports_resource_master +- code, out = pcs_0_9_142_resource_master(auth_user, params) +- end +- else +- redirection = { +- # constraints removal for pcs-0.9.137 and older +- "/remove_constraint_remote" => "/resource_cmd/rm_constraint", +- # constraints removal for pcs-0.9.137 and older +- "/remove_constraint_rule_remote" => "/resource_cmd/rm_constraint_rule" +- } +- if redirection.key?(request) +- code, out = send_cluster_request_with_token( +- auth_user, +- params[:cluster], +- redirection[request], +- true, +- params, +- false, +- raw_data +- ) +- end +- end +- end +- # backward compatibility layer END +- +- return code, out ++ return send_cluster_request_with_token( ++ auth_user, params[:cluster], request, true, params, true, raw_data ++ ) + end + end + +@@ -1548,17 +921,3 @@ get '*' do + redirect "Bad URL" + call(env.merge("PATH_INFO" => '/nodes')) + end +- +-def html2plain(text) +- return CGI.unescapeHTML(text).gsub(/]*>/, "\n") +-end +- +-helpers do +- def h(text) +- Rack::Utils.escape_html(text) +- end +- +- def nl2br(text) +- text.gsub(/\n/, "
") +- end +-end +diff --git a/pcsd/remote.rb b/pcsd/remote.rb +index 1c019e98..e36f651f 100644 +--- a/pcsd/remote.rb ++++ b/pcsd/remote.rb +@@ -25,14 +25,14 @@ def remote(params, request, auth_user) + remote_cmd_without_pacemaker = { + :capabilities => method(:capabilities), + :status => method(:node_status), +- :status_all => method(:status_all), + :cluster_status => method(:cluster_status_remote), + :cluster_status_plaintext => method(:cluster_status_plaintext), + :auth => method(:auth), + :check_auth => method(:check_auth), ++ # lib api: ++ # /api/v1/cluster-setup/v1 + :cluster_setup => method(:cluster_setup), + :get_quorum_info => method(:get_quorum_info), +- :get_cib => method(:get_cib), + :get_corosync_conf => method(:get_corosync_conf_remote), + :set_corosync_conf => method(:set_corosync_conf), + :get_sync_capabilities => method(:get_sync_capabilities), +@@ -45,14 +45,6 @@ def remote(params, request, auth_user) + :cluster_start => method(:cluster_start), + :cluster_stop => method(:cluster_stop), + :config_restore => method(:config_restore), +- # TODO deprecated, remove, not used anymore +- :node_restart => method(:node_restart), +- # lib api: +- # /api/v1/node-standby-unstandby/v1 +- :node_standby => method(:node_standby), +- # lib api: +- # /api/v1/node-standby-unstandby/v1 +- :node_unstandby => method(:node_unstandby), + :cluster_enable => method(:cluster_enable), + :cluster_disable => method(:cluster_disable), + :get_sw_versions => method(:get_sw_versions), +@@ -69,12 +61,6 @@ def remote(params, request, auth_user) + :sbd_enable => method(:sbd_enable), + :remove_stonith_watchdog_timeout=> method(:remove_stonith_watchdog_timeout), + :set_stonith_watchdog_timeout_to_zero => method(:set_stonith_watchdog_timeout_to_zero), +- # lib api: +- # /api/v1/sbd-enable-sbd/v1 +- :remote_enable_sbd => method(:remote_enable_sbd), +- # lib api: +- # /api/v1/sbd-disable-sbd/v1 +- :remote_disable_sbd => method(:remote_disable_sbd), + :qdevice_net_get_ca_certificate => method(:qdevice_net_get_ca_certificate), + # lib api: + # /api/v1/qdevice-qdevice-net-sign-certificate-request/v1 +@@ -100,9 +86,6 @@ def remote(params, request, auth_user) + # lib api: + # /api/v1/resource-agent-list-agents/v1 + :get_avail_resource_agents => method(:get_avail_resource_agents), +- # lib api: +- # /api/v1/stonith-agent-list-agents/v1 +- :get_avail_fence_agents => method(:get_avail_fence_agents), + } + remote_cmd_with_pacemaker = { + :pacemaker_node_status => method(:remote_pacemaker_node_status), +@@ -159,18 +142,6 @@ def remote(params, request, auth_user) + :get_fence_agent_metadata => method(:get_fence_agent_metadata), + :manage_resource => method(:manage_resource), + :unmanage_resource => method(:unmanage_resource), +- # lib api: +- # /api/v1/alert-create-alert/v1 +- :create_alert => method(:create_alert), +- # lib api: +- # /api/v1/alert-update-alert/v1 +- :update_alert => method(:update_alert), +- :create_recipient => method(:create_recipient), +- :update_recipient => method(:update_recipient), +- # lib api: +- # /api/v1/alert-remove-alert/v1 +- # /api/v1/alert-remove-recipient/v1 +- :remove_alerts_and_recipients => method("remove_alerts_and_recipients"), + } + + command = params[:command].to_sym +@@ -193,6 +164,24 @@ def remote(params, request, auth_user) + end + end + ++def _get_param_list(params) ++ param_line = [] ++ meta_options = [] ++ params.each { |param, val| ++ if param.start_with?("_res_paramne_") or (param.start_with?("_res_paramempty_") and val != "") ++ myparam = 
param.sub(/^_res_paramne_/,"").sub(/^_res_paramempty_/,"") ++ param_line << "#{myparam}=#{val}" ++ end ++ if param == "disabled" ++ meta_options << 'meta' << 'target-role=Stopped' ++ end ++ if param == "force" and val ++ param_line << "--force" ++ end ++ } ++ return param_line + meta_options ++end ++ + def capabilities(params, request, auth_user) + return JSON.generate({ + :pcsd_capabilities => CAPABILITIES_PCSD, +@@ -394,53 +383,6 @@ def config_restore(params, request, auth_user) + end + end + +-# TODO deprecated, remove, not used anymore +-def node_restart(params, request, auth_user) +- if params[:name] +- code, response = send_request_with_token( +- auth_user, params[:name], 'node_restart', true +- ) +- else +- if not allowed_for_local_cluster(auth_user, Permissions::WRITE) +- return 403, 'Permission denied' +- end +- $logger.info "Restarting Node" +- output = `/sbin/reboot` +- $logger.debug output +- return output +- end +-end +- +-def node_standby(params, request, auth_user) +- if params[:name] +- code, response = send_request_with_token( +- auth_user, params[:name], 'node_standby', true +- ) +- else +- if not allowed_for_local_cluster(auth_user, Permissions::WRITE) +- return 403, 'Permission denied' +- end +- $logger.info "Standby Node" +- stdout, stderr, retval = run_cmd(auth_user, PCS, "node", "standby") +- return stdout +- end +-end +- +-def node_unstandby(params, request, auth_user) +- if params[:name] +- code, response = send_request_with_token( +- auth_user, params[:name], 'node_unstandby', true +- ) +- else +- if not allowed_for_local_cluster(auth_user, Permissions::WRITE) +- return 403, 'Permission denied' +- end +- $logger.info "Unstandby Node" +- stdout, stderr, retval = run_cmd(auth_user, PCS, "node", "unstandby") +- return stdout +- end +-end +- + def cluster_enable(params, request, auth_user) + if params[:name] + code, response = send_request_with_token( +@@ -491,21 +433,6 @@ def get_quorum_info(params, request, auth_user) + end + end + +-def get_cib(params, request, auth_user) +- if not allowed_for_local_cluster(auth_user, Permissions::READ) +- return 403, 'Permission denied' +- end +- cib, stderr, retval = run_cmd(auth_user, CIBADMIN, "-Ql") +- if retval != 0 +- if not pacemaker_running? 
+- return [400, '{"pacemaker_not_running":true}'] +- end +- return [500, "Unable to get CIB: " + cib.to_s + stderr.to_s] +- else +- return [200, cib] +- end +-end +- + def get_corosync_conf_remote(params, request, auth_user) + if not allowed_for_local_cluster(auth_user, Permissions::READ) + return 403, 'Permission denied' +@@ -912,66 +839,6 @@ def node_status(params, request, auth_user) + return [400, "Unsupported version '#{version}' of status requested"] + end + +-def status_all(params, request, auth_user, nodes=[], dont_update_config=false) +- if nodes == nil +- return JSON.generate({"error" => "true"}) +- end +- +- final_response = {} +- threads = [] +- forbidden_nodes = {} +- nodes.each {|node| +- threads << Thread.new(Thread.current[:pcsd_logger_container]) { |logger| +- Thread.current[:pcsd_logger_container] = logger +- code, response = send_request_with_token(auth_user, node, 'status') +- if 403 == code +- forbidden_nodes[node] = true +- end +- begin +- final_response[node] = JSON.parse(response) +- rescue JSON::ParserError => e +- final_response[node] = {"bad_json" => true} +- $logger.info("ERROR: Parse Error when parsing status JSON from #{node}") +- end +- if final_response[node] and final_response[node]["notoken"] == true +- $logger.error("ERROR: bad token for #{node}") +- end +- } +- } +- threads.each { |t| t.join } +- if forbidden_nodes.length > 0 +- return 403, 'Permission denied' +- end +- +- # Get full list of nodes and see if we need to update the configuration +- node_list = [] +- final_response.each { |fr,n| +- node_list += n["corosync_offline"] if n["corosync_offline"] +- node_list += n["corosync_online"] if n["corosync_online"] +- node_list += n["pacemaker_offline"] if n["pacemaker_offline"] +- node_list += n["pacemaker_online"] if n["pacemaker_online"] +- } +- +- node_list.uniq! +- if node_list.length > 0 +- config = PCSConfig.new(Cfgsync::PcsdSettings.from_file().text()) +- old_node_list = config.get_nodes(params[:cluster]) +- if !(dont_update_config or config.cluster_nodes_equal?(params[:cluster], node_list)) +- $logger.info("Updating node list for: #{params[:cluster]} #{old_node_list}->#{node_list}") +- config.update_cluster(params[:cluster], node_list) +- sync_config = Cfgsync::PcsdSettings.from_text(config.text()) +- # on version conflict just go on, config will be corrected eventually +- # by displaying the cluster in the web UI +- Cfgsync::save_sync_new_version( +- sync_config, get_corosync_nodes_names(), $cluster_name, true +- ) +- return status_all(params, request, auth_user, node_list, true) +- end +- end +- $logger.debug("NODE LIST: " + node_list.inspect) +- return JSON.generate(final_response) +-end +- + def imported_cluster_list(params, request, auth_user) + config = PCSConfig.new(Cfgsync::PcsdSettings.from_file().text()) + imported_clusters = {"cluster_list" => []} +@@ -981,173 +848,6 @@ def imported_cluster_list(params, request, auth_user) + return JSON.generate(imported_clusters) + end + +-def clusters_overview(params, request, auth_user) +- cluster_map = {} +- forbidden_clusters = {} +- threads = [] +- config = PCSConfig.new(Cfgsync::PcsdSettings.from_file().text()) +- config.clusters.each { |cluster| +- threads << Thread.new(Thread.current[:pcsd_logger_container]) { |logger| +- Thread.current[:pcsd_logger_container] = logger +- cluster_map[cluster.name] = { +- 'cluster_name' => cluster.name, +- 'error_list' => [ +- {'message' => 'Unable to connect to the cluster. 
Request timeout.'} +- ], +- 'warning_list' => [], +- 'status' => 'unknown', +- 'node_list' => get_default_overview_node_list(cluster.name), +- 'resource_list' => [] +- } +- overview_cluster = nil +- online, offline, not_authorized_nodes = is_auth_against_nodes( +- auth_user, +- get_cluster_nodes(cluster.name), +- 3 +- ) +- not_supported = false +- forbidden = false +- cluster_nodes_auth = (online + offline).uniq +- cluster_nodes_all = (cluster_nodes_auth + not_authorized_nodes).uniq +- nodes_not_in_cluster = [] +- for node in cluster_nodes_auth +- code, response = send_request_with_token( +- auth_user, node, 'cluster_status', true, {}, true, nil, 8 +- ) +- if code == 404 +- not_supported = true +- next +- end +- if 403 == code +- forbidden = true +- forbidden_clusters[cluster.name] = true +- break +- end +- begin +- parsed_response = JSON.parse(response) +- if parsed_response['noresponse'] or parsed_response['pacemaker_not_running'] +- next +- elsif parsed_response['notoken'] or parsed_response['notauthorized'] +- next +- elsif parsed_response['cluster_name'] != cluster.name +- # queried node is not in the cluster (any more) +- nodes_not_in_cluster << node +- next +- else +- overview_cluster = parsed_response +- break +- end +- rescue JSON::ParserError +- end +- end +- +- if cluster_nodes_all.sort == nodes_not_in_cluster.sort +- overview_cluster = { +- 'cluster_name' => cluster.name, +- 'error_list' => [], +- 'warning_list' => [], +- 'status' => 'unknown', +- 'node_list' => [], +- 'resource_list' => [] +- } +- end +- +- if not overview_cluster +- overview_cluster = { +- 'cluster_name' => cluster.name, +- 'error_list' => [], +- 'warning_list' => [], +- 'status' => 'unknown', +- 'node_list' => get_default_overview_node_list(cluster.name), +- 'resource_list' => [] +- } +- if not_supported +- overview_cluster['warning_list'] = [ +- { +- 'message' => 'Cluster is running an old version of pcs/pcsd which does not provide data for the dashboard.', +- }, +- ] +- else +- if forbidden +- overview_cluster['error_list'] = [ +- { +- 'message' => 'You do not have permissions to view the cluster.', +- 'type' => 'forbidden', +- }, +- ] +- overview_cluster['node_list'] = [] +- else +- overview_cluster['error_list'] = [ +- { +- 'message' => 'Unable to connect to the cluster.', +- }, +- ] +- end +- end +- end +- if not_authorized_nodes.length > 0 +- overview_cluster['warning_list'] << { +- 'message' => 'GUI is not authorized against node(s) '\ +- + not_authorized_nodes.join(', '), +- 'type' => 'nodes_not_authorized', +- 'node_list' => not_authorized_nodes, +- } +- end +- +- overview_cluster['node_list'].each { |node| +- if node['status_version'] == '1' +- overview_cluster['warning_list'] << { +- :message => 'Some nodes are running old version of pcs/pcsd.' 
+- } +- break +- end +- } +- +- cluster_map[cluster.name] = overview_cluster +- } +- } +- +- begin +- Timeout::timeout(18) { +- threads.each { |t| t.join } +- } +- rescue Timeout::Error +- threads.each { |t| t.exit } +- end +- +- # update clusters in PCSConfig +- not_current_data = false +- config = PCSConfig.new(Cfgsync::PcsdSettings.from_file().text()) +- cluster_map.each { |cluster, values| +- next if forbidden_clusters[cluster] +- nodes = [] +- values['node_list'].each { |node| +- nodes << node['name'] +- } +- if !config.cluster_nodes_equal?(cluster, nodes) +- $logger.info("Updating node list for: #{cluster} #{config.get_nodes(cluster)}->#{nodes}") +- config.update_cluster(cluster, nodes) +- not_current_data = true +- end +- } +- if not_current_data +- sync_config = Cfgsync::PcsdSettings.from_text(config.text()) +- # on version conflict just go on, config will be corrected eventually +- # by displaying the cluster in the web UI +- Cfgsync::save_sync_new_version( +- sync_config, get_corosync_nodes_names(), $cluster_name, true +- ) +- end +- +- overview = { +- 'not_current_data' => not_current_data, +- 'cluster_list' => cluster_map.values.sort { |a, b| +- a['clustername'] <=> b['clustername'] +- } +- } +- return JSON.generate(overview) +-end +- + def auth(params, request, auth_user) + # User authentication using username and password is done in python part of + # pcsd. We will get here only if credentials are correct, so we just need to +@@ -1220,7 +920,7 @@ def update_resource (params, request, auth_user) + return 403, 'Permission denied' + end + +- param_line = getParamList(params) ++ param_line = _get_param_list(params) + if not params[:resource_id] + cmd = [PCS, "resource", "create", params[:name], params[:resource_type]] + cmd += param_line +@@ -1320,7 +1020,7 @@ def update_fence_device(params, request, auth_user) + + $logger.info "Updating fence device" + $logger.info params +- param_line = getParamList(params) ++ param_line = _get_param_list(params) + $logger.info param_line + + if not params[:resource_id] +@@ -1353,14 +1053,6 @@ def get_avail_resource_agents(params, request, auth_user) + return JSON.generate(getResourceAgents(auth_user).map{|a| [a, get_resource_agent_name_structure(a)]}.to_h) + end + +-def get_avail_fence_agents(params, request, auth_user) +- if not allowed_for_local_cluster(auth_user, Permissions::READ) +- return 403, 'Permission denied' +- end +- agents = getFenceAgents(auth_user) +- return JSON.generate(agents) +-end +- + def remove_resource(params, request, auth_user) + if not allowed_for_local_cluster(auth_user, Permissions::WRITE) + return 403, 'Permission denied' +@@ -1740,18 +1432,6 @@ def update_cluster_settings(params, request, auth_user) + to_update = [] + current = getAllSettings(auth_user) + +- # We need to be able to set cluster properties also from older version GUI. +- # This code handles proper processing of checkboxes. +- # === backward compatibility layer start === +- params['hidden'].each { |prop, val| +- next if prop == 'hidden_input' +- unless properties.include?(prop) +- properties[prop] = val +- to_update << prop +- end +- } +- # === backward compatibility layer end === +- + properties.each { |prop, val| + val.strip! 
+ if not current.include?(prop) and val != '' # add +@@ -2236,62 +1916,6 @@ def set_stonith_watchdog_timeout_to_zero(param, request, auth_user) + end + end + +-def remote_enable_sbd(params, request, auth_user) +- unless allowed_for_local_cluster(auth_user, Permissions::WRITE) +- return 403, 'Permission denied' +- end +- +- arg_list = [] +- +- if ['true', '1', 'on'].include?(params[:ignore_offline_nodes]) +- arg_list << '--skip-offline' +- end +- +- params[:watchdog].each do |node, watchdog| +- unless watchdog.strip.empty? +- arg_list << "watchdog=#{watchdog.strip}@#{node}" +- end +- end +- +- params[:config].each do |option, value| +- unless value.empty? +- arg_list << "#{option}=#{value}" +- end +- end +- +- _, stderr, retcode = run_cmd( +- auth_user, PCS, 'stonith', 'sbd', 'enable', *arg_list +- ) +- +- if retcode != 0 +- return [400, "Unable to enable sbd in cluster:\n#{stderr.join('')}"] +- end +- +- return [200, 'Sbd has been enabled.'] +-end +- +-def remote_disable_sbd(params, request, auth_user) +- unless allowed_for_local_cluster(auth_user, Permissions::WRITE) +- return 403, 'Permission denied' +- end +- +- arg_list = [] +- +- if ['true', '1', 'on'].include?(params[:ignore_offline_nodes]) +- arg_list << '--skip-offline' +- end +- +- _, stderr, retcode = run_cmd( +- auth_user, PCS, 'stonith', 'sbd', 'disable', *arg_list +- ) +- +- if retcode != 0 +- return [400, "Unable to disable sbd in cluster:\n#{stderr.join('')}"] +- end +- +- return [200, 'Sbd has been disabled.'] +-end +- + def qdevice_net_get_ca_certificate(params, request, auth_user) + unless allowed_for_local_cluster(auth_user, Permissions::READ) + return 403, 'Permission denied' +@@ -2697,145 +2321,6 @@ def manage_services(params, request, auth_user) + end + end + +-def _hash_to_argument_list(hash) +- result = [] +- if hash.kind_of?(Hash) +- hash.each {|key, value| +- value = '' if value.nil? +- result << "#{key}=#{value}" +- } +- end +- return result +-end +- +-def create_alert(params, request, auth_user) +- unless allowed_for_local_cluster(auth_user, Permissions::WRITE) +- return 403, 'Permission denied' +- end +- path = params[:path] +- unless path +- return [400, 'Missing required parameter: path'] +- end +- alert_id = params[:alert_id] +- description = params[:description] +- meta_attr_list = _hash_to_argument_list(params[:meta_attr]) +- instance_attr_list = _hash_to_argument_list(params[:instance_attr]) +- cmd = [PCS, 'alert', 'create', "path=#{path}"] +- cmd << "id=#{alert_id}" if alert_id and alert_id != '' +- cmd << "description=#{description}" if description and description != '' +- cmd += ['options', *instance_attr_list] if instance_attr_list.any? +- cmd += ['meta', *meta_attr_list] if meta_attr_list.any? 
+- output, stderr, retval = run_cmd(auth_user, *cmd) +- if retval != 0 +- return [400, "Unable to create alert: #{stderr.join("\n")}"] +- end +- return [200, 'Alert created'] +-end +- +-def update_alert(params, request, auth_user) +- unless allowed_for_local_cluster(auth_user, Permissions::WRITE) +- return 403, 'Permission denied' +- end +- alert_id = params[:alert_id] +- unless alert_id +- return [400, 'Missing required parameter: alert_id'] +- end +- path = params[:path] +- description = params[:description] +- meta_attr_list = _hash_to_argument_list(params[:meta_attr]) +- instance_attr_list = _hash_to_argument_list(params[:instance_attr]) +- cmd = [PCS, 'alert', 'update', alert_id] +- cmd << "path=#{path}" if path +- cmd << "description=#{description}" if description +- cmd += ['options', *instance_attr_list] if instance_attr_list.any? +- cmd += ['meta', *meta_attr_list] if meta_attr_list.any? +- output, stderr, retval = run_cmd(auth_user, *cmd) +- if retval != 0 +- return [400, "Unable to update alert: #{stderr.join("\n")}"] +- end +- return [200, 'Alert updated'] +-end +- +-def remove_alerts_and_recipients(params, request, auth_user) +- unless allowed_for_local_cluster(auth_user, Permissions::WRITE) +- return 403, 'Permission denied' +- end +- alert_list = params[:alert_list] +- recipient_list = params[:recipient_list] +- if recipient_list.kind_of?(Array) and recipient_list.any? +- output, stderr, retval = run_cmd( +- auth_user, PCS, 'alert', 'recipient', 'remove', *recipient_list +- ) +- if retval != 0 +- return [400, "Unable to remove recipients: #{stderr.join("\n")}"] +- end +- end +- if alert_list.kind_of?(Array) and alert_list.any? +- output, stderr, retval = run_cmd( +- auth_user, PCS, 'alert', 'remove', *alert_list +- ) +- if retval != 0 +- return [400, "Unable to remove alerts: #{stderr.join("\n")}"] +- end +- end +- return [200, 'All removed'] +-end +- +-def create_recipient(params, request, auth_user) +- unless allowed_for_local_cluster(auth_user, Permissions::WRITE) +- return 403, 'Permission denied' +- end +- alert_id = params[:alert_id] +- if not alert_id or alert_id.strip! == '' +- return [400, 'Missing required paramter: alert_id'] +- end +- value = params[:value] +- if not value or value == '' +- return [400, 'Missing required paramter: value'] +- end +- recipient_id = params[:recipient_id] +- description = params[:description] +- meta_attr_list = _hash_to_argument_list(params[:meta_attr]) +- instance_attr_list = _hash_to_argument_list(params[:instance_attr]) +- cmd = [PCS, 'alert', 'recipient', 'add', alert_id, "value=#{value}"] +- cmd << "id=#{recipient_id}" if recipient_id and recipient_id != '' +- cmd << "description=#{description}" if description and description != '' +- cmd += ['options', *instance_attr_list] if instance_attr_list.any? +- cmd += ['meta', *meta_attr_list] if meta_attr_list.any? +- output, stderr, retval = run_cmd(auth_user, *cmd) +- if retval != 0 +- return [400, "Unable to create recipient: #{stderr.join("\n")}"] +- end +- return [200, 'Recipient created'] +-end +- +-def update_recipient(params, request, auth_user) +- unless allowed_for_local_cluster(auth_user, Permissions::WRITE) +- return 403, 'Permission denied' +- end +- recipient_id = params[:recipient_id] +- if not recipient_id or recipient_id.strip! == '' +- return [400, 'Missing required paramter: recipient_id'] +- end +- value = params[:value] +- if value and value.strip! 
== '' +- return [400, 'Parameter value canot be empty string'] +- end +- description = params[:description] +- meta_attr_list = _hash_to_argument_list(params[:meta_attr]) +- instance_attr_list = _hash_to_argument_list(params[:instance_attr]) +- cmd = [PCS, 'alert', 'recipient', 'update', recipient_id] +- cmd << "value=#{value}" if value +- cmd << "description=#{description}" if description +- cmd += ['options', *instance_attr_list] if instance_attr_list.any? +- cmd += ['meta', *meta_attr_list] if meta_attr_list.any? +- output, stderr, retval = run_cmd(auth_user, *cmd) +- if retval != 0 +- return [400, "Unable to update recipient: #{stderr.join("\n")}"] +- end +- return [200, 'Recipient updated'] +-end +- + def pcsd_success(msg) + $logger.info(msg) + return [200, msg] +diff --git a/pcsd/resource.rb b/pcsd/resource.rb +index e49422f8..27894cc9 100644 +--- a/pcsd/resource.rb ++++ b/pcsd/resource.rb +@@ -103,11 +103,8 @@ def get_resource_agent_name_structure(agent_name) + match = expression.match(agent_name) + if match + provider = match.names.include?('provider') ? match[:provider] : nil +- class_provider = provider.nil? ? match[:standard] : "#{match[:standard]}:#{provider}" + return { + :full_name => agent_name, +- # TODO remove, this is only used by the old web UI +- :class_provider => class_provider, + :class => match[:standard], + :provider => provider, + :type => match[:type], +diff --git a/pcsd/rserver.rb b/pcsd/rserver.rb +index c37f9df4..e2c5e2a1 100644 +--- a/pcsd/rserver.rb ++++ b/pcsd/rserver.rb +@@ -26,7 +26,6 @@ class TornadoCommunicationMiddleware + session = JSON.parse(Base64.strict_decode64(env["HTTP_X_PCSD_PAYLOAD"])) + Thread.current[:tornado_username] = session["username"] + Thread.current[:tornado_groups] = session["groups"] +- Thread.current[:tornado_is_authenticated] = session["is_authenticated"] + end + + status, headers, body = @app.call(env) +diff --git a/pcsd/test/test_resource.rb b/pcsd/test/test_resource.rb +index 1eb0d3aa..97679eca 100644 +--- a/pcsd/test/test_resource.rb ++++ b/pcsd/test/test_resource.rb +@@ -8,7 +8,6 @@ class GetResourceAgentNameStructure < Test::Unit::TestCase + get_resource_agent_name_structure('standard:provider:type'), + { + :full_name => 'standard:provider:type', +- :class_provider => 'standard:provider', + :class => 'standard', + :provider => 'provider', + :type => 'type', +@@ -21,7 +20,6 @@ class GetResourceAgentNameStructure < Test::Unit::TestCase + get_resource_agent_name_structure('standard:type'), + { + :full_name => 'standard:type', +- :class_provider => 'standard', + :class => 'standard', + :provider => nil, + :type => 'type', +@@ -34,7 +32,6 @@ class GetResourceAgentNameStructure < Test::Unit::TestCase + get_resource_agent_name_structure('systemd:service@instance:name'), + { + :full_name => 'systemd:service@instance:name', +- :class_provider => 'systemd', + :class => 'systemd', + :provider => nil, + :type => 'service@instance:name', +@@ -47,7 +44,6 @@ class GetResourceAgentNameStructure < Test::Unit::TestCase + get_resource_agent_name_structure('service:service@instance:name'), + { + :full_name => 'service:service@instance:name', +- :class_provider => 'service', + :class => 'service', + :provider => nil, + :type => 'service@instance:name', +-- +2.31.1 + diff --git a/SPECS/pcs.spec b/SPECS/pcs.spec new file mode 100644 index 0000000..e15d458 --- /dev/null +++ b/SPECS/pcs.spec @@ -0,0 +1,957 @@ +Name: pcs +Version: 0.11.1 +Release: 3%{?dist} +# https://docs.fedoraproject.org/en-US/packaging-guidelines/LicensingGuidelines/ +# 
https://fedoraproject.org/wiki/Licensing:Main?rd=Licensing#Good_Licenses
+# GPLv2: pcs
+# ASL 2.0: tornado
+# MIT: backports, dacite, daemons, ember, ethon, handlebars, jquery, jquery-ui,
+# mustermann, rack, rack-protection, rack-test, sinatra, tilt
+# GPLv2 or Ruby: eventmachine
+# (GPLv2 or Ruby) and BSD: thin
+# BSD or Ruby: open4, rexml, ruby2_keywords, webrick
+# BSD and MIT: ffi
+License: GPLv2 and ASL 2.0 and MIT and BSD and (GPLv2 or Ruby) and (BSD or Ruby)
+URL: https://github.com/ClusterLabs/pcs
+Group: System Environment/Base
+Summary: Pacemaker Configuration System
+# building only for architectures with pacemaker and corosync available
+ExclusiveArch: i686 x86_64 s390x ppc64le aarch64
+
+%global version_or_commit %{version}
+# %%global version_or_commit %%{version}.210-9862
+
+%global pcs_source_name %{name}-%{version_or_commit}.alpha.1
+
+# ui_commit can be determined by hash, tag or branch
+%global ui_commit 0.1.9
+%global ui_modules_version 0.1.9
+%global ui_src_name pcs-web-ui-%{ui_commit}
+
+%global pcs_snmp_pkg_name pcs-snmp
+
+%global pyagentx_version 0.4.pcs.2
+%global tornado_version 6.1.0
+%global dacite_version 1.6.0
+%global version_rubygem_backports 3.17.2
+%global version_rubygem_daemons 1.3.1
+%global version_rubygem_ethon 0.12.0
+%global version_rubygem_eventmachine 1.2.7
+%global version_rubygem_ffi 1.13.1
+%global version_rubygem_mustermann 1.1.1
+%global version_rubygem_open4 1.3.4
+%global version_rubygem_rack 2.2.3
+%global version_rubygem_rack_protection 2.0.8.1
+%global version_rubygem_rack_test 1.1.0
+%global version_rubygem_rexml 3.2.5
+%global version_rubygem_ruby2_keywords 0.0.2
+%global version_rubygem_sinatra 2.0.8.1
+%global version_rubygem_thin 1.7.2
+%global version_rubygem_tilt 2.0.10
+%global version_rubygem_webrick 1.7.0
+
+%global pcs_bundled_dir pcs_bundled
+%global pcsd_public_dir pcsd/public
+%global rubygem_bundle_dir pcsd/vendor/bundle
+%global rubygem_cache_dir %{rubygem_bundle_dir}/cache
+
+# mangling shebang in /usr/lib/pcsd/vendor/bundle/ruby/gems/rack-2.0.5/test/cgi/test from /usr/bin/env ruby to #!/usr/bin/ruby
+#*** ERROR: ./usr/lib/pcsd/vendor/bundle/ruby/gems/rack-2.0.5/test/cgi/test.ru has shebang which doesn't start with '/' (../../bin/rackup)
+#mangling shebang in /usr/lib/pcsd/vendor/bundle/ruby/gems/rack-2.0.5/test/cgi/rackup_stub.rb from /usr/bin/env ruby to #!/usr/bin/ruby
+#*** WARNING: ./usr/lib/pcsd/vendor/bundle/ruby/gems/rack-2.0.5/test/cgi/sample_rackup.ru is executable but has empty or no shebang, removing executable bit
+#*** WARNING: ./usr/lib/pcsd/vendor/bundle/ruby/gems/rack-2.0.5/test/cgi/lighttpd.conf is executable but has empty or no shebang, removing executable bit
+#*** ERROR: ambiguous python shebang in /usr/lib/pcsd/vendor/bundle/ruby/gems/ffi-1.9.25/ext/ffi_c/libffi/generate-darwin-source-and-headers.py: #!/usr/bin/env python. Change it to python3 (or python2) explicitly.
+%undefine __brp_mangle_shebangs
+
+# https://fedoraproject.org/wiki/Changes/Avoid_usr_bin_python_in_RPM_Build#Python_bytecompilation
+# Enforce python3 because bytecompilation of tornado produced warnings:
+# DEPRECATION WARNING: python2 invoked with /usr/bin/python.
+# Use /usr/bin/python3 or /usr/bin/python2
+# /usr/bin/python will be removed or switched to Python 3 in the future.
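+# A minimal sketch of what the override below affects (the compileall
+# invocation is illustrative only, not part of this build): any
+# byte-compilation step that consults %%{__python}, e.g.
+#   %%{__python} -m compileall %%{buildroot}%%{python3_sitelib}
+# then resolves to /usr/bin/python3 instead of the ambiguous /usr/bin/python.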
+%global __python %{__python3}
+
+Source0: %{url}/archive/%{version_or_commit}/%{pcs_source_name}.tar.gz
+
+Source41: https://github.com/ondrejmular/pyagentx/archive/v%{pyagentx_version}/pyagentx-%{pyagentx_version}.tar.gz
+Source42: https://github.com/tornadoweb/tornado/archive/v%{tornado_version}/tornado-%{tornado_version}.tar.gz
+Source44: https://github.com/konradhalas/dacite/archive/v%{dacite_version}/dacite-%{dacite_version}.tar.gz
+
+Source81: https://rubygems.org/downloads/backports-%{version_rubygem_backports}.gem
+Source82: https://rubygems.org/downloads/ethon-%{version_rubygem_ethon}.gem
+Source83: https://rubygems.org/downloads/ffi-%{version_rubygem_ffi}.gem
+Source85: https://rubygems.org/downloads/rexml-%{version_rubygem_rexml}.gem
+Source86: https://rubygems.org/downloads/mustermann-%{version_rubygem_mustermann}.gem
+# We needed to re-upload the open4 rubygem because of issues with sources in
+# gating. Unfortunately, there was no newer version available, therefore we
+# had to change its 'version' ourselves.
+Source87: https://rubygems.org/downloads/open4-%{version_rubygem_open4}.gem#/open4-%{version_rubygem_open4}-1.gem
+Source88: https://rubygems.org/downloads/rack-%{version_rubygem_rack}.gem
+Source89: https://rubygems.org/downloads/rack-protection-%{version_rubygem_rack_protection}.gem
+Source90: https://rubygems.org/downloads/rack-test-%{version_rubygem_rack_test}.gem
+Source91: https://rubygems.org/downloads/sinatra-%{version_rubygem_sinatra}.gem
+Source92: https://rubygems.org/downloads/tilt-%{version_rubygem_tilt}.gem
+Source93: https://rubygems.org/downloads/eventmachine-%{version_rubygem_eventmachine}.gem
+Source94: https://rubygems.org/downloads/daemons-%{version_rubygem_daemons}.gem
+Source95: https://rubygems.org/downloads/thin-%{version_rubygem_thin}.gem
+Source96: https://rubygems.org/downloads/ruby2_keywords-%{version_rubygem_ruby2_keywords}.gem
+Source97: https://rubygems.org/downloads/webrick-%{version_rubygem_webrick}.gem
+
+Source100: https://github.com/ClusterLabs/pcs-web-ui/archive/%{ui_commit}/%{ui_src_name}.tar.gz
+Source101: https://github.com/ClusterLabs/pcs-web-ui/releases/download/%{ui_commit}/pcs-web-ui-node-modules-%{ui_modules_version}.tar.xz
+
+# Patches from upstream.
+# They should come before downstream patches to avoid unnecessary conflicts.
+# Z-streams are an exception here: they can come from upstream but should be
+# applied at the end to keep z-stream changes as straightforward as possible.
+# Patch1: bzNUMBER-01-name.patch
+
+# Downstream patches do not come from upstream. They adapt pcs for specific
+# RHEL needs.
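+# The patches declared below are applied in numeric order by "%%autosetup
+# -p1 -S git" in %%prep; a rough manual equivalent (a sketch assuming plain
+# git apply, whereas %%autosetup additionally commits each patch) would be:
+#   git apply -p1 fix-version.patch
+#   git apply -p1 do-not-support-cluster-setup-with-udp-u-transport.patch
+#   git apply -p1 update.patch
+#   git apply -p1 fix-changelog.patch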
+
+Patch1: fix-version.patch
+Patch2: do-not-support-cluster-setup-with-udp-u-transport.patch
+Patch3: update.patch
+Patch4: fix-changelog.patch
+
+# git for patches
+BuildRequires: git-core
+# printf from coreutils is used in makefile
+BuildRequires: coreutils
+# python for pcs
+BuildRequires: python3 >= 3.6
+BuildRequires: python3-cryptography
+BuildRequires: python3-dateutil >= 2.7.0
+BuildRequires: python3-devel
+BuildRequires: python3-setuptools
+BuildRequires: python3-pycurl
+BuildRequires: python3-pip
+BuildRequires: python3-pyparsing
+BuildRequires: python3-cryptography
+BuildRequires: python3-lxml
+# for building bundled python packages
+BuildRequires: python3-wheel
+# for bundled python dateutil
+BuildRequires: python3-setuptools_scm
+# gcc for compiling custom rubygems
+BuildRequires: gcc
+BuildRequires: gcc-c++
+# ruby and gems for pcsd
+BuildRequires: ruby >= 2.2.0
+BuildRequires: ruby-devel
+BuildRequires: rubygems
+BuildRequires: rubygem-bundler
+# ruby libraries for tests
+BuildRequires: rubygem-json
+BuildRequires: rubygem-test-unit
+# for touching patch files (sanitization function)
+BuildRequires: diffstat
+# for post, preun and postun macros
+BuildRequires: systemd
+BuildRequires: make
+# Red Hat logo for creating symlink of favicon
+BuildRequires: redhat-logos
+# for building web ui
+BuildRequires: npm
+# cluster stack packages for pkg-config
+BuildRequires: booth
+BuildRequires: corosync-qdevice-devel
+BuildRequires: corosynclib-devel >= 3.0
+BuildRequires: fence-agents-common
+BuildRequires: pacemaker-libs-devel >= 2.0.0
+BuildRequires: resource-agents
+BuildRequires: sbd
+
+# python and libraries for pcs, setuptools for pcs entrypoint
+Requires: python3 >= 3.6
+Requires: python3-cryptography
+Requires: python3-dateutil >= 2.7.0
+Requires: python3-lxml
+Requires: python3-setuptools
+Requires: python3-pycurl
+Requires: python3-pyparsing
+Requires: python3-cryptography
+# ruby and gems for pcsd
+Requires: ruby >= 2.2.0
+Requires: rubygems
+Requires: rubygem-json
+# for killall
+Requires: psmisc
+# cluster stack and related packages
+Requires: pcmk-cluster-manager >= 2.0.0
+Suggests: pacemaker
+Requires: (corosync >= 2.99 if pacemaker)
+# pcs enables corosync encryption by default so we require libknet1-plugins-all
+Requires: (libknet1-plugins-all if corosync)
+Requires: pacemaker-cli >= 2.0.0
+# for post, preun and postun macros
+Requires(post): systemd
+Requires(preun): systemd
+Requires(postun): systemd
+# pam is used for authentication inside daemon (python ctypes)
+# more details: https://bugzilla.redhat.com/show_bug.cgi?id=1717113
+Requires: pam
+# favicon Red Hat logo
+Requires: redhat-logos
+# needs logrotate for /etc/logrotate.d/pcsd
+Requires: logrotate
+
+Provides: bundled(tornado) = %{tornado_version}
+Provides: bundled(dacite) = %{dacite_version}
+Provides: bundled(backports) = %{version_rubygem_backports}
+Provides: bundled(daemons) = %{version_rubygem_daemons}
+Provides: bundled(ethon) = %{version_rubygem_ethon}
+Provides: bundled(eventmachine) = %{version_rubygem_eventmachine}
+Provides: bundled(ffi) = %{version_rubygem_ffi}
+Provides: bundled(mustermann) = %{version_rubygem_mustermann}
+Provides: bundled(open4) = %{version_rubygem_open4}
+Provides: bundled(rack) = %{version_rubygem_rack}
+Provides: bundled(rack_protection) = %{version_rubygem_rack_protection}
+Provides: bundled(rack_test) = %{version_rubygem_rack_test}
+Provides: bundled(rexml) = %{version_rubygem_rexml}
+Provides: bundled(ruby2_keywords) = %{version_rubygem_ruby2_keywords}
+Provides: bundled(sinatra) = %{version_rubygem_sinatra}
+Provides: bundled(thin) = %{version_rubygem_thin}
+Provides: bundled(tilt) = %{version_rubygem_tilt}
+Provides: bundled(webrick) = %{version_rubygem_webrick}
+
+%description
+pcs is a corosync and pacemaker configuration tool. It permits users to
+easily view, modify and create pacemaker based clusters.
+
+# pcs-snmp package definition
+%package -n %{pcs_snmp_pkg_name}
+Group: System Environment/Base
+Summary: Pacemaker cluster SNMP agent
+# https://fedoraproject.org/wiki/Licensing:Main?rd=Licensing#Good_Licenses
+# GPLv2: pcs
+# BSD-2-Clause: pyagentx
+License: GPLv2 and BSD-2-Clause
+URL: https://github.com/ClusterLabs/pcs
+
+# tar for unpacking the pyagentx source tarball
+BuildRequires: tar
+
+Requires: pcs = %{version}-%{release}
+Requires: pacemaker
+Requires: net-snmp
+
+Provides: bundled(pyagentx) = %{pyagentx_version}
+
+%description -n %{pcs_snmp_pkg_name}
+SNMP agent that provides information about a pacemaker cluster to the master
+agent (snmpd)
+
+%prep
+%autosetup -p1 -S git -n %{pcs_source_name}
+
+# -- following is inspired by python-simplejson.el5 --
+# Update timestamps on the files touched by a patch, to avoid non-equal
+# .pyc/.pyo files across the multilib peers within a build
+
+update_times(){
+    # update_times ...
+    # set the access and modification times of each file_to_touch to the
+    # times of reference_file
+
+    # put all args into file_list
+    file_list=("$@")
+    # the first argument is reference_file: take it and remove it from file_list
+    reference_file=${file_list[0]}
+    unset file_list[0]
+
+    for fname in ${file_list[@]}; do
+        # some files could be deleted by a patch, therefore we test that a
+        # file exists before touching it, to avoid exiting with the error:
+        # No such file or directory
+        # diffstat cannot create a list of files without the deleted files
+        test -e $fname && touch -r $reference_file $fname
+    done
+}
+
+update_times_patch(){
+    # update_times_patch
+    # set the access and modification times of each file in a patch to the
+    # times of patch_file_name
+
+    patch_file_name=$1
+
+    # diffstat
+    # -l lists only the filenames. No histogram is generated.
+    # -p overrides the logic that strips common pathnames,
+    # simulating the patch "-p" option. (Strip the smallest prefix containing
+    # num leading slashes from each file name found in the patch file)
+    update_times ${patch_file_name} `diffstat -p1 -l ${patch_file_name}`
+}
+
+# update_times_patch %%{PATCH1}
+update_times_patch %{PATCH1}
+update_times_patch %{PATCH2}
+update_times_patch %{PATCH3}
+update_times_patch %{PATCH4}
+
+# prepare dirs/files necessary for building web ui
+# SOURCE100 contains only the directory %%{ui_src_name}
+tar -xzf %SOURCE100 -C %{pcsd_public_dir}
+tar -xf %SOURCE101 -C %{pcsd_public_dir}/%{ui_src_name}
+
+# prepare dirs/files necessary for building all bundles
+# -----------------------------------------------------
+# 1) rubygems sources
+
+mkdir -p %{rubygem_cache_dir}
+cp -f %SOURCE81 %{rubygem_cache_dir}
+cp -f %SOURCE82 %{rubygem_cache_dir}
+cp -f %SOURCE83 %{rubygem_cache_dir}
+cp -f %SOURCE85 %{rubygem_cache_dir}
+cp -f %SOURCE86 %{rubygem_cache_dir}
+# For the reason why we rename the open4 rubygem, see the comment at its
+# source definition above.
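+# In concrete terms, the "#/open4-...-1.gem" fragment on Source87 only
+# changes the downloaded file name, so the copy below roughly expands to
+# (illustrative expansion):
+#   cp -f SOURCES/open4-1.3.4-1.gem pcsd/vendor/bundle/cache/open4-1.3.4.gem
+# which restores the canonical gem file name that bundler looks up in its
+# cache.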
+cp -f %SOURCE87 %{rubygem_cache_dir}/open4-%{version_rubygem_open4}.gem
+cp -f %SOURCE88 %{rubygem_cache_dir}
+cp -f %SOURCE89 %{rubygem_cache_dir}
+cp -f %SOURCE90 %{rubygem_cache_dir}
+cp -f %SOURCE91 %{rubygem_cache_dir}
+cp -f %SOURCE92 %{rubygem_cache_dir}
+cp -f %SOURCE93 %{rubygem_cache_dir}
+cp -f %SOURCE94 %{rubygem_cache_dir}
+cp -f %SOURCE95 %{rubygem_cache_dir}
+cp -f %SOURCE96 %{rubygem_cache_dir}
+cp -f %SOURCE97 %{rubygem_cache_dir}
+
+
+# 2) prepare python bundles
+mkdir -p %{pcs_bundled_dir}/src
+cp -f %SOURCE41 rpm/
+cp -f %SOURCE42 rpm/
+cp -f %SOURCE44 rpm/
+
+%build
+%define debug_package %{nil}
+
+./autogen.sh
+%{configure} --enable-local-build --enable-use-local-cache-only --enable-individual-bundling PYTHON=%{__python3}
+make all
+
+%install
+rm -rf $RPM_BUILD_ROOT
+pwd
+
+%make_install
+
+# build the web ui and put it into pcsd
+make -C %{pcsd_public_dir}/%{ui_src_name} build BUILD_USE_EXISTING_NODE_MODULES=true
+mv %{pcsd_public_dir}/%{ui_src_name}/build ${RPM_BUILD_ROOT}%{_libdir}/%{pcsd_public_dir}/ui
+rm -r %{pcsd_public_dir}/%{ui_src_name}
+
+# symlink favicon into pcsd directories
+mkdir -p ${RPM_BUILD_ROOT}%{_libdir}/%{pcsd_public_dir}/images/
+ln -fs /etc/favicon.png ${RPM_BUILD_ROOT}%{_libdir}/%{pcsd_public_dir}/images/favicon.png
+
+# prepare license files
+# some rubygems do not have a license file (ruby2_keywords, thin)
+mv %{rubygem_bundle_dir}/gems/backports-%{version_rubygem_backports}/LICENSE.txt backports_LICENSE.txt
+mv %{rubygem_bundle_dir}/gems/daemons-%{version_rubygem_daemons}/LICENSE daemons_LICENSE
+mv %{rubygem_bundle_dir}/gems/ethon-%{version_rubygem_ethon}/LICENSE ethon_LICENSE
+mv %{rubygem_bundle_dir}/gems/eventmachine-%{version_rubygem_eventmachine}/LICENSE eventmachine_LICENSE
+mv %{rubygem_bundle_dir}/gems/eventmachine-%{version_rubygem_eventmachine}/GNU eventmachine_GNU
+mv %{rubygem_bundle_dir}/gems/ffi-%{version_rubygem_ffi}/COPYING ffi_COPYING
+mv %{rubygem_bundle_dir}/gems/ffi-%{version_rubygem_ffi}/LICENSE ffi_LICENSE
+mv %{rubygem_bundle_dir}/gems/ffi-%{version_rubygem_ffi}/LICENSE.SPECS ffi_LICENSE.SPECS
+mv %{rubygem_bundle_dir}/gems/mustermann-%{version_rubygem_mustermann}/LICENSE mustermann_LICENSE
+mv %{rubygem_bundle_dir}/gems/open4-%{version_rubygem_open4}/LICENSE open4_LICENSE
+mv %{rubygem_bundle_dir}/gems/rack-%{version_rubygem_rack}/MIT-LICENSE rack_MIT-LICENSE
+mv %{rubygem_bundle_dir}/gems/rexml-%{version_rubygem_rexml}/LICENSE.txt rexml_LICENSE.txt
+mv %{rubygem_bundle_dir}/gems/rack-protection-%{version_rubygem_rack_protection}/License rack-protection_License
+mv %{rubygem_bundle_dir}/gems/rack-test-%{version_rubygem_rack_test}/MIT-LICENSE.txt rack-test_MIT-LICENSE.txt
+mv %{rubygem_bundle_dir}/gems/sinatra-%{version_rubygem_sinatra}/LICENSE sinatra_LICENSE
+mv %{rubygem_bundle_dir}/gems/tilt-%{version_rubygem_tilt}/COPYING tilt_COPYING
+mv %{rubygem_bundle_dir}/gems/webrick-%{version_rubygem_webrick}/LICENSE.txt webrick_LICENSE.txt
+
+cp %{pcs_bundled_dir}/src/pyagentx-*/LICENSE.txt pyagentx_LICENSE.txt
+cp %{pcs_bundled_dir}/src/pyagentx-*/CONTRIBUTORS.txt pyagentx_CONTRIBUTORS.txt
+cp %{pcs_bundled_dir}/src/pyagentx-*/README.md pyagentx_README.md
+
+cp %{pcs_bundled_dir}/src/tornado-*/LICENSE tornado_LICENSE
+cp %{pcs_bundled_dir}/src/tornado-*/README.rst tornado_README.rst
+
+cp %{pcs_bundled_dir}/src/dacite-*/LICENSE dacite_LICENSE
+cp %{pcs_bundled_dir}/src/dacite-*/README.md dacite_README.md
+
+# We are not building a debug package for pcs but we need to add MiniDebuginfo
+# to the bundled shared libraries from rubygem extensions in order to satisfy
+
+# We can remove files required for gem compilation
+rm -rf $RPM_BUILD_ROOT%{_libdir}/%{rubygem_bundle_dir}/gems/eventmachine-%{version_rubygem_eventmachine}/ext
+rm -rf $RPM_BUILD_ROOT%{_libdir}/%{rubygem_bundle_dir}/gems/ffi-%{version_rubygem_ffi}/ext
+rm -rf $RPM_BUILD_ROOT%{_libdir}/%{rubygem_bundle_dir}/gems/thin-%{version_rubygem_thin}/ext
+
+%check
+# In the build environment LC_CTYPE is set to C, which causes tests to fail
+# because Python prints a warning about it to stderr. The following
+# environment variable disables the warning.
+# On a live system either a UTF8 locale is set or the warning is emitted,
+# which breaks pcs. That is the correct behavior, since with wrong locales
+# pcs would probably be broken anyway.
+# The main concern here is to make the tests pass.
+# See https://fedoraproject.org/wiki/Changes/python3_c.utf-8_locale for details.
+export PYTHONCOERCECLOCALE=0
+
+run_all_tests(){
+    # run pcs tests
+
+    # disabled tests:
+    #
+    # pcs_test.tier0.lib.commands.test_resource_agent.DescribeAgentUtf8.test_describe
+    #     For an unknown reason this test is failing in the mock environment
+    #     and passing outside the mock environment.
+    #     TODO: Investigate the issue
+
+    %{__python3} pcs_test/suite --tier0 -v --vanilla --all-but \
+        pcs_test.tier0.lib.commands.test_resource_agent.DescribeAgentUtf8.test_describe \
+        pcs_test.tier0.daemon.app.test_app_remote.SyncConfigMutualExclusive.test_get_not_locked \
+        pcs_test.tier0.daemon.app.test_app_remote.SyncConfigMutualExclusive.test_post_not_locked \
+
+    test_result_python=$?
+
+    # run pcsd tests and remove them
+    GEM_HOME=$RPM_BUILD_ROOT%{_libdir}/%{rubygem_bundle_dir} ruby \
+        -I$RPM_BUILD_ROOT%{_libdir}/pcsd \
+        -Ipcsd/test \
+        pcsd/test/test_all_suite.rb
+    test_result_ruby=$?
+
+    if [ $test_result_python -ne 0 ]; then
+        return $test_result_python
+    fi
+    return $test_result_ruby
+}
+
+run_all_tests
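+
+# For local debugging it can be handy to run just one of the excluded tests;
+# assuming the suite addresses tests by the same dotted names as --all-but
+# above, an invocation could look like this (illustrative only, not executed
+# during the build):
+#   python3 pcs_test/suite --tier0 -v --vanilla \
+#     pcs_test.tier0.lib.commands.test_resource_agent.DescribeAgentUtf8.test_describe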
+
+%posttrans
+# Make sure the new version of the daemon is running.
+# Also, make sure to start pcsd-ruby if it hasn't been started or even
+# installed before. This is done by restarting pcsd.service.
+%{_bindir}/systemctl daemon-reload
+%{_bindir}/systemctl try-restart pcsd.service
+
+
+%post
+%systemd_post pcsd.service
+%systemd_post pcsd-ruby.service
+
+%post -n %{pcs_snmp_pkg_name}
+%systemd_post pcs_snmp_agent.service
+
+%preun
+%systemd_preun pcsd.service
+%systemd_preun pcsd-ruby.service
+
+%preun -n %{pcs_snmp_pkg_name}
+%systemd_preun pcs_snmp_agent.service
+
+%postun
+%systemd_postun_with_restart pcsd.service
+%systemd_postun_with_restart pcsd-ruby.service
+
+%postun -n %{pcs_snmp_pkg_name}
+%systemd_postun_with_restart pcs_snmp_agent.service
+
+%files
+%doc CHANGELOG.md
+%doc README.md
+%doc tornado_README.rst
+%doc dacite_README.md
+%license tornado_LICENSE
+%license dacite_LICENSE
+%license COPYING
+# rubygem licenses
+%license backports_LICENSE.txt
+%license daemons_LICENSE
+%license ethon_LICENSE
+%license eventmachine_LICENSE
+%license eventmachine_GNU
+%license ffi_COPYING
+%license ffi_LICENSE
+%license ffi_LICENSE.SPECS
+%license mustermann_LICENSE
+%license open4_LICENSE
+%license rack_MIT-LICENSE
+%license rack-protection_License
+%license rack-test_MIT-LICENSE.txt
+%license rexml_LICENSE.txt
+%license sinatra_LICENSE
+%license tilt_COPYING
+%license webrick_LICENSE.txt
+%{python3_sitelib}/*
+%{_sbindir}/pcs
+%{_sbindir}/pcsd
+%{_libdir}/pcs/*
+%{_libdir}/pcsd/*
+%{_unitdir}/pcsd.service
+%{_unitdir}/pcsd-ruby.service
+%{_datadir}/bash-completion/completions/pcs
+%{_sharedstatedir}/pcsd
+%config(noreplace) %{_sysconfdir}/pam.d/pcsd
+%dir %{_var}/log/pcsd
+%config(noreplace) %{_sysconfdir}/logrotate.d/pcsd
+%config(noreplace) %{_sysconfdir}/sysconfig/pcsd
+%ghost %config(noreplace) %attr(0600,root,root) %{_sharedstatedir}/pcsd/cfgsync_ctl
+%ghost %config(noreplace) %attr(0600,root,root) %{_sharedstatedir}/pcsd/known-hosts
+%ghost %config(noreplace) %attr(0600,root,root) %{_sharedstatedir}/pcsd/pcsd.cookiesecret
+%ghost %config(noreplace) %attr(0600,root,root) %{_sharedstatedir}/pcsd/pcsd.crt
+%ghost %config(noreplace) %attr(0600,root,root) %{_sharedstatedir}/pcsd/pcsd.key
+%ghost %config(noreplace) %attr(0644,root,root) %{_sharedstatedir}/pcsd/pcs_settings.conf
+%ghost %config(noreplace) %attr(0644,root,root) %{_sharedstatedir}/pcsd/pcs_users.conf
+%{_mandir}/man8/pcs.*
+%{_mandir}/man8/pcsd.*
+%exclude %{_libdir}/pcs/pcs_snmp_agent
+%exclude %{_libdir}/pcs/%{pcs_bundled_dir}/packages/pyagentx*
+
+
+%files -n %{pcs_snmp_pkg_name}
+%{_libdir}/pcs/pcs_snmp_agent
+%{_libdir}/pcs/%{pcs_bundled_dir}/packages/pyagentx*
+%{_unitdir}/pcs_snmp_agent.service
+%{_datadir}/snmp/mibs/PCMK-PCS*-MIB.txt
+%{_mandir}/man8/pcs_snmp_agent.*
+%config(noreplace) %{_sysconfdir}/sysconfig/pcs_snmp_agent
+%doc CHANGELOG.md
+%doc pyagentx_CONTRIBUTORS.txt
+%doc pyagentx_README.md
+%license COPYING
+%license pyagentx_LICENSE.txt
+
+%changelog
+* Mon Sep 06 2021 Miroslav Lisik - 0.11.1-3
+- Fixed pcs web ui - adapt to backend changes
+- Resolves: rhbz#1999690
+
+* Thu Sep 02 2021 Miroslav Lisik - 0.11.1-2
+- Fixed pcs web ui - data are not wiped out when user logs out
+- Fixed stop requesting legacy output of remote/status
+- Resolves: rhbz#1999104 rhbz#1999690
+
+* Thu Aug 26 2021 Miroslav Lisik - 0.11.1-1
+- Rebased to latest upstream sources (see CHANGELOG.md)
+- Resolves: rhbz#1283805 rhbz#1910644 rhbz#1910645 rhbz#1956703 rhbz#1956706 rhbz#1985981 rhbz#1991957 rhbz#1996062 rhbz#1996067
+
+* Tue Aug 24 2021 Miroslav Lisik - 0.11.0.alpha.1-1
+- Rebased to latest upstream sources (see CHANGELOG.md)
+- Updated pcs web ui
+- Resolves: rhbz#1283805 rhbz#1910644 rhbz#1910645 rhbz#1985981 rhbz#1991957 rhbz#1996067
+
+* Thu Aug 19 2021 DJ Delorie - 0.10.9-2
+- Rebuilt for libffi 3.4.2 SONAME transition.
+  Related: rhbz#1891914
+
+* Tue Aug 10 2021 Miroslav Lisik - 0.10.9-1
+- Rebased to latest upstream sources (see CHANGELOG.md)
+- Resolves: rhbz#1991957
+
+* Mon Aug 09 2021 Mohan Boddu - 0.10.8-11
+- Rebuilt for IMA sigs, glibc 2.34, aarch64 flags
+  Related: rhbz#1991688
+
+* Tue Jul 20 2021 Miroslav Lisik - 0.10.8-10
+- Rebased to latest upstream sources (see CHANGELOG.md)
+- Fixed web-ui build
+- Fixed tests for pacemaker 2.1
+- Resolves: rhbz#1975440 rhbz#1922302
+
+* Tue Jun 22 2021 Mohan Boddu - 0.10.8-9
+- Rebuilt for RHEL 9 BETA for openssl 3.0
+  Related: rhbz#1971065
+
+* Wed Jun 16 2021 Miroslav Lisik - 0.10.8-8
+- Rebuild with fixed gating tests
+- Stopped bundling rubygem-json (use distribution package instead)
+- Fixed patches
+- Resolves: rhbz#1881064
+
+* Tue Jun 15 2021 Miroslav Lisik - 0.10.8-7
+- Fixed License tag
+- Rebuild with fixed dependency for gating tier0 tests
+- Resolves: rhbz#1881064
+
+* Thu Jun 10 2021 Miroslav Lisik - 0.10.8-6
+- Rebased to latest upstream sources (see CHANGELOG.md)
+- Removed clufter related commands
+- Resolves: rhbz#1881064
+
+* Wed Apr 28 2021 Miroslav Lisik - 0.10.8-5
+- Updated pcs web ui node modules
+- Fixed build issue on low memory build hosts
+- Resolves: rhbz#1951272
+
+* Fri Apr 16 2021 Mohan Boddu - 0.10.8-4
+- Rebuilt for RHEL 9 BETA on Apr 15th 2021. Related: rhbz#1947937
+
+* Thu Mar 04 2021 Miroslav Lisik - 0.10.8-3
+- Replace pyOpenSSL with python-cryptography
+- Resolves: rhbz#1927404
+
+* Fri Feb 19 2021 Miroslav Lisik - 0.10.8-2
+- Bundle rubygem dependencies and python3-tornado
+- Resolves: rhbz#1929710
+
+* Thu Feb 04 2021 Miroslav Lisik - 0.10.8-1
+- Rebased to latest upstream sources (see CHANGELOG.md)
+- Updated pcs-web-ui
+- Updated bundled python dependency: dacite
+- Changed BuildRequires from git to git-core
+- Added conditional (Build)Requires: rubygem(rexml)
+- Added conditional Requires: rubygem(webrick)
+
+* Tue Jan 26 2021 Fedora Release Engineering - 0.10.7-4
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_34_Mass_Rebuild
+
+* Thu Jan 7 2021 Vít Ondruch - 0.10.7-3
+- Rebuilt for https://fedoraproject.org/wiki/Changes/Ruby_3.0
+
+* Thu Nov 26 2020 Ondrej Mular - 0.10.7-2
+- Python 3.10 related fix
+
+* Wed Sep 30 2020 Miroslav Lisik - 0.10.7-1
+- Rebased to latest upstream sources (see CHANGELOG.md)
+- Added dependency on python packages pyparsing and dateutil
+- Fixed virtual bundle provides for ember, handlebars, jquery and jquery-ui
+- Removed dependency on python3-clufter
+
+* Tue Jul 28 2020 Fedora Release Engineering - 0.10.6-2
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_33_Mass_Rebuild
+
+* Tue Jul 21 2020 Miroslav Lisik - 0.10.6-1
+- Rebased to latest upstream sources (see CHANGELOG.md)
+- Updated pcs-web-ui
+- Stopped bundling tornado (use distribution package instead)
+- Stopped bundling rubygem-tilt (use distribution package instead)
+- Removed rubygem bundling
+- Removed unneeded BuildRequires: execstack, gcc, gcc-c++
+- Excluded some tests for tornado daemon
+
+* Tue Jul 21 2020 Tom Stellard - 0.10.5-8
+- Use make macros
+- https://fedoraproject.org/wiki/Changes/UseMakeBuildInstallMacro
+
+* Wed Jul 15 2020 Ondrej Mular - 0.10.5-7
+- Use fixed upstream version of dacite with Python 3.9 support
+- Split upstream tests in gating into tiers
+
+* Fri Jul 03 2020 Ondrej Mular - 0.10.5-6
+- Use patched version of dacite compatible with Python 3.9
+- Resolves: rhbz#1838327
+
+* Tue May 26 2020 Miro Hrončok - 0.10.5-5
+- Rebuilt for Python 3.9
+
+* Thu May 07 2020 Ondrej Mular - 0.10.5-4
+- Rebased to latest upstream sources (see CHANGELOG.md)
+- Run only tier0 tests in check section
+
+* Fri Apr 03 2020 Ondrej Mular - 0.10.5-3
+- Enable gating
+
+* Fri Mar 27 2020 Ondrej Mular - 0.10.5-2
+- Remove usage of deprecated module xml.etree.cElementTree
+- Resolves: rhbz#1817695
+
+* Wed Mar 18 2020 Miroslav Lisik - 0.10.5-1
+- Rebased to latest upstream sources (see CHANGELOG.md)
+
+* Wed Jan 29 2020 Fedora Release Engineering - 0.10.4-2
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_32_Mass_Rebuild
+
+* Thu Nov 28 2019 Miroslav Lisik - 0.10.4-1
+- Rebased to latest upstream sources (see CHANGELOG.md)
+
+* Thu Oct 03 2019 Miro Hrončok - 0.10.3-2
+- Rebuilt for Python 3.8.0rc1 (#1748018)
+
+* Fri Aug 23 2019 Ondrej Mular - 0.10.3-1
+- Rebased to latest upstream sources (see CHANGELOG.md)
+
+* Mon Aug 19 2019 Miro Hrončok - 0.10.2-3
+- Rebuilt for Python 3.8
+
+* Fri Jul 26 2019 Fedora Release Engineering - 0.10.2-2
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_31_Mass_Rebuild
+
+* Fri Jun 14 2019 Ondrej Mular - 0.10.2-1
+- Rebased to latest upstream sources (see CHANGELOG.md)
+- Added pam as required package
+- An alternative webUI rebased to latest upstream sources
+- Improved configuration file permissions in rpm
+
+* Tue Mar 19 2019 Tomas Jelinek - 0.10.1-4
+- Removed unused dependency rubygem-multi_json
+- Removed files needed only for building rubygems from the package
+
+* Mon Feb 04 2019 Ivan Devát - 0.10.1-3
+- Corrected gem install flags
+
+* Fri Feb 01 2019 Fedora Release Engineering - 0.10.1-2
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_30_Mass_Rebuild
+
+* Wed Jan 09 2019 Ivan Devát - 0.10.1-1
+- Rebased to latest upstream sources (see CHANGELOG.md)
+
+* Tue Oct 09 2018 Ondrej Mular - 0.10.0.alpha.6-1
+- Rebased to latest upstream sources (see CHANGELOG.md)
+- Resolves: rhbz#1618911
+
+* Fri Aug 31 2018 Ivan Devát - 0.10.0.alpha.2-3
+- Started bundling rubygem-tilt (rubygem-tilt is orphaned in fedora due to rubygem-prawn dependency)
+- Enabled passing tests
+
+* Sat Aug 25 2018 Ivan Devát - 0.10.0.alpha.2-2
+- Fixed error with missing rubygem location during pcsd start
+- Resolves: rhbz#1618911
+
+* Thu Aug 02 2018 Ivan Devát - 0.10.0.alpha.2-1
+- Rebased to latest upstream sources (see CHANGELOG.md)
+
+* Wed Jul 25 2018 Ivan Devát - 0.9.164-4
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_29_Mass_Rebuild
+
+* Fri Jul 13 2018 Fedora Release Engineering - 0.9.164-3
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_29_Mass_Rebuild
+
+* Tue Jun 19 2018 Miro Hrončok - 0.9.164-2
+- Rebuilt for Python 3.7
+
+* Mon Apr 09 2018 Ondrej Mular - 0.9.164-1
+- Rebased to latest upstream sources (see CHANGELOG.md)
+- Fixed: CVE-2018-1086, CVE-2018-1079
+
+* Mon Feb 26 2018 Ivan Devát - 0.9.163-2
+- Fixed crash when adding a node to a cluster
+
+* Tue Feb 20 2018 Ivan Devát - 0.9.163-1
+- Rebased to latest upstream sources (see CHANGELOG.md)
+- Adapted for Rack 2 and Sinatra 2
+
+* Fri Feb 09 2018 Igor Gnatenko - 0.9.160-5
+- Escape macros in %%changelog
+
+* Thu Feb 08 2018 Fedora Release Engineering - 0.9.160-4
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_28_Mass_Rebuild
+
+* Sat Jan 20 2018 Björn Esser - 0.9.160-3
+- Rebuilt for switch to libxcrypt
+
+* Fri Jan 05 2018 Mamoru TASAKA - 0.9.160-2
+- F-28: rebuild for ruby25
+- Workaround for gem install option
+
+* Wed Oct 18 2017 Ondrej Mular - 0.9.160-1
+- Rebased to latest upstream sources (see CHANGELOG.md)
+- All pcs tests are temporarily disabled because of issues in pacemaker.
+
+* Thu Sep 14 2017 Ondrej Mular - 0.9.159-4
+- Bundle rubygem-rack-protection which is being updated to 2.0.0 in Fedora.
+- Removed setuptools patch.
+- Disabled debuginfo subpackage.
+
+* Thu Aug 03 2017 Fedora Release Engineering - 0.9.159-3
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_27_Binutils_Mass_Rebuild
+
+* Thu Jul 27 2017 Fedora Release Engineering - 0.9.159-2
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_27_Mass_Rebuild
+
+* Wed Jul 12 2017 Ondrej Mular - 0.9.159-1
+- Rebased to latest upstream sources (see CHANGELOG.md)
+
+* Tue May 23 2017 Tomas Jelinek - 0.9.156-3
+- Fixed python locales issue preventing build-time tests from passing
+- Bundle rubygem-tilt which is being retired from Fedora
+
+* Thu Mar 23 2017 Tomas Jelinek - 0.9.156-2
+- Fixed Cross-site scripting (XSS) vulnerability in web UI CVE-2017-2661
+- Re-added support for clufter as it is now available for Python 3
+
+* Wed Feb 22 2017 Tomas Jelinek - 0.9.156-1
+- Rebased to latest upstream sources (see CHANGELOG.md)
+
+* Sat Feb 11 2017 Fedora Release Engineering - 0.9.155-3
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_26_Mass_Rebuild
+
+* Thu Jan 12 2017 Vít Ondruch - 0.9.155-2
+- Rebuilt for https://fedoraproject.org/wiki/Changes/Ruby_2.4
+
+* Wed Jan 04 2017 Adam Williamson - 0.9.155-1
+- Latest release 0.9.155
+- Fix tests with Python 3.6 and lxml 3.7
+- Package the license as license, not doc
+- Use -f param for rm when wiping test directories as they are nested now
+
+* Mon Dec 19 2016 Miro Hrončok
+- Rebuild for Python 3.6
+
+* Tue Oct 18 2016 Tomas Jelinek - 0.9.154-2
+- Fixed upgrading from pcs-0.9.150
+
+* Thu Sep 22 2016 Tomas Jelinek - 0.9.154-1
+- Re-synced to upstream sources
+- Spec file cleanup and fixes
+
+* Tue Jul 19 2016 Fedora Release Engineering - 0.9.150-2
+- https://fedoraproject.org/wiki/Changes/Automatic_Provides_for_Python_RPM_Packages
+
+* Mon Apr 11 2016 Tomas Jelinek - 0.9.150-1
+- Re-synced to upstream sources
+- Make pcs depend on python3
+- Spec file cleanup
+
+* Tue Feb 23 2016 Tomas Jelinek - 0.9.149-2
+- Fixed rubygems issues which prevented pcsd from starting
+- Added missing python-lxml dependency
+
+* Thu Feb 18 2016 Tomas Jelinek - 0.9.149-1
+- Re-synced to upstream sources
+- Security fix for CVE-2016-0720, CVE-2016-0721
+- Fixed rubygems issues which prevented pcsd from starting
+- Rubygems built with RELRO
+- Spec file cleanup
+- Fixed multilib .pyc/.pyo issue
+
+* Thu Feb 04 2016 Fedora Release Engineering - 0.9.144-3
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_24_Mass_Rebuild
+
+* Tue Jan 12 2016 Vít Ondruch - 0.9.144-2
+- Rebuilt for https://fedoraproject.org/wiki/Changes/Ruby_2.3
+
+* Fri Sep 18 2015 Tomas Jelinek - 0.9.144-1
+- Re-synced to upstream sources
+
+* Tue Jun 23 2015 Tomas Jelinek - 0.9.141-2
+- Added requirement for psmisc for killall
+
+* Tue Jun 23 2015 Tomas Jelinek - 0.9.141-1
+- Re-synced to upstream sources
+
+* Thu Jun 18 2015 Fedora Release Engineering - 0.9.140-2
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_23_Mass_Rebuild
+
+* Fri Jun 05 2015 Tomas Jelinek - 0.9.140-1
+- Re-synced to upstream sources
+
+* Fri May 22 2015 Tomas Jelinek - 0.9.139-4
+- Fix for CVE-2015-1848, CVE-2015-3983 (sessions not signed)
+
+* Thu Mar 26 2015 Tomas Jelinek - 0.9.139-3
+- Add BuildRequires: systemd (rhbz#1206253)
+
+* Fri Feb 27 2015 Tomas Jelinek - 0.9.139-2
+- Reflect clufter inclusion (rhbz#1180723)
+
+* Thu Feb 19 2015 Tomas Jelinek - 0.9.139-1
+- Re-synced to upstream sources
+
+* Sat Jan 17 2015 Mamoru TASAKA - 0.9.115-5
+- Rebuild for https://fedoraproject.org/wiki/Changes/Ruby_2.2
+
+* Sun Aug 17 2014 Fedora Release Engineering - 0.9.115-4
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_21_22_Mass_Rebuild
+
+* Fri Jun 06 2014 Fedora Release Engineering - 0.9.115-3
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_21_Mass_Rebuild
+
+* Tue May 06 2014 Tomas Jelinek - 0.9.115-2
+- Rebuild to fix ruby dependencies
+
+* Mon Apr 21 2014 Chris Feist - 0.9.115-1
+- Re-synced to upstream sources
+
+* Fri Dec 13 2013 Chris Feist - 0.9.102-1
+- Re-synced to upstream sources
+
+* Wed Jun 19 2013 Chris Feist - 0.9.48-1
+- Rebuild with upstream sources
+
+* Thu Jun 13 2013 Chris Feist - 0.9.44-5
+- Added fixes for building rpam with ruby-2.0.0
+
+* Mon Jun 03 2013 Chris Feist - 0.9.44-4
+- Rebuild with upstream sources
+
+* Tue May 07 2013 Chris Feist - 0.9.41-2
+- Resynced to upstream sources
+
+* Fri Apr 19 2013 Chris Feist - 0.9.39-1
+- Fixed gem building
+- Re-synced to upstream sources
+
+* Mon Mar 25 2013 Chris Feist - 0.9.36-4
+- Don't try to build gems at all
+
+* Mon Mar 25 2013 Chris Feist - 0.9.36-3
+- Removed all gems from build, will need to find pam package in the future
+
+* Mon Mar 25 2013 Chris Feist - 0.9.36-2
+- Removed duplicate libraries already present in fedora
+
+* Mon Mar 18 2013 Chris Feist - 0.9.36-1
+- Resynced to latest upstream
+
+* Mon Mar 11 2013 Chris Feist - 0.9.33-1
+- Resynced to latest upstream
+- pcsd has been moved to /usr/lib to fix /usr/local packaging issues
+
+* Thu Feb 21 2013 Chris Feist - 0.9.32-1
+- Resynced to latest version of pcs/pcsd
+
+* Mon Nov 05 2012 Chris Feist - 0.9.27-3
+- Build on all archs
+
+* Thu Oct 25 2012 Chris Feist - 0.9.27-2
+- Resync to latest version of pcs
+- Added pcsd daemon
+
+* Mon Oct 08 2012 Chris Feist - 0.9.26-1
+- Resync to latest version of pcs
+
+* Thu Sep 20 2012 Chris Feist - 0.9.24-1
+- Resync to latest version of pcs
+
+* Thu Sep 20 2012 Chris Feist - 0.9.23-1
+- Resync to latest version of pcs
+
+* Wed Sep 12 2012 Chris Feist - 0.9.22-1
+- Resync to latest version of pcs
+
+* Thu Sep 06 2012 Chris Feist - 0.9.19-1
+- Resync to latest version of pcs
+
+* Tue Aug 07 2012 Chris Feist - 0.9.12-1
+- Resync to latest version of pcs
+
+* Fri Jul 20 2012 Fedora Release Engineering - 0.9.3.1-2
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_18_Mass_Rebuild
+
+* Thu May 24 2012 Chris Feist - 0.9.4-1
+- Resync to latest version of pcs
+- Move cluster creation options to cluster sub command.
+
+* Mon May 07 2012 Chris Feist - 0.9.3.1-1
+- Resync to latest version of pcs which includes fixes to work with F17.
+
+* Mon Mar 19 2012 Chris Feist - 0.9.2.4-1
+- Resynced to latest version of pcs
+
+* Mon Jan 23 2012 Chris Feist - 0.9.1-1
+- Updated BuildRequires and %%doc section for fedora
+
+* Fri Jan 20 2012 Chris Feist - 0.9.0-2
+- Updated spec file for fedora specific changes
+
+* Mon Jan 16 2012 Chris Feist - 0.9.0-1
+- Initial Build