From db91fd68f1baa7b19f06dc8156822430decce4e7 Mon Sep 17 00:00:00 2001
From: Miroslav Lisik <mlisik@redhat.com>
Date: Thu, 2 Sep 2021 10:29:59 +0200
Subject: [PATCH 1/2] use remote/status v2, cache tier1 CIB fixtures, drop unused pcsd code
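
This patch bundles several changes:

* pcs: query "remote/status" with version=2 and read node state from
  the new response layout (/node/corosync and
  /node/services/<service>/running) in pcs/config.py,
  pcs/lib/communication/corosync.py and pcs/utils.py; the new test
  helper corosync_running_check_response() builds matching fixtures.
* build: add an --enable-parallel-pylint configure option which makes
  the pylint target pass --jobs=0 to pylint.
* tests: prepare the CIBs used by tier1 legacy tests once per suite
  run via CachedCibFixture (concurrently when the suite itself runs
  concurrently) instead of rebuilding them in each test class.
* pcsd: drop unused code: fenceagent.rb, the node.restart capability,
  the clusters_overview, status_all and cluster_properties handlers,
  and pcs-0.9 compatibility fallbacks in the agent metadata and
  known-hosts handlers.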
---
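[Reviewer note: the version 2 "remote/status" payload emulated by the
new corosync_running_check_response() helper (see
pcs_test/tools/command_env/config_http_corosync.py below) has this
shape for a node where corosync is not running; real pcsd responses
may carry additional keys:

    {
      "node": {
        "corosync": false,
        "services": {
          "corosync": {"installed": true, "enabled": true, "running": false}
        }
      }
    }
]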
Makefile.am | 9 +-
configure.ac | 5 +
pcs/config.py | 13 +-
pcs/lib/communication/corosync.py | 8 +-
pcs/utils.py | 4 +-
pcs_test/suite.py | 70 ++
.../cluster/test_add_nodes_validation.py | 18 +-
.../test_stonith_update_scsi_devices.py | 11 +-
pcs_test/tier0/lib/test_env_corosync.py | 618 ++++++++--------
pcs_test/tier1/legacy/test_constraints.py | 76 +-
pcs_test/tier1/legacy/test_resource.py | 48 +-
pcs_test/tier1/legacy/test_stonith.py | 71 +-
.../tools/command_env/config_http_corosync.py | 23 +-
pcs_test/tools/fixture_cib.py | 65 ++
pcsd/Makefile.am | 1 -
pcsd/capabilities.xml | 7 -
pcsd/fenceagent.rb | 59 --
pcsd/pcs.rb | 15 -
pcsd/pcsd.rb | 671 +-----------------
pcsd/remote.rb | 559 +--------------
pcsd/resource.rb | 3 -
pcsd/rserver.rb | 1 -
pcsd/test/test_resource.rb | 4 -
23 files changed, 634 insertions(+), 1725 deletions(-)
delete mode 100644 pcsd/fenceagent.rb
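[Reviewer note: to exercise the new pylint option with a standard
autotools flow (the exact bootstrap step may differ in your checkout):

    ./autogen.sh
    ./configure --enable-parallel-pylint
    make pylint

With the option enabled, the pylint target adds --jobs=0, letting
pylint auto-detect the number of worker processes.]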
diff --git a/Makefile.am b/Makefile.am
index 6aede970..34692969 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -188,8 +188,13 @@ endif
pylint:
if DEV_TESTS
+if PARALLEL_PYLINT
+pylint_options = --jobs=0
+else
+pylint_options =
+endif
export PYTHONPATH=${abs_top_builddir}/${PCS_BUNDLED_DIR_LOCAL}/packages && \
- $(TIME) $(PYTHON) -m pylint --rcfile pylintrc --persistent=n --reports=n --score=n --disable similarities ${PCS_PYTHON_PACKAGES}
+ $(TIME) $(PYTHON) -m pylint --rcfile pylintrc --persistent=n --reports=n --score=n --disable similarities ${pylint_options} ${PCS_PYTHON_PACKAGES}
endif
@@ -213,7 +218,7 @@ endif
tests_tier0:
export PYTHONPATH=${abs_top_builddir}/${PCS_BUNDLED_DIR_LOCAL}/packages && \
- $(PYTHON) ${abs_builddir}/pcs_test/suite.py $(python_test_options) --tier0
+ $(PYTHON) ${abs_builddir}/pcs_test/suite.py ${python_test_options} --tier0
tests_tier1:
if EXECUTE_TIER1_TESTS
diff --git a/configure.ac b/configure.ac
index f7b9d1ad..75d65616 100644
--- a/configure.ac
+++ b/configure.ac
@@ -148,6 +148,11 @@ AC_ARG_ENABLE([parallel-tests],
[parallel_tests="yes"])
AM_CONDITIONAL([PARALLEL_TESTS], [test "x$parallel_tests" = "xyes"])
+AC_ARG_ENABLE([parallel-pylint],
+ [AS_HELP_STRING([--enable-parallel-pylint], [Enable running pylint in multiple threads (default: no)])],
+ [parallel_pylint="yes"])
+AM_CONDITIONAL([PARALLEL_PYLINT], [test "x$parallel_pylint" = "xyes"])
+
AC_ARG_ENABLE([local-build],
[AS_HELP_STRING([--enable-local-build], [Download and install all dependencies as user / bundles])],
[local_build="yes"])
diff --git a/pcs/config.py b/pcs/config.py
index a0290499..a3e7e164 100644
--- a/pcs/config.py
+++ b/pcs/config.py
@@ -345,12 +345,13 @@ def config_restore_remote(infile_name, infile_obj):
err_msgs.append(output)
continue
_status = json.loads(output)
- if (
- _status["corosync"]
- or _status["pacemaker"]
- or
- # not supported by older pcsd, do not fail if not present
- _status.get("pacemaker_remote", False)
+ if any(
+ _status["node"]["services"][service_name]["running"]
+ for service_name in (
+ "corosync",
+ "pacemaker",
+ "pacemaker_remote",
+ )
):
err_msgs.append(
"Cluster is currently running on node %s. You need to stop "
diff --git a/pcs/lib/communication/corosync.py b/pcs/lib/communication/corosync.py
index fab8e38f..e2a2949c 100644
--- a/pcs/lib/communication/corosync.py
+++ b/pcs/lib/communication/corosync.py
@@ -28,7 +28,7 @@ class CheckCorosyncOffline(
self._set_skip_offline(skip_offline_targets)
def _get_request_data(self):
- return RequestData("remote/status")
+ return RequestData("remote/status", [("version", "2")])
def _process_response(self, response):
report_item = self._get_response_report(response)
@@ -53,7 +53,7 @@ class CheckCorosyncOffline(
return
try:
status = response.data
- if not json.loads(status)["corosync"]:
+ if not json.loads(status)["node"]["corosync"]:
report_item = ReportItem.info(
reports.messages.CorosyncNotRunningOnNode(node_label),
)
@@ -94,7 +94,7 @@ class GetCorosyncOnlineTargets(
self._corosync_online_target_list = []
def _get_request_data(self):
- return RequestData("remote/status")
+ return RequestData("remote/status", [("version", "2")])
def _process_response(self, response):
report_item = self._get_response_report(response)
@@ -103,7 +103,7 @@ class GetCorosyncOnlineTargets(
return
try:
status = response.data
- if json.loads(status)["corosync"]:
+ if json.loads(status)["node"]["corosync"]:
self._corosync_online_target_list.append(
response.request.target
)
diff --git a/pcs/utils.py b/pcs/utils.py
index ef778b52..7774016e 100644
--- a/pcs/utils.py
+++ b/pcs/utils.py
@@ -186,7 +186,9 @@ def checkStatus(node):
Commandline options:
* --request-timeout - timeout for HTTP requests
"""
- return sendHTTPRequest(node, "remote/status", None, False, False)
+ return sendHTTPRequest(
+ node, "remote/status", urlencode({"version": "2"}), False, False
+ )
# Check and see if we're authorized (faster than a status check)
diff --git a/pcs_test/suite.py b/pcs_test/suite.py
index 75ab66cd..bd98b8b0 100644
--- a/pcs_test/suite.py
+++ b/pcs_test/suite.py
@@ -1,6 +1,8 @@
import importlib
import os
import sys
+from threading import Thread
+import time
import unittest
try:
@@ -84,6 +86,67 @@ def discover_tests(
return unittest.TestLoader().loadTestsFromNames(explicitly_enumerated_tests)
+def tier1_fixtures_needed(test_list):
+ for test_name in tests_from_suite(test_list):
+ if test_name.startswith("pcs_test.tier1.legacy."):
+ return True
+ return False
+
+
+def run_tier1_fixtures(run_concurrently=True):
+ # pylint: disable=import-outside-toplevel
+ from pcs_test.tier1.legacy.test_constraints import (
+ CONSTRAINT_TEST_CIB_FIXTURE,
+ )
+ from pcs_test.tier1.legacy.test_resource import RESOURCE_TEST_CIB_FIXTURE
+ from pcs_test.tier1.legacy.test_stonith import (
+ STONITH_LEVEL_TEST_CIB_FIXTURE,
+ )
+
+ fixture_instances = [
+ CONSTRAINT_TEST_CIB_FIXTURE,
+ RESOURCE_TEST_CIB_FIXTURE,
+ STONITH_LEVEL_TEST_CIB_FIXTURE,
+ ]
+ print("Preparing tier1 fixtures...")
+ time_start = time.time()
+ if run_concurrently:
+ thread_list = []
+ for instance in fixture_instances:
+ thread = Thread(target=instance.set_up)
+ thread.daemon = True
+ thread.start()
+ thread_list.append(thread)
+ timeout_counter = 30 # 30 * 10s = 5min
+ while thread_list:
+ if timeout_counter < 0:
+ raise AssertionError("Fixture threads seem to be stuck :(")
+ for thread in thread_list[:]:  # iterate over a copy; the list is mutated below
+ thread.join(timeout=10)
+ sys.stdout.write(".")
+ sys.stdout.flush()
+ timeout_counter -= 1
+ if not thread.is_alive():
+ thread_list.remove(thread)
+ continue
+
+ else:
+ for instance in fixture_instances:
+ instance.set_up()
+ time_stop = time.time()
+ time_taken = time_stop - time_start
+ sys.stdout.write("Tier1 fixtures prepared in %.3fs\n" % (time_taken))
+ sys.stdout.flush()
+
+ def cleanup():
+ print("Cleaning tier1 fixtures...", end=" ")
+ for instance in fixture_instances:
+ instance.clean_up()
+ print("done")
+
+ return cleanup
+
+
def main():
# pylint: disable=import-outside-toplevel
if "BUNDLED_LIB_LOCATION" in os.environ:
@@ -141,6 +204,11 @@ def main():
sys.exit()
tests_to_run = discovered_tests
+ tier1_fixtures_cleanup = None
+ if tier1_fixtures_needed(tests_to_run):
+ tier1_fixtures_cleanup = run_tier1_fixtures(
+ run_concurrently=run_concurrently
+ )
if run_concurrently:
tests_to_run = ConcurrentTestSuite(
discovered_tests,
@@ -174,6 +242,8 @@ def main():
verbosity=2 if "-v" in sys.argv else 1, resultclass=ResultClass
)
test_result = test_runner.run(tests_to_run)
+ if tier1_fixtures_cleanup:
+ tier1_fixtures_cleanup()
if not test_result.wasSuccessful():
sys.exit(1)
diff --git a/pcs_test/tier0/lib/commands/cluster/test_add_nodes_validation.py b/pcs_test/tier0/lib/commands/cluster/test_add_nodes_validation.py
index c66a5dff..69cdeed2 100644
--- a/pcs_test/tier0/lib/commands/cluster/test_add_nodes_validation.py
+++ b/pcs_test/tier0/lib/commands/cluster/test_add_nodes_validation.py
@@ -14,6 +14,9 @@ from pcs_test.tier0.lib.commands.cluster.test_add_nodes import (
)
from pcs_test.tools import fixture
from pcs_test.tools.command_env import get_env_tools
+from pcs_test.tools.command_env.config_http_corosync import (
+ corosync_running_check_response,
+)
from pcs_test.tools.custom_mock import patch_getaddrinfo
from pcs import settings
@@ -1170,7 +1173,10 @@ class ClusterStatus(TestCase):
.local.read_sbd_config(name_sufix="_2")
.http.corosync.check_corosync_offline(
communication_list=[
- {"label": "node1", "output": '{"corosync":true}'},
+ {
+ "label": "node1",
+ "output": corosync_running_check_response(True),
+ },
{"label": "node2", "output": "an error"},
{
"label": "node3",
@@ -1178,8 +1184,14 @@ class ClusterStatus(TestCase):
"errno": 7,
"error_msg": "an error",
},
- {"label": "node4", "output": '{"corosync":true}'},
- {"label": "node5", "output": '{"corosync":false}'},
+ {
+ "label": "node4",
+ "output": corosync_running_check_response(True),
+ },
+ {
+ "label": "node5",
+ "output": corosync_running_check_response(False),
+ },
]
)
.local.get_host_info(new_nodes)
diff --git a/pcs_test/tier0/lib/commands/test_stonith_update_scsi_devices.py b/pcs_test/tier0/lib/commands/test_stonith_update_scsi_devices.py
index 3bc51325..593757d8 100644
--- a/pcs_test/tier0/lib/commands/test_stonith_update_scsi_devices.py
+++ b/pcs_test/tier0/lib/commands/test_stonith_update_scsi_devices.py
@@ -4,6 +4,9 @@ from unittest import mock, TestCase
from pcs_test.tools import fixture
from pcs_test.tools.command_env import get_env_tools
+from pcs_test.tools.command_env.config_http_corosync import (
+ corosync_running_check_response,
+)
from pcs_test.tools.misc import get_test_resource as rc
from pcs import settings
@@ -1013,7 +1016,7 @@ class TestUpdateScsiDevicesFailures(TestCase):
communication_list=[
dict(
label=self.existing_nodes[0],
- output='{"corosync":true}',
+ output=corosync_running_check_response(True),
),
]
+ [
@@ -1052,11 +1055,11 @@ class TestUpdateScsiDevicesFailures(TestCase):
communication_list=[
dict(
label=self.existing_nodes[0],
- output='{"corosync":true}',
+ output=corosync_running_check_response(True),
),
dict(
label=self.existing_nodes[1],
- output='{"corosync":false}',
+ output=corosync_running_check_response(False),
),
dict(
label=self.existing_nodes[2],
@@ -1122,7 +1125,7 @@ class TestUpdateScsiDevicesFailures(TestCase):
),
dict(
label=self.existing_nodes[2],
- output='{"corosync":false}',
+ output=corosync_running_check_response(False),
),
]
)
diff --git a/pcs_test/tier0/lib/test_env_corosync.py b/pcs_test/tier0/lib/test_env_corosync.py
index dafc63a0..7063ee80 100644
--- a/pcs_test/tier0/lib/test_env_corosync.py
+++ b/pcs_test/tier0/lib/test_env_corosync.py
@@ -14,6 +14,9 @@ from pcs.lib.corosync.config_parser import (
from pcs_test.tools import fixture
from pcs_test.tools.assertions import assert_raise_library_error
from pcs_test.tools.command_env import get_env_tools
+from pcs_test.tools.command_env.config_http_corosync import (
+ corosync_running_check_response,
+)
class PushCorosyncConfLiveBase(TestCase):
@@ -92,12 +95,11 @@ class PushCorosyncConfLiveNoQdeviceTest(PushCorosyncConfLiveBase):
)
def test_dont_need_stopped_cluster(self):
- (
- self.config.http.corosync.set_corosync_conf(
- self.corosync_conf_text, node_labels=self.node_labels
- ).http.corosync.reload_corosync_conf(
- node_labels=self.node_labels[:1]
- )
+ self.config.http.corosync.set_corosync_conf(
+ self.corosync_conf_text, node_labels=self.node_labels
+ )
+ self.config.http.corosync.reload_corosync_conf(
+ node_labels=self.node_labels[:1]
)
self.env_assistant.get_env().push_corosync_conf(
self.corosync_conf_facade
@@ -114,26 +116,19 @@ class PushCorosyncConfLiveNoQdeviceTest(PushCorosyncConfLiveBase):
node="node-2",
),
fixture.info(
- report_codes.COROSYNC_CONFIG_RELOADED, node="node-1"
+ report_codes.COROSYNC_CONFIG_RELOADED,
+ node="node-1",
),
]
)
def test_dont_need_stopped_cluster_error(self):
- (
- self.config.http.corosync.set_corosync_conf(
- self.corosync_conf_text,
- communication_list=[
- {
- "label": "node-1",
- },
- {
- "label": "node-2",
- "response_code": 400,
- "output": "Failed",
- },
- ],
- )
+ self.config.http.corosync.set_corosync_conf(
+ self.corosync_conf_text,
+ communication_list=[
+ {"label": "node-1"},
+ {"label": "node-2", "response_code": 400, "output": "Failed"},
+ ],
)
env = self.env_assistant.get_env()
self.env_assistant.assert_raise_library_error(
@@ -162,35 +157,28 @@ class PushCorosyncConfLiveNoQdeviceTest(PushCorosyncConfLiveBase):
)
def test_dont_need_stopped_cluster_error_skip_offline(self):
- (
- self.config.http.corosync.set_corosync_conf(
- self.corosync_conf_text,
- communication_list=[
+ self.config.http.corosync.set_corosync_conf(
+ self.corosync_conf_text,
+ communication_list=[
+ {
+ "label": "node-1",
+ "response_code": 400,
+ "output": "Failed",
+ },
+ {"label": "node-2"},
+ ],
+ )
+ self.config.http.corosync.reload_corosync_conf(
+ communication_list=[
+ [
{
- "label": "node-1",
+ "label": self.node_labels[0],
"response_code": 400,
"output": "Failed",
},
- {
- "label": "node-2",
- },
],
- ).http.corosync.reload_corosync_conf(
- communication_list=[
- [
- {
- "label": self.node_labels[0],
- "response_code": 400,
- "output": "Failed",
- },
- ],
- [
- {
- "label": self.node_labels[1],
- },
- ],
- ]
- )
+ [{"label": self.node_labels[1]}],
+ ]
)
self.env_assistant.get_env().push_corosync_conf(
self.corosync_conf_facade, skip_offline_nodes=True
@@ -219,33 +207,29 @@ class PushCorosyncConfLiveNoQdeviceTest(PushCorosyncConfLiveBase):
reason="Failed",
),
fixture.info(
- report_codes.COROSYNC_CONFIG_RELOADED, node="node-2"
+ report_codes.COROSYNC_CONFIG_RELOADED,
+ node="node-2",
),
]
)
def test_reload_on_another_node(self):
- (
- self.config.http.corosync.set_corosync_conf(
- self.corosync_conf_text, node_labels=self.node_labels
- ).http.corosync.reload_corosync_conf(
- communication_list=[
- [
- {
- "label": self.node_labels[0],
- "response_code": 200,
- "output": json.dumps(
- dict(code="not_running", message="not running")
- ),
- },
- ],
- [
- {
- "label": self.node_labels[1],
- },
- ],
- ]
- )
+ self.config.http.corosync.set_corosync_conf(
+ self.corosync_conf_text, node_labels=self.node_labels
+ )
+ self.config.http.corosync.reload_corosync_conf(
+ communication_list=[
+ [
+ {
+ "label": self.node_labels[0],
+ "response_code": 200,
+ "output": json.dumps(
+ dict(code="not_running", message="not running")
+ ),
+ },
+ ],
+ [{"label": self.node_labels[1]}],
+ ]
)
self.env_assistant.get_env().push_corosync_conf(
self.corosync_conf_facade
@@ -266,35 +250,35 @@ class PushCorosyncConfLiveNoQdeviceTest(PushCorosyncConfLiveBase):
node="node-1",
),
fixture.info(
- report_codes.COROSYNC_CONFIG_RELOADED, node="node-2"
+ report_codes.COROSYNC_CONFIG_RELOADED,
+ node="node-2",
),
]
)
def test_reload_not_successful(self):
- (
- self.config.http.corosync.set_corosync_conf(
- self.corosync_conf_text, node_labels=self.node_labels
- ).http.corosync.reload_corosync_conf(
- communication_list=[
- [
- {
- "label": self.node_labels[0],
- "response_code": 200,
- "output": json.dumps(
- dict(code="not_running", message="not running")
- ),
- },
- ],
- [
- {
- "label": self.node_labels[1],
- "response_code": 200,
- "output": "not a json",
- },
- ],
- ]
- )
+ self.config.http.corosync.set_corosync_conf(
+ self.corosync_conf_text, node_labels=self.node_labels
+ )
+ self.config.http.corosync.reload_corosync_conf(
+ communication_list=[
+ [
+ {
+ "label": self.node_labels[0],
+ "response_code": 200,
+ "output": json.dumps(
+ dict(code="not_running", message="not running")
+ ),
+ },
+ ],
+ [
+ {
+ "label": self.node_labels[1],
+ "response_code": 200,
+ "output": "not a json",
+ },
+ ],
+ ]
)
self.env_assistant.assert_raise_library_error(
lambda: self.env_assistant.get_env().push_corosync_conf(
@@ -318,7 +302,8 @@ class PushCorosyncConfLiveNoQdeviceTest(PushCorosyncConfLiveBase):
node="node-1",
),
fixture.warn(
- report_codes.INVALID_RESPONSE_FORMAT, node="node-2"
+ report_codes.INVALID_RESPONSE_FORMAT,
+ node="node-2",
),
fixture.error(
report_codes.UNABLE_TO_PERFORM_OPERATION_ON_ANY_NODE
@@ -327,23 +312,22 @@ class PushCorosyncConfLiveNoQdeviceTest(PushCorosyncConfLiveBase):
)
def test_reload_corosync_not_running_anywhere(self):
- (
- self.config.http.corosync.set_corosync_conf(
- self.corosync_conf_text, node_labels=self.node_labels
- ).http.corosync.reload_corosync_conf(
- communication_list=[
- [
- {
- "label": node,
- "response_code": 200,
- "output": json.dumps(
- dict(code="not_running", message="not running")
- ),
- },
- ]
- for node in self.node_labels
+ self.config.http.corosync.set_corosync_conf(
+ self.corosync_conf_text, node_labels=self.node_labels
+ )
+ self.config.http.corosync.reload_corosync_conf(
+ communication_list=[
+ [
+ {
+ "label": node,
+ "response_code": 200,
+ "output": json.dumps(
+ dict(code="not_running", message="not running")
+ ),
+ },
]
- )
+ for node in self.node_labels
+ ]
)
self.env_assistant.get_env().push_corosync_conf(
self.corosync_conf_facade
@@ -372,12 +356,11 @@ class PushCorosyncConfLiveNoQdeviceTest(PushCorosyncConfLiveBase):
def test_need_stopped_cluster(self):
self.corosync_conf_facade.need_stopped_cluster = True
- (
- self.config.http.corosync.check_corosync_offline(
- node_labels=self.node_labels
- ).http.corosync.set_corosync_conf(
- self.corosync_conf_text, node_labels=self.node_labels
- )
+ self.config.http.corosync.check_corosync_offline(
+ node_labels=self.node_labels
+ )
+ self.config.http.corosync.set_corosync_conf(
+ self.corosync_conf_text, node_labels=self.node_labels
)
self.env_assistant.get_env().push_corosync_conf(
self.corosync_conf_facade
@@ -407,21 +390,14 @@ class PushCorosyncConfLiveNoQdeviceTest(PushCorosyncConfLiveBase):
def test_need_stopped_cluster_not_stopped(self):
self.corosync_conf_facade.need_stopped_cluster = True
- (
- self.config.http.corosync.check_corosync_offline(
- communication_list=[
- {
- "label": self.node_labels[0],
- "output": '{"corosync":true}',
- }
- ]
- + [
- {
- "label": node,
- }
- for node in self.node_labels[1:]
- ]
- )
+ self.config.http.corosync.check_corosync_offline(
+ communication_list=[
+ {
+ "label": self.node_labels[0],
+ "output": corosync_running_check_response(True),
+ }
+ ]
+ + [{"label": node} for node in self.node_labels[1:]]
)
env = self.env_assistant.get_env()
self.env_assistant.assert_raise_library_error(
@@ -445,18 +421,14 @@ class PushCorosyncConfLiveNoQdeviceTest(PushCorosyncConfLiveBase):
# If we know for sure that corosync is running, skip_offline doesn't
# matter.
self.corosync_conf_facade.need_stopped_cluster = True
- (
- self.config.http.corosync.check_corosync_offline(
- communication_list=[
- dict(
- label="node-1",
- output='{"corosync":true}',
- ),
- dict(
- label="node-2",
- ),
- ]
- )
+ self.config.http.corosync.check_corosync_offline(
+ communication_list=[
+ dict(
+ label="node-1",
+ output=corosync_running_check_response(True),
+ ),
+ dict(label="node-2"),
+ ]
)
env = self.env_assistant.get_env()
self.env_assistant.assert_raise_library_error(
@@ -481,19 +453,17 @@ class PushCorosyncConfLiveNoQdeviceTest(PushCorosyncConfLiveBase):
def test_need_stopped_cluster_json_error(self):
self.corosync_conf_facade.need_stopped_cluster = True
- (
- self.config.http.corosync.check_corosync_offline(
- communication_list=[
- dict(label="node-1", output="{"), # not valid json
- dict(
- label="node-2",
- # The expected key (/corosync) is missing, we don't
- # care about version 2 status key
- # (/services/corosync/running)
- output='{"services":{"corosync":{"running":true}}}',
- ),
- ]
- )
+ self.config.http.corosync.check_corosync_offline(
+ communication_list=[
+ dict(label="node-1", output="{"), # not valid json
+ dict(
+ label="node-2",
+ # The expected key (/node/corosync) is missing; the tested
+ # code doesn't care about the version 2 services key
+ # (/services/corosync/running) present here
+ output='{"services":{"corosync":{"running":true}}}',
+ ),
+ ]
)
env = self.env_assistant.get_env()
self.env_assistant.assert_raise_library_error(
@@ -517,19 +487,15 @@ class PushCorosyncConfLiveNoQdeviceTest(PushCorosyncConfLiveBase):
def test_need_stopped_cluster_comunnication_failure(self):
self.corosync_conf_facade.need_stopped_cluster = True
- (
- self.config.http.corosync.check_corosync_offline(
- communication_list=[
- dict(
- label="node-1",
- ),
- dict(
- label="node-2",
- response_code=401,
- output="""{"notauthorized":"true"}""",
- ),
- ]
- )
+ self.config.http.corosync.check_corosync_offline(
+ communication_list=[
+ dict(label="node-1"),
+ dict(
+ label="node-2",
+ response_code=401,
+ output='{"notauthorized":"true"}',
+ ),
+ ]
)
env = self.env_assistant.get_env()
self.env_assistant.assert_raise_library_error(
@@ -560,29 +526,26 @@ class PushCorosyncConfLiveNoQdeviceTest(PushCorosyncConfLiveBase):
def test_need_stopped_cluster_comunnication_failures_skip_offline(self):
# If we don't know if corosync is running, skip_offline matters.
self.corosync_conf_facade.need_stopped_cluster = True
- (
- self.config.http.corosync.check_corosync_offline(
- communication_list=[
- dict(
- label="node-1",
- response_code=401,
- output="""{"notauthorized":"true"}""",
- ),
- dict(label="node-2", output="{"), # not valid json
- ]
- ).http.corosync.set_corosync_conf(
- self.corosync_conf_text,
- communication_list=[
- dict(
- label="node-1",
- response_code=401,
- output="""{"notauthorized":"true"}""",
- ),
- dict(
- label="node-2",
- ),
- ],
- )
+ self.config.http.corosync.check_corosync_offline(
+ communication_list=[
+ dict(
+ label="node-1",
+ response_code=401,
+ output='{"notauthorized":"true"}',
+ ),
+ dict(label="node-2", output="{"), # not valid json
+ ]
+ )
+ self.config.http.corosync.set_corosync_conf(
+ self.corosync_conf_text,
+ communication_list=[
+ dict(
+ label="node-1",
+ response_code=401,
+ output='{"notauthorized":"true"}',
+ ),
+ dict(label="node-2"),
+ ],
)
self.env_assistant.get_env().push_corosync_conf(
self.corosync_conf_facade, skip_offline_nodes=True
@@ -662,15 +625,17 @@ class PushCorosyncConfLiveWithQdeviceTest(PushCorosyncConfLiveBase):
def test_qdevice_reload(self):
self.corosync_conf_facade.need_qdevice_reload = True
- (
- self.config.http.corosync.set_corosync_conf(
- self.corosync_conf_text, node_labels=self.node_labels
- )
- .http.corosync.reload_corosync_conf(
- node_labels=self.node_labels[:1]
- )
- .http.corosync.qdevice_client_stop(node_labels=self.node_labels)
- .http.corosync.qdevice_client_start(node_labels=self.node_labels)
+ self.config.http.corosync.set_corosync_conf(
+ self.corosync_conf_text, node_labels=self.node_labels
+ )
+ self.config.http.corosync.reload_corosync_conf(
+ node_labels=self.node_labels[:1]
+ )
+ self.config.http.corosync.qdevice_client_stop(
+ node_labels=self.node_labels
+ )
+ self.config.http.corosync.qdevice_client_start(
+ node_labels=self.node_labels
)
self.env_assistant.get_env().push_corosync_conf(
@@ -689,7 +654,8 @@ class PushCorosyncConfLiveWithQdeviceTest(PushCorosyncConfLiveBase):
node="node-2",
),
fixture.info(
- report_codes.COROSYNC_CONFIG_RELOADED, node="node-1"
+ report_codes.COROSYNC_CONFIG_RELOADED,
+ node="node-1",
),
fixture.info(report_codes.QDEVICE_CLIENT_RELOAD_STARTED),
fixture.info(
@@ -725,34 +691,34 @@ class PushCorosyncConfLiveWithQdeviceTest(PushCorosyncConfLiveBase):
def test_qdevice_reload_corosync_stopped(self):
self.corosync_conf_facade.need_qdevice_reload = True
- (
- self.config.http.corosync.set_corosync_conf(
- self.corosync_conf_text, node_labels=self.node_labels
- )
- .http.corosync.reload_corosync_conf(
- communication_list=[
- [
- {
- "label": label,
- "response_code": 200,
- "output": json.dumps(
- dict(code="not_running", message="")
- ),
- },
- ]
- for label in self.node_labels
- ]
- )
- .http.corosync.qdevice_client_stop(node_labels=self.node_labels)
- .http.corosync.qdevice_client_start(
- communication_list=[
+ self.config.http.corosync.set_corosync_conf(
+ self.corosync_conf_text, node_labels=self.node_labels
+ )
+ self.config.http.corosync.reload_corosync_conf(
+ communication_list=[
+ [
{
"label": label,
- "output": "corosync is not running, skipping",
- }
- for label in self.node_labels
+ "response_code": 200,
+ "output": json.dumps(
+ dict(code="not_running", message="")
+ ),
+ },
]
- )
+ for label in self.node_labels
+ ]
+ )
+ self.config.http.corosync.qdevice_client_stop(
+ node_labels=self.node_labels
+ )
+ self.config.http.corosync.qdevice_client_start(
+ communication_list=[
+ {
+ "label": label,
+ "output": "corosync is not running, skipping",
+ }
+ for label in self.node_labels
+ ]
)
self.env_assistant.get_env().push_corosync_conf(
@@ -816,38 +782,28 @@ class PushCorosyncConfLiveWithQdeviceTest(PushCorosyncConfLiveBase):
# This also tests that failing to stop qdevice on a node doesn't prevent
# starting qdevice on the same node.
self.corosync_conf_facade.need_qdevice_reload = True
- (
- self.config.http.corosync.set_corosync_conf(
- self.corosync_conf_text, node_labels=self.node_labels
- )
- .http.corosync.reload_corosync_conf(
- node_labels=self.node_labels[:1]
- )
- .http.corosync.qdevice_client_stop(
- communication_list=[
- dict(
- label="node-1",
- ),
- dict(
- label="node-2",
- response_code=400,
- output="error",
- ),
- ]
- )
- .http.corosync.qdevice_client_start(
- communication_list=[
- dict(
- label="node-1",
- errno=8,
- error_msg="failure",
- was_connected=False,
- ),
- dict(
- label="node-2",
- ),
- ]
- )
+ self.config.http.corosync.set_corosync_conf(
+ self.corosync_conf_text, node_labels=self.node_labels
+ )
+ self.config.http.corosync.reload_corosync_conf(
+ node_labels=self.node_labels[:1]
+ )
+ self.config.http.corosync.qdevice_client_stop(
+ communication_list=[
+ dict(label="node-1"),
+ dict(label="node-2", response_code=400, output="error"),
+ ]
+ )
+ self.config.http.corosync.qdevice_client_start(
+ communication_list=[
+ dict(
+ label="node-1",
+ errno=8,
+ error_msg="failure",
+ was_connected=False,
+ ),
+ dict(label="node-2"),
+ ]
)
env = self.env_assistant.get_env()
@@ -867,7 +823,8 @@ class PushCorosyncConfLiveWithQdeviceTest(PushCorosyncConfLiveBase):
node="node-2",
),
fixture.info(
- report_codes.COROSYNC_CONFIG_RELOADED, node="node-1"
+ report_codes.COROSYNC_CONFIG_RELOADED,
+ node="node-1",
),
fixture.info(report_codes.QDEVICE_CLIENT_RELOAD_STARTED),
fixture.info(
@@ -903,62 +860,46 @@ class PushCorosyncConfLiveWithQdeviceTest(PushCorosyncConfLiveBase):
def test_qdevice_reload_failures_skip_offline(self):
self.corosync_conf_facade.need_qdevice_reload = True
- (
- self.config.http.corosync.set_corosync_conf(
- self.corosync_conf_text,
- communication_list=[
- dict(
- label="node-1",
- ),
- dict(
- label="node-2",
- errno=8,
- error_msg="failure",
- was_connected=False,
- ),
+ self.config.http.corosync.set_corosync_conf(
+ self.corosync_conf_text,
+ communication_list=[
+ dict(label="node-1"),
+ dict(
+ label="node-2",
+ errno=8,
+ error_msg="failure",
+ was_connected=False,
+ ),
+ ],
+ )
+ self.config.http.corosync.reload_corosync_conf(
+ communication_list=[
+ [
+ {
+ "label": self.node_labels[0],
+ "response_code": 400,
+ "output": "Failed",
+ },
],
- )
- .http.corosync.reload_corosync_conf(
- communication_list=[
- [
- {
- "label": self.node_labels[0],
- "response_code": 400,
- "output": "Failed",
- },
- ],
- [
- {
- "label": self.node_labels[1],
- },
- ],
- ]
- )
- .http.corosync.qdevice_client_stop(
- communication_list=[
- dict(
- label="node-1",
- ),
- dict(
- label="node-2",
- response_code=400,
- output="error",
- ),
- ]
- )
- .http.corosync.qdevice_client_start(
- communication_list=[
- dict(
- label="node-1",
- errno=8,
- error_msg="failure",
- was_connected=False,
- ),
- dict(
- label="node-2",
- ),
- ]
- )
+ [{"label": self.node_labels[1]}],
+ ]
+ )
+ self.config.http.corosync.qdevice_client_stop(
+ communication_list=[
+ dict(label="node-1"),
+ dict(label="node-2", response_code=400, output="error"),
+ ]
+ )
+ self.config.http.corosync.qdevice_client_start(
+ communication_list=[
+ dict(
+ label="node-1",
+ errno=8,
+ error_msg="failure",
+ was_connected=False,
+ ),
+ dict(label="node-2"),
+ ]
)
env = self.env_assistant.get_env()
@@ -990,7 +931,8 @@ class PushCorosyncConfLiveWithQdeviceTest(PushCorosyncConfLiveBase):
reason="Failed",
),
fixture.info(
- report_codes.COROSYNC_CONFIG_RELOADED, node="node-2"
+ report_codes.COROSYNC_CONFIG_RELOADED,
+ node="node-2",
),
fixture.info(report_codes.QDEVICE_CLIENT_RELOAD_STARTED),
fixture.info(
@@ -1024,29 +966,28 @@ class PushCorosyncConfLiveWithQdeviceTest(PushCorosyncConfLiveBase):
def test_reload_not_successful(self):
self.corosync_conf_facade.need_qdevice_reload = True
- (
- self.config.http.corosync.set_corosync_conf(
- self.corosync_conf_text, node_labels=self.node_labels
- ).http.corosync.reload_corosync_conf(
- communication_list=[
- [
- {
- "label": self.node_labels[0],
- "response_code": 200,
- "output": json.dumps(
- dict(code="not_running", message="not running")
- ),
- },
- ],
- [
- {
- "label": self.node_labels[1],
- "response_code": 200,
- "output": "not a json",
- },
- ],
- ]
- )
+ self.config.http.corosync.set_corosync_conf(
+ self.corosync_conf_text, node_labels=self.node_labels
+ )
+ self.config.http.corosync.reload_corosync_conf(
+ communication_list=[
+ [
+ {
+ "label": self.node_labels[0],
+ "response_code": 200,
+ "output": json.dumps(
+ dict(code="not_running", message="not running")
+ ),
+ },
+ ],
+ [
+ {
+ "label": self.node_labels[1],
+ "response_code": 200,
+ "output": "not a json",
+ },
+ ],
+ ]
)
self.env_assistant.assert_raise_library_error(
lambda: self.env_assistant.get_env().push_corosync_conf(
@@ -1070,7 +1011,8 @@ class PushCorosyncConfLiveWithQdeviceTest(PushCorosyncConfLiveBase):
node="node-1",
),
fixture.warn(
- report_codes.INVALID_RESPONSE_FORMAT, node="node-2"
+ report_codes.INVALID_RESPONSE_FORMAT,
+ node="node-2",
),
fixture.error(
report_codes.UNABLE_TO_PERFORM_OPERATION_ON_ANY_NODE
diff --git a/pcs_test/tier1/legacy/test_constraints.py b/pcs_test/tier1/legacy/test_constraints.py
index 36924925..49b413a8 100644
--- a/pcs_test/tier1/legacy/test_constraints.py
+++ b/pcs_test/tier1/legacy/test_constraints.py
@@ -13,9 +13,11 @@ from pcs_test.tools.assertions import (
from pcs_test.tools.bin_mock import get_mock_settings
from pcs_test.tools.cib import get_assert_pcs_effect_mixin
from pcs_test.tools.fixture_cib import (
+ CachedCibFixture,
fixture_master_xml,
fixture_to_cib,
wrap_element_by_master,
+ wrap_element_by_master_file,
)
from pcs_test.tools.misc import (
get_test_resource as rc,
@@ -23,7 +25,6 @@ from pcs_test.tools.misc import (
skip_unless_crm_rule,
outdent,
ParametrizedTestMetaClass,
- write_data_to_tmpfile,
write_file_to_tmpfile,
)
from pcs_test.tools.pcs_runner import pcs, PcsRunner
@@ -54,70 +55,63 @@ empty_cib = rc("cib-empty-3.7.xml")
large_cib = rc("cib-large.xml")
-@skip_unless_crm_rule()
-class ConstraintTest(unittest.TestCase):
- def setUp(self):
- self.temp_cib = get_tmp_file("tier1_constraints")
- write_file_to_tmpfile(empty_cib, self.temp_cib)
- self.temp_corosync_conf = None
-
- def tearDown(self):
- self.temp_cib.close()
- if self.temp_corosync_conf:
- self.temp_corosync_conf.close()
-
- def fixture_resources(self):
- write_data_to_tmpfile(self.fixture_cib_cache(), self.temp_cib)
-
- def fixture_cib_cache(self):
- if not hasattr(self.__class__, "cib_cache"):
- self.__class__.cib_cache = self.fixture_cib()
- return self.__class__.cib_cache
-
- def fixture_cib(self):
- write_file_to_tmpfile(empty_cib, self.temp_cib)
- self.setupClusterA()
- self.temp_cib.flush()
- self.temp_cib.seek(0)
- cib_content = self.temp_cib.read()
- self.temp_cib.seek(0)
- write_file_to_tmpfile(empty_cib, self.temp_cib)
- return cib_content
-
- # Sets up a cluster with Resources, groups, master/slave resource and clones
- def setupClusterA(self):
+class ConstraintTestCibFixture(CachedCibFixture):
+ def _setup_cib(self):
line = "resource create D1 ocf:heartbeat:Dummy".split()
- output, returnVal = pcs(self.temp_cib.name, line)
+ output, returnVal = pcs(self.cache_path, line)
assert returnVal == 0 and output == ""
line = "resource create D2 ocf:heartbeat:Dummy".split()
- output, returnVal = pcs(self.temp_cib.name, line)
+ output, returnVal = pcs(self.cache_path, line)
assert returnVal == 0 and output == ""
line = "resource create D3 ocf:heartbeat:Dummy".split()
- output, returnVal = pcs(self.temp_cib.name, line)
+ output, returnVal = pcs(self.cache_path, line)
assert returnVal == 0 and output == ""
line = "resource create D4 ocf:heartbeat:Dummy".split()
- output, returnVal = pcs(self.temp_cib.name, line)
+ output, returnVal = pcs(self.cache_path, line)
assert returnVal == 0 and output == ""
line = "resource create D5 ocf:heartbeat:Dummy".split()
- output, returnVal = pcs(self.temp_cib.name, line)
+ output, returnVal = pcs(self.cache_path, line)
assert returnVal == 0 and output == ""
line = "resource create D6 ocf:heartbeat:Dummy".split()
- output, returnVal = pcs(self.temp_cib.name, line)
+ output, returnVal = pcs(self.cache_path, line)
assert returnVal == 0 and output == ""
line = "resource clone D3".split()
- output, returnVal = pcs(self.temp_cib.name, line)
+ output, returnVal = pcs(self.cache_path, line)
assert returnVal == 0 and output == ""
# pcs no longer allows turning resources into masters but supports
# existing ones. In order to test it, we need to put a master in the
# CIB without pcs.
- wrap_element_by_master(self.temp_cib, "D4", master_id="Master")
+ wrap_element_by_master_file(self.cache_path, "D4", master_id="Master")
+
+
+CONSTRAINT_TEST_CIB_FIXTURE = ConstraintTestCibFixture(
+ "fixture_tier1_constraints", empty_cib
+)
+
+
+@skip_unless_crm_rule()
+class ConstraintTest(unittest.TestCase):
+ def setUp(self):
+ self.temp_cib = get_tmp_file("tier1_constraints")
+ write_file_to_tmpfile(empty_cib, self.temp_cib)
+ self.temp_corosync_conf = None
+
+ def tearDown(self):
+ self.temp_cib.close()
+ if self.temp_corosync_conf:
+ self.temp_corosync_conf.close()
+
+ def fixture_resources(self):
+ write_file_to_tmpfile(
+ CONSTRAINT_TEST_CIB_FIXTURE.cache_path, self.temp_cib
+ )
def testConstraintRules(self):
self.fixture_resources()
diff --git a/pcs_test/tier1/legacy/test_resource.py b/pcs_test/tier1/legacy/test_resource.py
index 8b043260..ecf0d23d 100644
--- a/pcs_test/tier1/legacy/test_resource.py
+++ b/pcs_test/tier1/legacy/test_resource.py
@@ -12,8 +12,10 @@ from pcs_test.tools.assertions import (
from pcs_test.tools.bin_mock import get_mock_settings
from pcs_test.tools.cib import get_assert_pcs_effect_mixin
from pcs_test.tools.fixture_cib import (
+ CachedCibFixture,
fixture_master_xml,
fixture_to_cib,
+ wrap_element_by_master_file,
wrap_element_by_master,
)
from pcs_test.tools.misc import (
@@ -154,21 +156,8 @@ class ResourceDescribe(TestCase, AssertPcsMixin):
)
-class Resource(TestCase, AssertPcsMixin):
- def setUp(self):
- self.temp_cib = get_tmp_file("tier1_resource")
- self.temp_large_cib = get_tmp_file("tier1_resource_large")
- write_file_to_tmpfile(empty_cib, self.temp_cib)
- write_file_to_tmpfile(large_cib, self.temp_large_cib)
- self.pcs_runner = PcsRunner(self.temp_cib.name)
- self.pcs_runner.mock_settings = get_mock_settings("crm_resource_binary")
-
- def tearDown(self):
- self.temp_cib.close()
- self.temp_large_cib.close()
-
- # Setups up a cluster with Resources, groups, master/slave resource & clones
- def setupClusterA(self):
+class ResourceTestCibFixture(CachedCibFixture):
+ def _setup_cib(self):
self.assert_pcs_success(
(
"resource create --no-default-ops ClusterIP ocf:heartbeat:IPaddr2"
@@ -215,7 +204,34 @@ class Resource(TestCase, AssertPcsMixin):
# pcs no longer allows turning resources into masters but supports
# existing ones. In order to test it, we need to put a master in the
# CIB without pcs.
- wrap_element_by_master(self.temp_cib, "ClusterIP5", master_id="Master")
+ wrap_element_by_master_file(
+ self.cache_path, "ClusterIP5", master_id="Master"
+ )
+
+
+RESOURCE_TEST_CIB_FIXTURE = ResourceTestCibFixture(
+ "fixture_tier1_resource", empty_cib
+)
+
+
+class Resource(TestCase, AssertPcsMixin):
+ def setUp(self):
+ self.temp_cib = get_tmp_file("tier1_resource")
+ self.temp_large_cib = get_tmp_file("tier1_resource_large")
+ write_file_to_tmpfile(empty_cib, self.temp_cib)
+ write_file_to_tmpfile(large_cib, self.temp_large_cib)
+ self.pcs_runner = PcsRunner(self.temp_cib.name)
+ self.pcs_runner.mock_settings = get_mock_settings("crm_resource_binary")
+
+ def tearDown(self):
+ self.temp_cib.close()
+ self.temp_large_cib.close()
+
+ # Sets up a cluster with Resources, groups, master/slave resource & clones
+ def setupClusterA(self):
+ write_file_to_tmpfile(
+ RESOURCE_TEST_CIB_FIXTURE.cache_path, self.temp_cib
+ )
def testCaseInsensitive(self):
o, r = pcs(
diff --git a/pcs_test/tier1/legacy/test_stonith.py b/pcs_test/tier1/legacy/test_stonith.py
index b3def2d4..f6b93f01 100644
--- a/pcs_test/tier1/legacy/test_stonith.py
+++ b/pcs_test/tier1/legacy/test_stonith.py
@@ -8,6 +8,7 @@ from pcs.common.str_tools import indent
from pcs_test.tier1.cib_resource.common import ResourceTest
from pcs_test.tools.assertions import AssertPcsMixin
from pcs_test.tools.bin_mock import get_mock_settings
+from pcs_test.tools.fixture_cib import CachedCibFixture
from pcs_test.tools.misc import (
get_test_resource as rc,
get_tmp_file,
@@ -840,6 +841,46 @@ _fixture_stonith_level_cache = None
_fixture_stonith_level_cache_lock = Lock()
+class StonithLevelTestCibFixture(CachedCibFixture):
+ def _fixture_stonith_resource(self, name):
+ self.assert_pcs_success(
+ [
+ "stonith",
+ "create",
+ name,
+ "fence_apc",
+ "pcmk_host_list=rh7-1 rh7-2",
+ "ip=i",
+ "username=u",
+ ]
+ )
+
+ def _setup_cib(self):
+ self._fixture_stonith_resource("F1")
+ self._fixture_stonith_resource("F2")
+ self._fixture_stonith_resource("F3")
+
+ self.assert_pcs_success("stonith level add 1 rh7-1 F1".split())
+ self.assert_pcs_success("stonith level add 2 rh7-1 F2".split())
+ self.assert_pcs_success("stonith level add 2 rh7-2 F1".split())
+ self.assert_pcs_success("stonith level add 1 rh7-2 F2".split())
+ self.assert_pcs_success("stonith level add 4 regexp%rh7-\\d F3".split())
+ self.assert_pcs_success(
+ "stonith level add 3 regexp%rh7-\\d F2 F1".split()
+ )
+ self.assert_pcs_success(
+ "stonith level add 5 attrib%fencewith=levels1 F3 F2".split()
+ )
+ self.assert_pcs_success(
+ "stonith level add 6 attrib%fencewith=levels2 F3 F1".split()
+ )
+
+
+STONITH_LEVEL_TEST_CIB_FIXTURE = StonithLevelTestCibFixture(
+ "fixture_tier1_stonith_level_tests", rc("cib-empty-withnodes.xml")
+)
+
+
class LevelTestsBase(TestCase, AssertPcsMixin):
def setUp(self):
self.temp_cib = get_tmp_file("tier1_test_stonith_level")
@@ -877,26 +918,11 @@ class LevelTestsBase(TestCase, AssertPcsMixin):
_fixture_stonith_level_cache = self.fixture_cib_config()
return _fixture_stonith_level_cache
- def fixture_cib_config(self):
- self.fixture_stonith_resource("F1")
- self.fixture_stonith_resource("F2")
- self.fixture_stonith_resource("F3")
-
- self.assert_pcs_success("stonith level add 1 rh7-1 F1".split())
- self.assert_pcs_success("stonith level add 2 rh7-1 F2".split())
- self.assert_pcs_success("stonith level add 2 rh7-2 F1".split())
- self.assert_pcs_success("stonith level add 1 rh7-2 F2".split())
- self.assert_pcs_success("stonith level add 4 regexp%rh7-\\d F3".split())
- self.assert_pcs_success(
- "stonith level add 3 regexp%rh7-\\d F2 F1".split()
- )
- self.assert_pcs_success(
- "stonith level add 5 attrib%fencewith=levels1 F3 F2".split()
- )
- self.assert_pcs_success(
- "stonith level add 6 attrib%fencewith=levels2 F3 F1".split()
- )
-
+ @staticmethod
+ def fixture_cib_config():
+ cib_content = ""
+ with open(STONITH_LEVEL_TEST_CIB_FIXTURE.cache_path, "r") as cib_file:
+ cib_content = cib_file.read()
config = outdent(
"""\
Target: rh7-1
@@ -914,12 +940,7 @@ class LevelTestsBase(TestCase, AssertPcsMixin):
Level 6 - F3,F1
"""
)
-
config_lines = config.splitlines()
- self.temp_cib.flush()
- self.temp_cib.seek(0)
- cib_content = self.temp_cib.read()
- self.temp_cib.seek(0)
return cib_content, config, config_lines
diff --git a/pcs_test/tools/command_env/config_http_corosync.py b/pcs_test/tools/command_env/config_http_corosync.py
index cdaf65ff..7f84f406 100644
--- a/pcs_test/tools/command_env/config_http_corosync.py
+++ b/pcs_test/tools/command_env/config_http_corosync.py
@@ -6,6 +6,23 @@ from pcs_test.tools.command_env.mock_node_communicator import (
)
+def corosync_running_check_response(running):
+ return json.dumps(
+ {
+ "node": {
+ "corosync": running,
+ "services": {
+ "corosync": {
+ "installed": True,
+ "enabled": not running,
+ "running": running,
+ }
+ },
+ }
+ }
+ )
+
+
class CorosyncShortcuts:
def __init__(self, calls):
self.__calls = calls
@@ -29,7 +46,8 @@ class CorosyncShortcuts:
node_labels,
communication_list,
action="remote/status",
- output='{"corosync":false}',
+ param_list=[("version", "2")],
+ output=corosync_running_check_response(False),
)
def get_corosync_online_targets(
@@ -51,7 +69,8 @@ class CorosyncShortcuts:
node_labels,
communication_list,
action="remote/status",
- output='{"corosync":true}',
+ param_list=[("version", "2")],
+ output=corosync_running_check_response(True),
)
def get_corosync_conf(
diff --git a/pcs_test/tools/fixture_cib.py b/pcs_test/tools/fixture_cib.py
index 730b0e33..602491c8 100644
--- a/pcs_test/tools/fixture_cib.py
+++ b/pcs_test/tools/fixture_cib.py
@@ -3,7 +3,14 @@ import os
from unittest import mock
from lxml import etree
+from pcs_test.tools.assertions import AssertPcsMixin
from pcs_test.tools.custom_mock import MockLibraryReportProcessor
+from pcs_test.tools.misc import (
+ get_test_resource,
+ get_tmp_file,
+ write_file_to_tmpfile,
+)
+from pcs_test.tools.pcs_runner import PcsRunner
from pcs_test.tools.xml import etree_to_str
from pcs import settings
@@ -12,6 +19,54 @@ from pcs.lib.external import CommandRunner
# pylint: disable=line-too-long
+class CachedCibFixture(AssertPcsMixin):
+ def __init__(self, cache_name, empty_cib_path):
+ self._empty_cib_path = empty_cib_path
+ self._cache_name = cache_name
+ self._cache_path = None
+ self._pcs_runner = None
+
+ def _setup_cib(self):
+ raise NotImplementedError()
+
+ def set_up(self):
+ fixture_dir = get_test_resource("temp_fixtures")
+ os.makedirs(fixture_dir, exist_ok=True)
+ self._cache_path = os.path.join(fixture_dir, self._cache_name)
+ self._pcs_runner = PcsRunner(self._cache_path)
+
+ with open(self._empty_cib_path, "r") as template_file, open(
+ self.cache_path, "w"
+ ) as cache_file:
+ cache_file.write(template_file.read())
+ self._setup_cib()
+
+ def clean_up(self):
+ if os.path.isfile(self.cache_path):
+ os.unlink(self.cache_path)
+
+ @property
+ def cache_path(self):
+ if self._cache_path is None:
+ raise AssertionError("Cache has not been initiialized")
+ return self._cache_path
+
+ # methods for supporting assert_pcs_success
+ @property
+ def pcs_runner(self):
+ if self._pcs_runner is None:
+ raise AssertionError("Cache has not been initialized")
+ return self._pcs_runner
+
+ def assertEqual(self, first, second, msg=None):
+ # pylint: disable=invalid-name
+ # pylint: disable=no-self-use
+ if first != second:
+ raise AssertionError(
+ f"{msg}\n{first} != {second}" if msg else f"{first} != {second}"
+ )
+
+
def wrap_element_by_master(cib_file, resource_id, master_id=None):
cib_file.seek(0)
cib_tree = etree.parse(cib_file, etree.XMLParser(huge_tree=True)).getroot()
@@ -49,6 +104,16 @@ def wrap_element_by_master(cib_file, resource_id, master_id=None):
)
+def wrap_element_by_master_file(filepath, resource_id, master_id=None):
+ cib_tmp = get_tmp_file("wrap_by_master")
+ write_file_to_tmpfile(filepath, cib_tmp)
+ wrap_element_by_master(cib_tmp, resource_id, master_id=master_id)
+ cib_tmp.seek(0)
+ with open(filepath, "w") as target:
+ target.write(cib_tmp.read())
+ cib_tmp.close()
+
+
def fixture_master_xml(name, all_ops=True, meta_dict=None):
default_ops = f"""
<op id="{name}-notify-interval-0s" interval="0s" name="notify"
diff --git a/pcsd/Makefile.am b/pcsd/Makefile.am
index 066ae8b6..a16917f5 100644
--- a/pcsd/Makefile.am
+++ b/pcsd/Makefile.am
@@ -50,7 +50,6 @@ dist_pcsd_DATA = \
cluster.rb \
config.rb \
corosyncconf.rb \
- fenceagent.rb \
pcsd_action_command.rb \
pcsd-cli-main.rb \
pcsd_exchange_format.rb \
diff --git a/pcsd/capabilities.xml b/pcsd/capabilities.xml
index 745b05ad..f9dd8891 100644
--- a/pcsd/capabilities.xml
+++ b/pcsd/capabilities.xml
@@ -561,13 +561,6 @@
pcs commands: cluster kill
</description>
</capability>
- <capability id="node.restart" in-pcs="0" in-pcsd="1">
- <description>
- Restart one host machine or the local host machine if no host specified.
-
- daemon urls: node_restart
- </description>
- </capability>
diff --git a/pcsd/fenceagent.rb b/pcsd/fenceagent.rb
deleted file mode 100644
index 4a3ba07d..00000000
--- a/pcsd/fenceagent.rb
+++ /dev/null
@@ -1,59 +0,0 @@
-def getFenceAgents(auth_user)
- fence_agent_list = {}
- stdout, stderr, retval = run_cmd(
- auth_user, PCS, "stonith", "list", "--nodesc"
- )
- if retval != 0
- $logger.error("Error running 'pcs stonith list --nodesc")
- $logger.error(stdout + stderr)
- return {}
- end
-
- agents = stdout
- agents.each { |a|
- fa = FenceAgent.new
- fa.name = a.chomp
- fence_agent_list[fa.name] = fa
- }
- return fence_agent_list
-end
-
-class FenceAgent
- attr_accessor :name, :resource_class, :required_options, :optional_options, :advanced_options, :info
- def initialize(name=nil, required_options={}, optional_options={}, resource_class=nil, advanced_options={})
- @name = name
- @required_options = {}
- @optional_options = {}
- @required_options = required_options
- @optional_options = optional_options
- @advanced_options = advanced_options
- @resource_class = nil
- end
-
- def type
- name
- end
-
- def to_json(options = {})
- JSON.generate({
- :full_name => "stonith:#{name}",
- :class => 'stonith',
- :provider => nil,
- :type => name,
- })
- end
-
- def long_desc
- if info && info.length >= 2
- return info[1]
- end
- return ""
- end
-
- def short_desc
- if info && info.length >= 1
- return info[0]
- end
- return ""
- end
-end
diff --git a/pcsd/pcs.rb b/pcsd/pcs.rb
index 9e26c607..1507bdf5 100644
--- a/pcsd/pcs.rb
+++ b/pcsd/pcs.rb
@@ -1514,21 +1514,6 @@ def allowed_for_superuser(auth_user)
return true
end
-def get_default_overview_node_list(clustername)
- nodes = get_cluster_nodes clustername
- node_list = []
- nodes.each { |node|
- node_list << {
- 'error_list' => [],
- 'warning_list' => [],
- 'status' => 'unknown',
- 'quorum' => false,
- 'name' => node
- }
- }
- return node_list
-end
-
def enable_service(service)
result = run_pcs_internal(
PCSAuth.getSuperuserAuth(),
diff --git a/pcsd/pcsd.rb b/pcsd/pcsd.rb
index bf91e906..3297fc5e 100644
--- a/pcsd/pcsd.rb
+++ b/pcsd/pcsd.rb
@@ -11,7 +11,6 @@ require 'cgi'
require 'bootstrap.rb'
require 'resource.rb'
require 'remote.rb'
-require 'fenceagent.rb'
require 'cluster.rb'
require 'config.rb'
require 'pcs.rb'
@@ -54,14 +53,14 @@ end
before do
# nobody is logged in yet
@auth_user = nil
- @tornado_session_username = Thread.current[:tornado_username]
- @tornado_session_groups = Thread.current[:tornado_groups]
- @tornado_is_authenticated = Thread.current[:tornado_is_authenticated]
if(request.path.start_with?('/remote/') and request.path != "/remote/auth") or request.path == '/run_pcs' or request.path.start_with?('/api/')
# Sets @auth_user to a hash containing info about logged in user or halts
# the request processing if login credentials are incorrect.
- protect_by_token!
+ @auth_user = PCSAuth.loginByToken(request.cookies)
+ unless @auth_user
+ halt [401, '{"notauthorized":"true"}']
+ end
else
# Set a sane default: nobody is logged in, but we do not need to check both
# for nil and empty username (if auth_user and auth_user[:username])
@@ -120,37 +119,6 @@ def run_cfgsync
end
end
-helpers do
- def is_ajax?
- return request.env['HTTP_X_REQUESTED_WITH'] == 'XMLHttpRequest'
- end
-
- def protect_by_token!
- @auth_user = PCSAuth.loginByToken(request.cookies)
- unless @auth_user
- halt [401, '{"notauthorized":"true"}']
- end
- end
-
- def getParamList(params)
- param_line = []
- meta_options = []
- params.each { |param, val|
- if param.start_with?("_res_paramne_") or (param.start_with?("_res_paramempty_") and val != "")
- myparam = param.sub(/^_res_paramne_/,"").sub(/^_res_paramempty_/,"")
- param_line << "#{myparam}=#{val}"
- end
- if param == "disabled"
- meta_options << 'meta' << 'target-role=Stopped'
- end
- if param == "force" and val
- param_line << "--force"
- end
- }
- return param_line + meta_options
- end
-end
-
get '/remote/?:command?' do
return remote(params, request, @auth_user)
end
@@ -675,10 +643,6 @@ post '/manage/auth_gui_against_nodes' do
]
end
-get '/clusters_overview' do
- clusters_overview(params, request, getAuthUser())
-end
-
get '/imported-cluster-list' do
imported_cluster_list(params, request, getAuthUser())
end
@@ -693,190 +657,11 @@ post '/managec/:cluster/permissions_save/?' do
)
end
-get '/managec/:cluster/status_all' do
- auth_user = getAuthUser()
- status_all(params, request, auth_user, get_cluster_nodes(params[:cluster]))
-end
-
get '/managec/:cluster/cluster_status' do
auth_user = getAuthUser()
cluster_status_gui(auth_user, params[:cluster])
end
-get '/managec/:cluster/cluster_properties' do
- auth_user = getAuthUser()
- cluster = params[:cluster]
- unless cluster
- return 200, {}
- end
- code, out = send_cluster_request_with_token(auth_user, cluster, 'get_cib')
- if code == 403
- return [403, 'Permission denied']
- elsif code != 200
- return [400, 'getting CIB failed']
- end
- begin
- properties = getAllSettings(nil, REXML::Document.new(out))
- code, out = send_cluster_request_with_token(
- auth_user, cluster, 'get_cluster_properties_definition'
- )
-
- if code == 403
- return [403, 'Permission denied']
- elsif code == 404
- definition = {
- 'batch-limit' => {
- 'name' => 'batch-limit',
- 'source' => 'pacemaker-schedulerd',
- 'default' => '0',
- 'type' => 'integer',
- 'shortdesc' => 'The number of jobs that pacemaker is allowed to execute in parallel.',
- 'longdesc' => 'The "correct" value will depend on the speed and load of your network and cluster nodes.',
- 'readable_name' => 'Batch Limit',
- 'advanced' => false
- },
- 'no-quorum-policy' => {
- 'name' => 'no-quorum-policy',
- 'source' => 'pacemaker-schedulerd',
- 'default' => 'stop',
- 'type' => 'enum',
- 'enum' => ['stop', 'freeze', 'ignore', 'suicide'],
- 'shortdesc' => 'What to do when the cluster does not have quorum.',
- 'longdesc' => 'Allowed values:
- * ignore - continue all resource management
- * freeze - continue resource management, but don\'t recover resources from nodes not in the affected partition
- * stop - stop all resources in the affected cluster partition
- * suicide - fence all nodes in the affected cluster partition',
- 'readable_name' => 'No Quorum Policy',
- 'advanced' => false
- },
- 'symmetric-cluster' => {
- 'name' => 'symmetric-cluster',
- 'source' => 'pacemaker-schedulerd',
- 'default' => 'true',
- 'type' => 'boolean',
- 'shortdesc' => 'All resources can run anywhere by default.',
- 'longdesc' => 'All resources can run anywhere by default.',
- 'readable_name' => 'Symmetric',
- 'advanced' => false
- },
- 'stonith-enabled' => {
- 'name' => 'stonith-enabled',
- 'source' => 'pacemaker-schedulerd',
- 'default' => 'true',
- 'type' => 'boolean',
- 'shortdesc' => 'Failed nodes are STONITH\'d',
- 'longdesc' => 'Failed nodes are STONITH\'d',
- 'readable_name' => 'Stonith Enabled',
- 'advanced' => false
- },
- 'stonith-action' => {
- 'name' => 'stonith-action',
- 'source' => 'pacemaker-schedulerd',
- 'default' => 'reboot',
- 'type' => 'enum',
- 'enum' => ['reboot', 'poweroff', 'off'],
- 'shortdesc' => 'Action to send to STONITH device',
- 'longdesc' => 'Action to send to STONITH device Allowed values: reboot, poweroff, off',
- 'readable_name' => 'Stonith Action',
- 'advanced' => false
- },
- 'cluster-delay' => {
- 'name' => 'cluster-delay',
- 'source' => 'pacemaker-schedulerd',
- 'default' => '60s',
- 'type' => 'time',
- 'shortdesc' => 'Round trip delay over the network (excluding action execution)',
- 'longdesc' => 'The "correct" value will depend on the speed and load of your network and cluster nodes.',
- 'readable_name' => 'Cluster Delay',
- 'advanced' => false
- },
- 'stop-orphan-resources' => {
- 'name' => 'stop-orphan-resources',
- 'source' => 'pacemaker-schedulerd',
- 'default' => 'true',
- 'type' => 'boolean',
- 'shortdesc' => 'Should deleted resources be stopped',
- 'longdesc' => 'Should deleted resources be stopped',
- 'readable_name' => 'Stop Orphan Resources',
- 'advanced' => false
- },
- 'stop-orphan-actions' => {
- 'name' => 'stop-orphan-actions',
- 'source' => 'pacemaker-schedulerd',
- 'default' => 'true',
- 'type' => 'boolean',
- 'shortdesc' => 'Should deleted actions be cancelled',
- 'longdesc' => 'Should deleted actions be cancelled',
- 'readable_name' => 'Stop Orphan Actions',
- 'advanced' => false
- },
- 'start-failure-is-fatal' => {
- 'name' => 'start-failure-is-fatal',
- 'source' => 'pacemaker-schedulerd',
- 'default' => 'true',
- 'type' => 'boolean',
- 'shortdesc' => 'Always treat start failures as fatal',
- 'longdesc' => 'This was the old default. However when set to FALSE, the cluster will instead use the resource\'s failcount and value for resource-failure-stickiness',
- 'readable_name' => 'Start Failure is Fatal',
- 'advanced' => false
- },
- 'pe-error-series-max' => {
- 'name' => 'pe-error-series-max',
- 'source' => 'pacemaker-schedulerd',
- 'default' => '-1',
- 'type' => 'integer',
- 'shortdesc' => 'The number of PE inputs resulting in ERRORs to save',
- 'longdesc' => 'Zero to disable, -1 to store unlimited.',
- 'readable_name' => 'PE Error Storage',
- 'advanced' => false
- },
- 'pe-warn-series-max' => {
- 'name' => 'pe-warn-series-max',
- 'source' => 'pacemaker-schedulerd',
- 'default' => '5000',
- 'type' => 'integer',
- 'shortdesc' => 'The number of PE inputs resulting in WARNINGs to save',
- 'longdesc' => 'Zero to disable, -1 to store unlimited.',
- 'readable_name' => 'PE Warning Storage',
- 'advanced' => false
- },
- 'pe-input-series-max' => {
- 'name' => 'pe-input-series-max',
- 'source' => 'pacemaker-schedulerd',
- 'default' => '4000',
- 'type' => 'integer',
- 'shortdesc' => 'The number of other PE inputs to save',
- 'longdesc' => 'Zero to disable, -1 to store unlimited.',
- 'readable_name' => 'PE Input Storage',
- 'advanced' => false
- },
- 'enable-acl' => {
- 'name' => 'enable-acl',
- 'source' => 'pacemaker-based',
- 'default' => 'false',
- 'type' => 'boolean',
- 'shortdesc' => 'Enable CIB ACL',
- 'longdesc' => 'Should pacemaker use ACLs to determine access to cluster',
- 'readable_name' => 'Enable ACLs',
- 'advanced' => false
- },
- }
- elsif code != 200
- return [400, 'getting properties definition failed']
- else
- definition = JSON.parse(out)
- end
-
- definition.each { |name, prop|
- prop['value'] = properties[name]
- }
- return [200, JSON.generate(definition)]
- rescue
- return [400, 'unable to get cluster properties']
- end
-end
-
get '/managec/:cluster/get_resource_agent_metadata' do
auth_user = getAuthUser()
cluster = params[:cluster]
@@ -888,69 +673,7 @@ get '/managec/:cluster/get_resource_agent_metadata' do
false,
{:resource_agent => resource_agent}
)
- if code != 404
- return [code, out]
- end
-
- code, out = send_cluster_request_with_token(
- auth_user,
- cluster,
- 'resource_metadata',
- false,
- {
- :resourcename => resource_agent,
- :new => true
- }
- )
- if code != 200
- return [400, 'Unable to get meta-data of specified resource agent.']
- end
- desc_regex = Regexp.new(
- '<span class="reg[^>]*>(?<short>[^>]*) </span>[^<]*' +
- '<span title="(?<long>[^"]*)"'
- )
- parameters_regex = Regexp.new(
- '<input type="hidden" name="resource_type"[^>]*>(?<required>[\s\S]*)' +
- '<div class="bold">Optional Arguments:</div>(?<optional>[\S\s]*)' +
- '<tr class="stop">'
- )
- parameter_regex = Regexp.new(
- '<tr title="(?<longdesc>[^"]*)"[^>]*>[\s]*<td class="reg">\s*' +
- '(?<name>[^<\s]*)\s*</td>\s*<td>\s*' +
- '<input placeholder="(?<shortdesc>[^"]*)"'
- )
-
- desc = desc_regex.match(out)
- unless desc
- return [400, 'Unable to get meta-data of specified resource agent.']
- end
- result = {
- :name => resource_agent,
- :shortdesc => html2plain(desc[:short]),
- :longdesc => html2plain(desc[:long]),
- :parameters => []
- }
-
- parameters = parameters_regex.match(out)
- parameters[:required].scan(parameter_regex) { |match|
- result[:parameters] << {
- :name => html2plain(match[1]),
- :longdesc => html2plain(match[0]),
- :shortdesc => html2plain(match[2]),
- :type => 'string',
- :required => true
- }
- }
- parameters[:optional].scan(parameter_regex) { |match|
- result[:parameters] << {
- :name => html2plain(match[1]),
- :longdesc => html2plain(match[0]),
- :shortdesc => html2plain(match[2]),
- :type => 'string',
- :required => false
- }
- }
- return [200, JSON.generate(result)]
+ return [code, out]
end
get '/managec/:cluster/get_fence_agent_metadata' do
@@ -964,90 +687,7 @@ get '/managec/:cluster/get_fence_agent_metadata' do
false,
{:fence_agent => fence_agent}
)
- if code != 404
- return [code, out]
- end
-
- code, out = send_cluster_request_with_token(
- auth_user,
- cluster,
- 'fence_device_metadata',
- false,
- {
- :resourcename => fence_agent.sub('stonith:', ''),
- :new => true
- }
- )
- if code != 200
- return [400, 'Unable to get meta-data of specified fence agent.']
- end
- desc_regex = Regexp.new(
- '<span class="reg[^>]*>(?<short>[^>]*) </span>[^<]*' +
- '<span title="(?<long>[^"]*)"'
- )
- parameters_regex = Regexp.new(
- '<input type="hidden" name="resource_type"[^>]*>(?<required>[\s\S]*)' +
- '<div class="bold">Optional Arguments:</div>(?<optional>[\S\s]*)' +
- '<div class="bold">Advanced Arguments:</div>(?<advanced>[\S\s]*)' +
- '<tr class="stop">'
- )
- required_parameter_regex = Regexp.new(
- '<tr title="(?<longdesc>[^"]*)[^>]*>[\s]*' +
- '<td class="reg">\s* (?<name>[^<\s]*)\s*</td>\s*<td>\s*' +
- '<input placeholder="(?<shortdesc>[^"]*)"'
- )
- other_parameter_regex = Regexp.new(
- '<td class="reg">\s* (?<name>[^<\s]*)\s*</td>\s*<td>\s*' +
- '<input placeholder="(?<shortdesc>[^"]*)"'
- )
-
- result = {
- :name => fence_agent,
- :shortdesc => '',
- :longdesc => '',
- :parameters => []
- }
-
-  # pcsd in version 0.9.137 (and older) does not provide a description for
-  # fence agents
- desc = desc_regex.match(out)
- if desc
- result[:shortdesc] = html2plain(desc[:short])
- result[:longdesc] = html2plain(desc[:long])
- end
-
- parameters = parameters_regex.match(out)
- parameters[:required].scan(required_parameter_regex) { |match|
- result[:parameters] << {
- :name => html2plain(match[1]),
- :longdesc => html2plain(match[0]),
- :shortdesc => html2plain(match[2]),
- :type => 'string',
- :required => true,
- :advanced => false
- }
- }
- parameters[:optional].scan(other_parameter_regex) { |match|
- result[:parameters] << {
- :name => html2plain(match[0]),
- :longdesc => '',
- :shortdesc => html2plain(match[1]),
- :type => 'string',
- :required => false,
- :advanced => false
- }
- }
- parameters[:advanced].scan(other_parameter_regex) { |match|
- result[:parameters] << {
- :name => html2plain(match[0]),
- :longdesc => '',
- :shortdesc => html2plain(match[1]),
- :type => 'string',
- :required => false,
- :advanced => true
- }
- }
- return [200, JSON.generate(result)]
+ return [code, out]
end
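Both metadata routes above now return the remote response as-is instead of
falling back to scraping pcs-0.9 HTML. A minimal sketch of the resulting
handler shape (assuming the parameter extraction elided by the hunk context
stays unchanged):

    get '/managec/:cluster/get_fence_agent_metadata' do
      auth_user = getAuthUser()
      code, out = send_cluster_request_with_token(
        auth_user, params[:cluster], 'get_fence_agent_metadata',
        false, {:fence_agent => params[:fence_agent]}
      )
      return [code, out]
    end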
post '/managec/:cluster/fix_auth_of_cluster' do
@@ -1123,7 +763,6 @@ def pcs_compatibility_layer_known_hosts_add(
known_hosts = get_known_hosts().select { |name, obj|
host_list.include?(name)
}
- # try the new endpoint provided by pcs-0.10
known_hosts_request_data = {}
known_hosts.each { |host_name, host_obj|
known_hosts_request_data[host_name] = {
@@ -1149,50 +788,14 @@ def pcs_compatibility_layer_known_hosts_add(
)
end
- # a remote host supports the endpoint; success
- if retval == 200
- return 'success'
- end
-
- # a remote host supports the endpoint; error
- if retval != 404
- return 'error'
- end
-
- # a remote host does not support the endpoint
- # fallback to the old endpoint provided by pcs-0.9 since 0.9.140
- request_data = {}
- known_hosts.each { |host_name, host_obj|
- addr = host_obj.first_dest()['addr']
- port = host_obj.first_dest()['port']
- request_data["node:#{host_name}"] = host_obj.token
- request_data["port:#{host_name}"] = port
- request_data["node:#{addr}"] = host_obj.token
- request_data["port:#{addr}"] = port
- }
- if is_cluster_request
- retval, _out = send_cluster_request_with_token(
- auth_user, target, '/save_tokens', true, request_data
- )
- else
- retval, _out = send_request_with_token(
- auth_user, target, '/save_tokens', true, request_data
- )
- end
-
- # a remote host supports the endpoint; success
if retval == 200
return 'success'
end
- # a remote host supports the endpoint; error
- if retval != 404
- return 'error'
+ if retval == 404
+ return 'not_supported'
end
-
- # a remote host does not support any of the endpoints
- # there's nothing we can do about it
- return 'not_supported'
+ return 'error'
end
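With the pcs-0.9 /save_tokens fallback gone, the tail of
pcs_compatibility_layer_known_hosts_add reduces to a plain mapping from the
HTTP status to a result string; an equivalent sketch:

    # equivalent to the retval checks above
    def known_hosts_add_result(retval)
      case retval
      when 200 then 'success'
      when 404 then 'not_supported'
      else 'error'
      end
    end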
def pcs_compatibility_layer_get_cluster_known_hosts(cluster_name, target_node)
@@ -1200,11 +803,9 @@ def pcs_compatibility_layer_get_cluster_known_hosts(cluster_name, target_node)
known_hosts = []
auth_user = PCSAuth.getSuperuserAuth()
- # try the new endpoint provided by pcs-0.10
retval, out = send_request_with_token(
auth_user, target_node, '/get_cluster_known_hosts'
)
- # a remote host supports /get_cluster_known_hosts; data downloaded
if retval == 200
begin
JSON.parse(out).each { |name, data|
@@ -1222,159 +823,21 @@ def pcs_compatibility_layer_get_cluster_known_hosts(cluster_name, target_node)
"cannot get authentication info from cluster '#{cluster_name}'"
)
end
- return known_hosts, warning_messages
- end
-
-  # a remote host supports /get_cluster_known_hosts; an error occurred
- if retval != 404
+ elsif retval == 404
warning_messages << (
"Unable to automatically authenticate against cluster nodes: " +
- "cannot get authentication info from cluster '#{cluster_name}'"
+ "cluster '#{cluster_name}' is running an old version of pcs/pcsd"
)
- return known_hosts, warning_messages
- end
-
- # a remote host does not support /get_cluster_known_hosts
- # fallback to the old endpoint provided by pcs-0.9 since 0.9.140
- retval, out = send_request_with_token(
- auth_user, target_node, '/get_cluster_tokens', false, {'with_ports' => '1'}
- )
-
- # a remote host supports /get_cluster_tokens; data downloaded
- if retval == 200
- begin
- data = JSON.parse(out)
- expected_keys = ['tokens', 'ports']
- if expected_keys.all? {|i| data.has_key?(i) and data[i].class == Hash}
- # new format
- new_tokens = data["tokens"] || {}
- new_ports = data["ports"] || {}
- else
- # old format
- new_tokens = data
- new_ports = {}
- end
- new_tokens.each { |name_addr, token|
- known_hosts << PcsKnownHost.new(
- name_addr,
- token,
- [
- {
- 'addr' => name_addr,
- 'port' => (new_ports[name_addr] || PCSD_DEFAULT_PORT),
- }
- ]
- )
- }
- rescue => e
- $logger.error "Unable to parse the response of /get_cluster_tokens: #{e}"
- known_hosts = []
- warning_messages << (
- "Unable to automatically authenticate against cluster nodes: " +
- "cannot get authentication info from cluster '#{cluster_name}'"
- )
- end
- return known_hosts, warning_messages
- end
-
-  # a remote host supports /get_cluster_tokens; an error occurred
- if retval != 404
+ else
warning_messages << (
"Unable to automatically authenticate against cluster nodes: " +
"cannot get authentication info from cluster '#{cluster_name}'"
)
- return known_hosts, warning_messages
end
- # a remote host does not support /get_cluster_tokens
- # there's nothing we can do about it
- warning_messages << (
- "Unable to automatically authenticate against cluster nodes: " +
- "cluster '#{cluster_name}' is running an old version of pcs/pcsd"
- )
return known_hosts, warning_messages
end
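For illustration, a /get_cluster_known_hosts payload consistent with the
token/dest_list structure built by pcs_compatibility_layer_known_hosts_add
above (host name, token and port are hypothetical):

    out = JSON.generate({
      'node1' => {
        'token' => 'abcd1234',
        'dest_list' => [{'addr' => 'node1', 'port' => 2224}],
      },
    })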
-def pcs_0_9_142_resource_change_group(auth_user, params)
- parameters = {
- :resource_id => params[:resource_id],
- :resource_group => '',
- :_orig_resource_group => '',
- }
- parameters[:resource_group] = params[:group_id] if params[:group_id]
- if params[:old_group_id]
- parameters[:_orig_resource_group] = params[:old_group_id]
- end
- return send_cluster_request_with_token(
- auth_user, params[:cluster], 'update_resource', true, parameters
- )
-end
-
-def pcs_0_9_142_resource_clone(auth_user, params)
- parameters = {
- :resource_id => params[:resource_id],
- :resource_clone => true,
- :_orig_resource_clone => 'false',
- }
- return send_cluster_request_with_token(
- auth_user, params[:cluster], 'update_resource', true, parameters
- )
-end
-
-def pcs_0_9_142_resource_unclone(auth_user, params)
- parameters = {
- :resource_id => params[:resource_id],
- :resource_clone => nil,
- :_orig_resource_clone => 'true',
- }
- return send_cluster_request_with_token(
- auth_user, params[:cluster], 'update_resource', true, parameters
- )
-end
-
-def pcs_0_9_142_resource_master(auth_user, params)
- parameters = {
- :resource_id => params[:resource_id],
- :resource_ms => true,
- :_orig_resource_ms => 'false',
- }
- return send_cluster_request_with_token(
- auth_user, params[:cluster], 'update_resource', true, parameters
- )
-end
-
-# There is a bug in pcs-0.9.138 and older in processing standby and
-# unstandby requests. The JS of that pcsd always sent the node name in the
-# "node" parameter, which caused the pcsd daemon to run the standby command
-# locally with param["node"] as the node name. This worked fine if the local
-# cluster was managed from JS, as pacemaker simply put the requested node
-# into standby. However, it didn't work for managing non-local clusters, as
-# the command was always run on the local cluster. The pcsd daemon would
-# only send the request to a remote cluster if param["name"] was set, and
-# that never happened. That wouldn't have worked either, as the required
-# "node" parameter would then be missing from the request, causing an
-# exception on the receiving node. This is fixed in commit
-# 053f63ca109d9ef9e7f0416e90aab8e140480f5b
-#
-# In order to put nodes running pcs-0.9.138 into standby, the node name must
-# be sent in the "node" param, and "name" must not be sent.
-def pcs_0_9_138_node_standby(auth_user, params)
- translated_params = {
- 'node' => params[:name],
- }
- return send_cluster_request_with_token(
- auth_user, params[:cluster], 'node_standby', true, translated_params
- )
-end
-
-def pcs_0_9_138_node_unstandby(auth_user, params)
- translated_params = {
- 'node' => params[:name],
- }
- return send_cluster_request_with_token(
- auth_user, params[:cluster], 'node_unstandby', true, translated_params
- )
-end
-
def pcs_0_10_6_get_avail_resource_agents(code, out)
if code != 200
return code, out
@@ -1421,99 +884,9 @@ post '/managec/:cluster/?*' do
if params[:cluster]
request = "/" + params[:splat].join("/")
- # backward compatibility layer BEGIN
- translate_for_version = {
- '/node_standby' => [
- [[0, 9, 138], method(:pcs_0_9_138_node_standby)],
- ],
- '/node_unstandby' => [
- [[0, 9, 138], method(:pcs_0_9_138_node_unstandby)],
- ],
- }
- if translate_for_version.key?(request)
- target_pcsd_version = [0, 0, 0]
- version_code, version_out = send_cluster_request_with_token(
- auth_user, params[:cluster], 'get_sw_versions'
- )
- if version_code == 200
- begin
- versions = JSON.parse(version_out)
- target_pcsd_version = versions['pcs'] if versions['pcs']
- rescue JSON::ParserError
- end
- end
- translate_function = nil
- translate_for_version[request].each { |pair|
- if (target_pcsd_version <=> pair[0]) != 1 # target <= pair
- translate_function = pair[1]
- break
- end
- }
- end
- # backward compatibility layer END
-
- if translate_function
- code, out = translate_function.call(auth_user, params)
- else
- code, out = send_cluster_request_with_token(
- auth_user, params[:cluster], request, true, params, true, raw_data
- )
- end
-
- # backward compatibility layer BEGIN
- if code == 404
- case request
- # supported since pcs-0.9.143 (tree view of resources)
- when '/resource_change_group', 'resource_change_group'
- code, out = pcs_0_9_142_resource_change_group(auth_user, params)
- # supported since pcs-0.9.143 (tree view of resources)
- when '/resource_clone', 'resource_clone'
- code, out = pcs_0_9_142_resource_clone(auth_user, params)
- # supported since pcs-0.9.143 (tree view of resources)
- when '/resource_unclone', 'resource_unclone'
- code, out = pcs_0_9_142_resource_unclone(auth_user, params)
- # supported since pcs-0.9.143 (tree view of resources)
- when '/resource_master', 'resource_master'
- # defaults to true for old pcsds without capabilities defined
- supports_resource_master = true
- capabilities_code, capabilities_out = send_cluster_request_with_token(
- auth_user, params[:cluster], 'capabilities'
- )
- if capabilities_code == 200
- begin
- capabilities_json = JSON.parse(capabilities_out)
- supports_resource_master = capabilities_json[:pcsd_capabilities].include?(
- 'pcmk.resource.master'
- )
- rescue JSON::ParserError
- end
- end
- if supports_resource_master
- code, out = pcs_0_9_142_resource_master(auth_user, params)
- end
- else
- redirection = {
- # constraints removal for pcs-0.9.137 and older
- "/remove_constraint_remote" => "/resource_cmd/rm_constraint",
- # constraints removal for pcs-0.9.137 and older
- "/remove_constraint_rule_remote" => "/resource_cmd/rm_constraint_rule"
- }
- if redirection.key?(request)
- code, out = send_cluster_request_with_token(
- auth_user,
- params[:cluster],
- redirection[request],
- true,
- params,
- false,
- raw_data
- )
- end
- end
- end
- # backward compatibility layer END
-
- return code, out
+ return send_cluster_request_with_token(
+ auth_user, params[:cluster], request, true, params, true, raw_data
+ )
end
end
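For reference, the removed compatibility layer compared versions with
Array#<=>, which orders arrays element-wise, so the guard
"(target_pcsd_version <=> pair[0]) != 1" matched targets at or below the
pivot version:

    p([0, 9, 138] <=> [0, 9, 139])  # => -1, target older than pivot
    p([0, 9, 139] <=> [0, 9, 139])  # =>  0, equal
    p([0, 9, 140] <=> [0, 9, 139])  # =>  1, newer; the guard fails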
@@ -1548,17 +921,3 @@ get '*' do
redirect "Bad URL"
call(env.merge("PATH_INFO" => '/nodes'))
end
-
-def html2plain(text)
- return CGI.unescapeHTML(text).gsub(/<br[^>]*>/, "\n")
-end
-
-helpers do
- def h(text)
- Rack::Utils.escape_html(text)
- end
-
- def nl2br(text)
- text.gsub(/\n/, "<br>")
- end
-end
diff --git a/pcsd/remote.rb b/pcsd/remote.rb
index 1c019e98..e36f651f 100644
--- a/pcsd/remote.rb
+++ b/pcsd/remote.rb
@@ -25,14 +25,14 @@ def remote(params, request, auth_user)
remote_cmd_without_pacemaker = {
:capabilities => method(:capabilities),
:status => method(:node_status),
- :status_all => method(:status_all),
:cluster_status => method(:cluster_status_remote),
:cluster_status_plaintext => method(:cluster_status_plaintext),
:auth => method(:auth),
:check_auth => method(:check_auth),
+ # lib api:
+ # /api/v1/cluster-setup/v1
:cluster_setup => method(:cluster_setup),
:get_quorum_info => method(:get_quorum_info),
- :get_cib => method(:get_cib),
:get_corosync_conf => method(:get_corosync_conf_remote),
:set_corosync_conf => method(:set_corosync_conf),
:get_sync_capabilities => method(:get_sync_capabilities),
@@ -45,14 +45,6 @@ def remote(params, request, auth_user)
:cluster_start => method(:cluster_start),
:cluster_stop => method(:cluster_stop),
:config_restore => method(:config_restore),
- # TODO deprecated, remove, not used anymore
- :node_restart => method(:node_restart),
- # lib api:
- # /api/v1/node-standby-unstandby/v1
- :node_standby => method(:node_standby),
- # lib api:
- # /api/v1/node-standby-unstandby/v1
- :node_unstandby => method(:node_unstandby),
:cluster_enable => method(:cluster_enable),
:cluster_disable => method(:cluster_disable),
:get_sw_versions => method(:get_sw_versions),
@@ -69,12 +61,6 @@ def remote(params, request, auth_user)
:sbd_enable => method(:sbd_enable),
:remove_stonith_watchdog_timeout=> method(:remove_stonith_watchdog_timeout),
:set_stonith_watchdog_timeout_to_zero => method(:set_stonith_watchdog_timeout_to_zero),
- # lib api:
- # /api/v1/sbd-enable-sbd/v1
- :remote_enable_sbd => method(:remote_enable_sbd),
- # lib api:
- # /api/v1/sbd-disable-sbd/v1
- :remote_disable_sbd => method(:remote_disable_sbd),
:qdevice_net_get_ca_certificate => method(:qdevice_net_get_ca_certificate),
# lib api:
# /api/v1/qdevice-qdevice-net-sign-certificate-request/v1
@@ -100,9 +86,6 @@ def remote(params, request, auth_user)
# lib api:
# /api/v1/resource-agent-list-agents/v1
:get_avail_resource_agents => method(:get_avail_resource_agents),
- # lib api:
- # /api/v1/stonith-agent-list-agents/v1
- :get_avail_fence_agents => method(:get_avail_fence_agents),
}
remote_cmd_with_pacemaker = {
:pacemaker_node_status => method(:remote_pacemaker_node_status),
@@ -159,18 +142,6 @@ def remote(params, request, auth_user)
:get_fence_agent_metadata => method(:get_fence_agent_metadata),
:manage_resource => method(:manage_resource),
:unmanage_resource => method(:unmanage_resource),
- # lib api:
- # /api/v1/alert-create-alert/v1
- :create_alert => method(:create_alert),
- # lib api:
- # /api/v1/alert-update-alert/v1
- :update_alert => method(:update_alert),
- :create_recipient => method(:create_recipient),
- :update_recipient => method(:update_recipient),
- # lib api:
- # /api/v1/alert-remove-alert/v1
- # /api/v1/alert-remove-recipient/v1
- :remove_alerts_and_recipients => method("remove_alerts_and_recipients"),
}
command = params[:command].to_sym
@@ -193,6 +164,24 @@ def remote(params, request, auth_user)
end
end
+def _get_param_list(params)
+ param_line = []
+ meta_options = []
+ params.each { |param, val|
+ if param.start_with?("_res_paramne_") or (param.start_with?("_res_paramempty_") and val != "")
+ myparam = param.sub(/^_res_paramne_/,"").sub(/^_res_paramempty_/,"")
+ param_line << "#{myparam}=#{val}"
+ end
+ if param == "disabled"
+ meta_options << 'meta' << 'target-role=Stopped'
+ end
+ if param == "force" and val
+ param_line << "--force"
+ end
+ }
+ return param_line + meta_options
+end
+
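For illustration, hypothetical form params and what the new helper returns
for them (meta options are appended after the name=value pairs):

    _get_param_list({
      '_res_paramne_ip' => '192.168.0.10',   # always included
      '_res_paramempty_cidr_netmask' => '',  # empty value: skipped
      'disabled' => 'true',                  # adds meta target-role=Stopped
      'force' => 'true',                     # adds --force
    })
    # => ["ip=192.168.0.10", "--force", "meta", "target-role=Stopped"]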
def capabilities(params, request, auth_user)
return JSON.generate({
:pcsd_capabilities => CAPABILITIES_PCSD,
@@ -394,53 +383,6 @@ def config_restore(params, request, auth_user)
end
end
-# TODO deprecated, remove, not used anymore
-def node_restart(params, request, auth_user)
- if params[:name]
- code, response = send_request_with_token(
- auth_user, params[:name], 'node_restart', true
- )
- else
- if not allowed_for_local_cluster(auth_user, Permissions::WRITE)
- return 403, 'Permission denied'
- end
- $logger.info "Restarting Node"
- output = `/sbin/reboot`
- $logger.debug output
- return output
- end
-end
-
-def node_standby(params, request, auth_user)
- if params[:name]
- code, response = send_request_with_token(
- auth_user, params[:name], 'node_standby', true
- )
- else
- if not allowed_for_local_cluster(auth_user, Permissions::WRITE)
- return 403, 'Permission denied'
- end
- $logger.info "Standby Node"
- stdout, stderr, retval = run_cmd(auth_user, PCS, "node", "standby")
- return stdout
- end
-end
-
-def node_unstandby(params, request, auth_user)
- if params[:name]
- code, response = send_request_with_token(
- auth_user, params[:name], 'node_unstandby', true
- )
- else
- if not allowed_for_local_cluster(auth_user, Permissions::WRITE)
- return 403, 'Permission denied'
- end
- $logger.info "Unstandby Node"
- stdout, stderr, retval = run_cmd(auth_user, PCS, "node", "unstandby")
- return stdout
- end
-end
-
def cluster_enable(params, request, auth_user)
if params[:name]
code, response = send_request_with_token(
@@ -491,21 +433,6 @@ def get_quorum_info(params, request, auth_user)
end
end
-def get_cib(params, request, auth_user)
- if not allowed_for_local_cluster(auth_user, Permissions::READ)
- return 403, 'Permission denied'
- end
- cib, stderr, retval = run_cmd(auth_user, CIBADMIN, "-Ql")
- if retval != 0
- if not pacemaker_running?
- return [400, '{"pacemaker_not_running":true}']
- end
- return [500, "Unable to get CIB: " + cib.to_s + stderr.to_s]
- else
- return [200, cib]
- end
-end
-
def get_corosync_conf_remote(params, request, auth_user)
if not allowed_for_local_cluster(auth_user, Permissions::READ)
return 403, 'Permission denied'
@@ -912,66 +839,6 @@ def node_status(params, request, auth_user)
return [400, "Unsupported version '#{version}' of status requested"]
end
-def status_all(params, request, auth_user, nodes=[], dont_update_config=false)
- if nodes == nil
- return JSON.generate({"error" => "true"})
- end
-
- final_response = {}
- threads = []
- forbidden_nodes = {}
- nodes.each {|node|
- threads << Thread.new(Thread.current[:pcsd_logger_container]) { |logger|
- Thread.current[:pcsd_logger_container] = logger
- code, response = send_request_with_token(auth_user, node, 'status')
- if 403 == code
- forbidden_nodes[node] = true
- end
- begin
- final_response[node] = JSON.parse(response)
- rescue JSON::ParserError => e
- final_response[node] = {"bad_json" => true}
- $logger.info("ERROR: Parse Error when parsing status JSON from #{node}")
- end
- if final_response[node] and final_response[node]["notoken"] == true
- $logger.error("ERROR: bad token for #{node}")
- end
- }
- }
- threads.each { |t| t.join }
- if forbidden_nodes.length > 0
- return 403, 'Permission denied'
- end
-
- # Get full list of nodes and see if we need to update the configuration
- node_list = []
- final_response.each { |fr,n|
- node_list += n["corosync_offline"] if n["corosync_offline"]
- node_list += n["corosync_online"] if n["corosync_online"]
- node_list += n["pacemaker_offline"] if n["pacemaker_offline"]
- node_list += n["pacemaker_online"] if n["pacemaker_online"]
- }
-
- node_list.uniq!
- if node_list.length > 0
- config = PCSConfig.new(Cfgsync::PcsdSettings.from_file().text())
- old_node_list = config.get_nodes(params[:cluster])
- if !(dont_update_config or config.cluster_nodes_equal?(params[:cluster], node_list))
- $logger.info("Updating node list for: #{params[:cluster]} #{old_node_list}->#{node_list}")
- config.update_cluster(params[:cluster], node_list)
- sync_config = Cfgsync::PcsdSettings.from_text(config.text())
- # on version conflict just go on, config will be corrected eventually
- # by displaying the cluster in the web UI
- Cfgsync::save_sync_new_version(
- sync_config, get_corosync_nodes_names(), $cluster_name, true
- )
- return status_all(params, request, auth_user, node_list, true)
- end
- end
- $logger.debug("NODE LIST: " + node_list.inspect)
- return JSON.generate(final_response)
-end
-
def imported_cluster_list(params, request, auth_user)
config = PCSConfig.new(Cfgsync::PcsdSettings.from_file().text())
imported_clusters = {"cluster_list" => []}
@@ -981,173 +848,6 @@ def imported_cluster_list(params, request, auth_user)
return JSON.generate(imported_clusters)
end
-def clusters_overview(params, request, auth_user)
- cluster_map = {}
- forbidden_clusters = {}
- threads = []
- config = PCSConfig.new(Cfgsync::PcsdSettings.from_file().text())
- config.clusters.each { |cluster|
- threads << Thread.new(Thread.current[:pcsd_logger_container]) { |logger|
- Thread.current[:pcsd_logger_container] = logger
- cluster_map[cluster.name] = {
- 'cluster_name' => cluster.name,
- 'error_list' => [
- {'message' => 'Unable to connect to the cluster. Request timeout.'}
- ],
- 'warning_list' => [],
- 'status' => 'unknown',
- 'node_list' => get_default_overview_node_list(cluster.name),
- 'resource_list' => []
- }
- overview_cluster = nil
- online, offline, not_authorized_nodes = is_auth_against_nodes(
- auth_user,
- get_cluster_nodes(cluster.name),
- 3
- )
- not_supported = false
- forbidden = false
- cluster_nodes_auth = (online + offline).uniq
- cluster_nodes_all = (cluster_nodes_auth + not_authorized_nodes).uniq
- nodes_not_in_cluster = []
- for node in cluster_nodes_auth
- code, response = send_request_with_token(
- auth_user, node, 'cluster_status', true, {}, true, nil, 8
- )
- if code == 404
- not_supported = true
- next
- end
- if 403 == code
- forbidden = true
- forbidden_clusters[cluster.name] = true
- break
- end
- begin
- parsed_response = JSON.parse(response)
- if parsed_response['noresponse'] or parsed_response['pacemaker_not_running']
- next
- elsif parsed_response['notoken'] or parsed_response['notauthorized']
- next
- elsif parsed_response['cluster_name'] != cluster.name
- # queried node is not in the cluster (any more)
- nodes_not_in_cluster << node
- next
- else
- overview_cluster = parsed_response
- break
- end
- rescue JSON::ParserError
- end
- end
-
- if cluster_nodes_all.sort == nodes_not_in_cluster.sort
- overview_cluster = {
- 'cluster_name' => cluster.name,
- 'error_list' => [],
- 'warning_list' => [],
- 'status' => 'unknown',
- 'node_list' => [],
- 'resource_list' => []
- }
- end
-
- if not overview_cluster
- overview_cluster = {
- 'cluster_name' => cluster.name,
- 'error_list' => [],
- 'warning_list' => [],
- 'status' => 'unknown',
- 'node_list' => get_default_overview_node_list(cluster.name),
- 'resource_list' => []
- }
- if not_supported
- overview_cluster['warning_list'] = [
- {
- 'message' => 'Cluster is running an old version of pcs/pcsd which does not provide data for the dashboard.',
- },
- ]
- else
- if forbidden
- overview_cluster['error_list'] = [
- {
- 'message' => 'You do not have permissions to view the cluster.',
- 'type' => 'forbidden',
- },
- ]
- overview_cluster['node_list'] = []
- else
- overview_cluster['error_list'] = [
- {
- 'message' => 'Unable to connect to the cluster.',
- },
- ]
- end
- end
- end
- if not_authorized_nodes.length > 0
- overview_cluster['warning_list'] << {
- 'message' => 'GUI is not authorized against node(s) '\
- + not_authorized_nodes.join(', '),
- 'type' => 'nodes_not_authorized',
- 'node_list' => not_authorized_nodes,
- }
- end
-
- overview_cluster['node_list'].each { |node|
- if node['status_version'] == '1'
- overview_cluster['warning_list'] << {
- :message => 'Some nodes are running old version of pcs/pcsd.'
- }
- break
- end
- }
-
- cluster_map[cluster.name] = overview_cluster
- }
- }
-
- begin
- Timeout::timeout(18) {
- threads.each { |t| t.join }
- }
- rescue Timeout::Error
- threads.each { |t| t.exit }
- end
-
- # update clusters in PCSConfig
- not_current_data = false
- config = PCSConfig.new(Cfgsync::PcsdSettings.from_file().text())
- cluster_map.each { |cluster, values|
- next if forbidden_clusters[cluster]
- nodes = []
- values['node_list'].each { |node|
- nodes << node['name']
- }
- if !config.cluster_nodes_equal?(cluster, nodes)
- $logger.info("Updating node list for: #{cluster} #{config.get_nodes(cluster)}->#{nodes}")
- config.update_cluster(cluster, nodes)
- not_current_data = true
- end
- }
- if not_current_data
- sync_config = Cfgsync::PcsdSettings.from_text(config.text())
- # on version conflict just go on, config will be corrected eventually
- # by displaying the cluster in the web UI
- Cfgsync::save_sync_new_version(
- sync_config, get_corosync_nodes_names(), $cluster_name, true
- )
- end
-
- overview = {
- 'not_current_data' => not_current_data,
- 'cluster_list' => cluster_map.values.sort { |a, b|
- a['clustername'] <=> b['clustername']
- }
- }
- return JSON.generate(overview)
-end
-
def auth(params, request, auth_user)
# User authentication using username and password is done in python part of
# pcsd. We will get here only if credentials are correct, so we just need to
@@ -1220,7 +920,7 @@ def update_resource (params, request, auth_user)
return 403, 'Permission denied'
end
- param_line = getParamList(params)
+ param_line = _get_param_list(params)
if not params[:resource_id]
cmd = [PCS, "resource", "create", params[:name], params[:resource_type]]
cmd += param_line
@@ -1320,7 +1020,7 @@ def update_fence_device(params, request, auth_user)
$logger.info "Updating fence device"
$logger.info params
- param_line = getParamList(params)
+ param_line = _get_param_list(params)
$logger.info param_line
if not params[:resource_id]
@@ -1353,14 +1053,6 @@ def get_avail_resource_agents(params, request, auth_user)
return JSON.generate(getResourceAgents(auth_user).map{|a| [a, get_resource_agent_name_structure(a)]}.to_h)
end
-def get_avail_fence_agents(params, request, auth_user)
- if not allowed_for_local_cluster(auth_user, Permissions::READ)
- return 403, 'Permission denied'
- end
- agents = getFenceAgents(auth_user)
- return JSON.generate(agents)
-end
-
def remove_resource(params, request, auth_user)
if not allowed_for_local_cluster(auth_user, Permissions::WRITE)
return 403, 'Permission denied'
@@ -1740,18 +1432,6 @@ def update_cluster_settings(params, request, auth_user)
to_update = []
current = getAllSettings(auth_user)
- # We need to be able to set cluster properties also from older version GUI.
- # This code handles proper processing of checkboxes.
- # === backward compatibility layer start ===
- params['hidden'].each { |prop, val|
- next if prop == 'hidden_input'
- unless properties.include?(prop)
- properties[prop] = val
- to_update << prop
- end
- }
- # === backward compatibility layer end ===
-
properties.each { |prop, val|
val.strip!
if not current.include?(prop) and val != '' # add
@@ -2236,62 +1916,6 @@ def set_stonith_watchdog_timeout_to_zero(param, request, auth_user)
end
end
-def remote_enable_sbd(params, request, auth_user)
- unless allowed_for_local_cluster(auth_user, Permissions::WRITE)
- return 403, 'Permission denied'
- end
-
- arg_list = []
-
- if ['true', '1', 'on'].include?(params[:ignore_offline_nodes])
- arg_list << '--skip-offline'
- end
-
- params[:watchdog].each do |node, watchdog|
- unless watchdog.strip.empty?
- arg_list << "watchdog=#{watchdog.strip}@#{node}"
- end
- end
-
- params[:config].each do |option, value|
- unless value.empty?
- arg_list << "#{option}=#{value}"
- end
- end
-
- _, stderr, retcode = run_cmd(
- auth_user, PCS, 'stonith', 'sbd', 'enable', *arg_list
- )
-
- if retcode != 0
- return [400, "Unable to enable sbd in cluster:\n#{stderr.join('')}"]
- end
-
- return [200, 'Sbd has been enabled.']
-end
-
-def remote_disable_sbd(params, request, auth_user)
- unless allowed_for_local_cluster(auth_user, Permissions::WRITE)
- return 403, 'Permission denied'
- end
-
- arg_list = []
-
- if ['true', '1', 'on'].include?(params[:ignore_offline_nodes])
- arg_list << '--skip-offline'
- end
-
- _, stderr, retcode = run_cmd(
- auth_user, PCS, 'stonith', 'sbd', 'disable', *arg_list
- )
-
- if retcode != 0
- return [400, "Unable to disable sbd in cluster:\n#{stderr.join('')}"]
- end
-
- return [200, 'Sbd has been disabled.']
-end
-
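The removed SBD handlers were thin wrappers over the pcs CLI (their
replacements are the lib API endpoints noted in the dispatch table above);
for reference, the argument list remote_enable_sbd would build for
hypothetical inputs:

    # hypothetical inputs: skip offline nodes, one watchdog, one SBD option
    arg_list = ['--skip-offline', 'watchdog=/dev/watchdog@node1',
                'SBD_WATCHDOG_TIMEOUT=10']
    # executed as: pcs stonith sbd enable --skip-offline \
    #   watchdog=/dev/watchdog@node1 SBD_WATCHDOG_TIMEOUT=10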
def qdevice_net_get_ca_certificate(params, request, auth_user)
unless allowed_for_local_cluster(auth_user, Permissions::READ)
return 403, 'Permission denied'
@@ -2697,145 +2321,6 @@ def manage_services(params, request, auth_user)
end
end
-def _hash_to_argument_list(hash)
- result = []
- if hash.kind_of?(Hash)
- hash.each {|key, value|
- value = '' if value.nil?
- result << "#{key}=#{value}"
- }
- end
- return result
-end
-
-def create_alert(params, request, auth_user)
- unless allowed_for_local_cluster(auth_user, Permissions::WRITE)
- return 403, 'Permission denied'
- end
- path = params[:path]
- unless path
- return [400, 'Missing required parameter: path']
- end
- alert_id = params[:alert_id]
- description = params[:description]
- meta_attr_list = _hash_to_argument_list(params[:meta_attr])
- instance_attr_list = _hash_to_argument_list(params[:instance_attr])
- cmd = [PCS, 'alert', 'create', "path=#{path}"]
- cmd << "id=#{alert_id}" if alert_id and alert_id != ''
- cmd << "description=#{description}" if description and description != ''
- cmd += ['options', *instance_attr_list] if instance_attr_list.any?
- cmd += ['meta', *meta_attr_list] if meta_attr_list.any?
- output, stderr, retval = run_cmd(auth_user, *cmd)
- if retval != 0
- return [400, "Unable to create alert: #{stderr.join("\n")}"]
- end
- return [200, 'Alert created']
-end
-
-def update_alert(params, request, auth_user)
- unless allowed_for_local_cluster(auth_user, Permissions::WRITE)
- return 403, 'Permission denied'
- end
- alert_id = params[:alert_id]
- unless alert_id
- return [400, 'Missing required parameter: alert_id']
- end
- path = params[:path]
- description = params[:description]
- meta_attr_list = _hash_to_argument_list(params[:meta_attr])
- instance_attr_list = _hash_to_argument_list(params[:instance_attr])
- cmd = [PCS, 'alert', 'update', alert_id]
- cmd << "path=#{path}" if path
- cmd << "description=#{description}" if description
- cmd += ['options', *instance_attr_list] if instance_attr_list.any?
- cmd += ['meta', *meta_attr_list] if meta_attr_list.any?
- output, stderr, retval = run_cmd(auth_user, *cmd)
- if retval != 0
- return [400, "Unable to update alert: #{stderr.join("\n")}"]
- end
- return [200, 'Alert updated']
-end
-
-def remove_alerts_and_recipients(params, request, auth_user)
- unless allowed_for_local_cluster(auth_user, Permissions::WRITE)
- return 403, 'Permission denied'
- end
- alert_list = params[:alert_list]
- recipient_list = params[:recipient_list]
- if recipient_list.kind_of?(Array) and recipient_list.any?
- output, stderr, retval = run_cmd(
- auth_user, PCS, 'alert', 'recipient', 'remove', *recipient_list
- )
- if retval != 0
- return [400, "Unable to remove recipients: #{stderr.join("\n")}"]
- end
- end
- if alert_list.kind_of?(Array) and alert_list.any?
- output, stderr, retval = run_cmd(
- auth_user, PCS, 'alert', 'remove', *alert_list
- )
- if retval != 0
- return [400, "Unable to remove alerts: #{stderr.join("\n")}"]
- end
- end
- return [200, 'All removed']
-end
-
-def create_recipient(params, request, auth_user)
- unless allowed_for_local_cluster(auth_user, Permissions::WRITE)
- return 403, 'Permission denied'
- end
- alert_id = params[:alert_id]
- if not alert_id or alert_id.strip! == ''
-    return [400, 'Missing required parameter: alert_id']
- end
- value = params[:value]
- if not value or value == ''
-    return [400, 'Missing required parameter: value']
- end
- recipient_id = params[:recipient_id]
- description = params[:description]
- meta_attr_list = _hash_to_argument_list(params[:meta_attr])
- instance_attr_list = _hash_to_argument_list(params[:instance_attr])
- cmd = [PCS, 'alert', 'recipient', 'add', alert_id, "value=#{value}"]
- cmd << "id=#{recipient_id}" if recipient_id and recipient_id != ''
- cmd << "description=#{description}" if description and description != ''
- cmd += ['options', *instance_attr_list] if instance_attr_list.any?
- cmd += ['meta', *meta_attr_list] if meta_attr_list.any?
- output, stderr, retval = run_cmd(auth_user, *cmd)
- if retval != 0
- return [400, "Unable to create recipient: #{stderr.join("\n")}"]
- end
- return [200, 'Recipient created']
-end
-
-def update_recipient(params, request, auth_user)
- unless allowed_for_local_cluster(auth_user, Permissions::WRITE)
- return 403, 'Permission denied'
- end
- recipient_id = params[:recipient_id]
- if not recipient_id or recipient_id.strip! == ''
-    return [400, 'Missing required parameter: recipient_id']
- end
- value = params[:value]
- if value and value.strip! == ''
-    return [400, 'Parameter value cannot be an empty string']
- end
- description = params[:description]
- meta_attr_list = _hash_to_argument_list(params[:meta_attr])
- instance_attr_list = _hash_to_argument_list(params[:instance_attr])
- cmd = [PCS, 'alert', 'recipient', 'update', recipient_id]
- cmd << "value=#{value}" if value
- cmd << "description=#{description}" if description
- cmd += ['options', *instance_attr_list] if instance_attr_list.any?
- cmd += ['meta', *meta_attr_list] if meta_attr_list.any?
- output, stderr, retval = run_cmd(auth_user, *cmd)
- if retval != 0
- return [400, "Unable to update recipient: #{stderr.join("\n")}"]
- end
- return [200, 'Recipient updated']
-end
-
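The removed alert handlers likewise shelled out to pcs; for illustration,
the command create_alert would assemble for hypothetical params (a path, an
id, one instance attribute and one meta attribute):

    cmd = [PCS, 'alert', 'create',
           'path=/usr/share/pacemaker/alerts/alert_smtp.sh',  # hypothetical
           'id=smtp_alert',
           'options', 'email_sender=admin@example.com',
           'meta', 'timeout=30s']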
def pcsd_success(msg)
$logger.info(msg)
return [200, msg]
diff --git a/pcsd/resource.rb b/pcsd/resource.rb
index e49422f8..27894cc9 100644
--- a/pcsd/resource.rb
+++ b/pcsd/resource.rb
@@ -103,11 +103,8 @@ def get_resource_agent_name_structure(agent_name)
match = expression.match(agent_name)
if match
provider = match.names.include?('provider') ? match[:provider] : nil
- class_provider = provider.nil? ? match[:standard] : "#{match[:standard]}:#{provider}"
return {
:full_name => agent_name,
- # TODO remove, this is only used by the old web UI
- :class_provider => class_provider,
:class => match[:standard],
:provider => provider,
:type => match[:type],
diff --git a/pcsd/rserver.rb b/pcsd/rserver.rb
index c37f9df4..e2c5e2a1 100644
--- a/pcsd/rserver.rb
+++ b/pcsd/rserver.rb
@@ -26,7 +26,6 @@ class TornadoCommunicationMiddleware
session = JSON.parse(Base64.strict_decode64(env["HTTP_X_PCSD_PAYLOAD"]))
Thread.current[:tornado_username] = session["username"]
Thread.current[:tornado_groups] = session["groups"]
- Thread.current[:tornado_is_authenticated] = session["is_authenticated"]
end
status, headers, body = @app.call(env)
diff --git a/pcsd/test/test_resource.rb b/pcsd/test/test_resource.rb
index 1eb0d3aa..97679eca 100644
--- a/pcsd/test/test_resource.rb
+++ b/pcsd/test/test_resource.rb
@@ -8,7 +8,6 @@ class GetResourceAgentNameStructure < Test::Unit::TestCase
get_resource_agent_name_structure('standard:provider:type'),
{
:full_name => 'standard:provider:type',
- :class_provider => 'standard:provider',
:class => 'standard',
:provider => 'provider',
:type => 'type',
@@ -21,7 +20,6 @@ class GetResourceAgentNameStructure < Test::Unit::TestCase
get_resource_agent_name_structure('standard:type'),
{
:full_name => 'standard:type',
- :class_provider => 'standard',
:class => 'standard',
:provider => nil,
:type => 'type',
@@ -34,7 +32,6 @@ class GetResourceAgentNameStructure < Test::Unit::TestCase
get_resource_agent_name_structure('systemd:service@instance:name'),
{
:full_name => 'systemd:service@instance:name',
- :class_provider => 'systemd',
:class => 'systemd',
:provider => nil,
:type => 'service@instance:name',
@@ -47,7 +44,6 @@ class GetResourceAgentNameStructure < Test::Unit::TestCase
get_resource_agent_name_structure('service:service@instance:name'),
{
:full_name => 'service:service@instance:name',
- :class_provider => 'service',
:class => 'service',
:provider => nil,
:type => 'service@instance:name',
--
2.31.1