diff --git a/.gitignore b/.gitignore index 670f8a6..c3430ba 100644 --- a/.gitignore +++ b/.gitignore @@ -1,20 +1,23 @@ SOURCES/HAM-logo.png -SOURCES/backports-3.11.4.gem +SOURCES/backports-3.17.2.gem +SOURCES/dacite-1.5.0.tar.gz SOURCES/daemons-1.3.1.gem -SOURCES/ethon-0.11.0.gem +SOURCES/dataclasses-0.6.tar.gz +SOURCES/ethon-0.12.0.gem SOURCES/eventmachine-1.2.7.gem -SOURCES/ffi-1.9.25.gem +SOURCES/ffi-1.13.1.gem SOURCES/json-2.3.0.gem -SOURCES/mustermann-1.0.3.gem +SOURCES/mustermann-1.1.1.gem SOURCES/open4-1.3.4-1.gem -SOURCES/pcs-0.10.4.tar.gz -SOURCES/pcs-web-ui-0.1.2.tar.gz -SOURCES/pcs-web-ui-node-modules-0.1.2.tar.xz +SOURCES/pcs-0.10.6.tar.gz +SOURCES/pcs-web-ui-0.1.4.tar.gz +SOURCES/pcs-web-ui-node-modules-0.1.3.tar.xz SOURCES/pyagentx-0.4.pcs.2.tar.gz -SOURCES/rack-2.0.6.gem -SOURCES/rack-protection-2.0.4.gem -SOURCES/rack-test-1.0.0.gem -SOURCES/sinatra-2.0.4.gem +SOURCES/rack-2.2.3.gem +SOURCES/rack-protection-2.0.8.1.gem +SOURCES/rack-test-1.1.0.gem +SOURCES/ruby2_keywords-0.0.2.gem +SOURCES/sinatra-2.0.8.1.gem SOURCES/thin-1.7.2.gem -SOURCES/tilt-2.0.9.gem -SOURCES/tornado-6.0.3.tar.gz +SOURCES/tilt-2.0.10.gem +SOURCES/tornado-6.0.4.tar.gz diff --git a/.pcs.metadata b/.pcs.metadata index 3b37b33..e3f6601 100644 --- a/.pcs.metadata +++ b/.pcs.metadata @@ -1,20 +1,23 @@ 679a4ce22a33ffd4d704261a17c00cff98d9499a SOURCES/HAM-logo.png -edf08f3a0d9e202048857d78ddda44e59294084c SOURCES/backports-3.11.4.gem +28b63a742124da6c9575a1c5e7d7331ef93600b2 SOURCES/backports-3.17.2.gem +c14ee49221d8e1b09364b5f248bc3da12484f675 SOURCES/dacite-1.5.0.tar.gz e28c1e78d1a6e34e80f4933b494f1e0501939dd3 SOURCES/daemons-1.3.1.gem -3c921ceeb2847be8cfa25704be74923e233786bd SOURCES/ethon-0.11.0.gem +81079b734108084eea0ae1c05a1cab0e806a3a1d SOURCES/dataclasses-0.6.tar.gz +921ef1be44583a7644ee7f20fe5f26f21d018a04 SOURCES/ethon-0.12.0.gem 7a5b2896e210fac9759c786ee4510f265f75b481 SOURCES/eventmachine-1.2.7.gem -86fa011857f977254ccf39f507587310f9ade768 SOURCES/ffi-1.9.25.gem +cfa25e7a3760c3ec16723cb8263d9b7a52d0eadf SOURCES/ffi-1.13.1.gem 0230e8c5a37f1543982e5b04be503dd5f9004b47 SOURCES/json-2.3.0.gem -2d090e7d3cd2a35efeaeacf006100fb83b828686 SOURCES/mustermann-1.0.3.gem +50a4e37904485810cb05e27d75c9783e5a8f3402 SOURCES/mustermann-1.1.1.gem 41a7fe9f8e3e02da5ae76c821b89c5b376a97746 SOURCES/open4-1.3.4-1.gem -d2b649f271580b18d39efffa93f62b55291ef55d SOURCES/pcs-0.10.4.tar.gz -8ac1291ce8f56073b74149ac56acc094337a3298 SOURCES/pcs-web-ui-0.1.2.tar.gz -52599fe9c17bda8cc0cad1acf830a9114b8b6db6 SOURCES/pcs-web-ui-node-modules-0.1.2.tar.xz +73fafb4228326c14a799f0cccbcb734ab7ba2bfa SOURCES/pcs-0.10.6.tar.gz +d67de4d5cefd9ba3cde45c7ec4a5d1e9b1e6032a SOURCES/pcs-web-ui-0.1.4.tar.gz +3e09042e3dc32c992451ba4c0454f2879f0d3f40 SOURCES/pcs-web-ui-node-modules-0.1.3.tar.xz 3176b2f2b332c2b6bf79fe882e83feecf3d3f011 SOURCES/pyagentx-0.4.pcs.2.tar.gz -b15267e1f94e69238a00a6f1bd48fb7683c03a78 SOURCES/rack-2.0.6.gem -c1376e5678322b401d988d261762a78bf2cf3361 SOURCES/rack-protection-2.0.4.gem -4c99cf0a82372a1bc5968c1551d9e606b68b4879 SOURCES/rack-test-1.0.0.gem -1c85f05c874bc8c0bf9c40291ea2d430090cdfd9 SOURCES/sinatra-2.0.4.gem +345b7169d4d2d62176a225510399963bad62b68f SOURCES/rack-2.2.3.gem +1f046e23baca8beece3b38c60382f44aa2b2cb41 SOURCES/rack-protection-2.0.8.1.gem +b80bc5ca38a885e747271675ba91dd3d02136bf1 SOURCES/rack-test-1.1.0.gem +0be571aacb5d6a212a30af3f322a7000d8af1ef9 SOURCES/ruby2_keywords-0.0.2.gem +04cca7a5d9d641fe076e4e24dc5b6ff31922f4c3 SOURCES/sinatra-2.0.8.1.gem 41395e86322ffd31f3a7aef1f697bda3e1e2d6b9 
SOURCES/thin-1.7.2.gem -55a75a80e29731d072fe44dfaf865479b65c27fd SOURCES/tilt-2.0.9.gem -126c66189fc5b26a39c9b54eb17254652cca8b27 SOURCES/tornado-6.0.3.tar.gz +d265c822a6b228392d899e9eb5114613d65e6967 SOURCES/tilt-2.0.10.gem +e177f2a092dc5f23b0b3078e40adf52e17a9f8a6 SOURCES/tornado-6.0.4.tar.gz diff --git a/SOURCES/bz1676431-01-Display-status-of-disaster-recovery.patch b/SOURCES/bz1676431-01-Display-status-of-disaster-recovery.patch deleted file mode 100644 index bd37518..0000000 --- a/SOURCES/bz1676431-01-Display-status-of-disaster-recovery.patch +++ /dev/null @@ -1,5055 +0,0 @@ -From 7cf137380bc80653c50747a1d4d70783d593fcb5 Mon Sep 17 00:00:00 2001 -From: Miroslav Lisik -Date: Fri, 29 Nov 2019 12:16:11 +0100 -Subject: [PATCH 1/3] squash bz1676431 Display status of disaster recovery site - -support DR config in node add, node remove, cluster destroy - -dr: add command for setting recovery site - -improve typing - -move tests - -dr: add a command for displaying clusters' status - -dr: add a command for displaying dr config - -dr: add 'destroy' sub-command - -dr: review based fixes - -update capabilities, changelog ---- - CHANGELOG.md | 9 + - pcs/app.py | 2 + - pcs/cli/common/console_report.py | 16 +- - pcs/cli/common/lib_wrapper.py | 13 + - pcs/cli/dr.py | 138 ++++ - pcs/cli/routing/dr.py | 15 + - pcs/cluster.py | 1 + - pcs/common/dr.py | 109 +++ - pcs/common/file_type_codes.py | 27 +- - pcs/common/report_codes.py | 3 + - pcs/lib/commands/cluster.py | 18 +- - pcs/lib/commands/dr.py | 316 ++++++++ - pcs/lib/communication/corosync.py | 28 + - pcs/lib/communication/status.py | 97 +++ - pcs/lib/dr/__init__.py | 0 - pcs/lib/dr/config/__init__.py | 0 - pcs/lib/dr/config/facade.py | 49 ++ - pcs/lib/dr/env.py | 28 + - pcs/lib/env.py | 17 + - pcs/lib/file/instance.py | 21 +- - pcs/lib/file/metadata.py | 8 + - pcs/lib/file/toolbox.py | 80 +- - pcs/lib/node.py | 5 +- - pcs/lib/node_communication_format.py | 16 + - pcs/lib/reports.py | 31 + - pcs/pcs.8 | 18 +- - pcs/pcs_internal.py | 1 + - pcs/settings_default.py | 1 + - pcs/usage.py | 32 +- - .../tier0/cli/common/test_console_report.py | 24 + - pcs_test/tier0/cli/test_dr.py | 293 +++++++ - pcs_test/tier0/common/test_dr.py | 167 ++++ - .../lib/commands/cluster/test_add_nodes.py | 143 +++- - pcs_test/tier0/lib/commands/dr/__init__.py | 0 - .../tier0/lib/commands/dr/test_destroy.py | 342 ++++++++ - .../tier0/lib/commands/dr/test_get_config.py | 134 ++++ - .../lib/commands/dr/test_set_recovery_site.py | 702 ++++++++++++++++ - pcs_test/tier0/lib/commands/dr/test_status.py | 756 ++++++++++++++++++ - .../tier0/lib/communication/test_status.py | 7 + - pcs_test/tier0/lib/dr/__init__.py | 0 - pcs_test/tier0/lib/dr/test_facade.py | 138 ++++ - pcs_test/tier0/lib/test_env.py | 42 +- - .../tools/command_env/config_corosync_conf.py | 9 +- - pcs_test/tools/command_env/config_http.py | 3 + - .../tools/command_env/config_http_corosync.py | 24 + - .../tools/command_env/config_http_files.py | 28 +- - .../tools/command_env/config_http_status.py | 52 ++ - .../mock_get_local_corosync_conf.py | 12 +- - pcsd/capabilities.xml | 12 + - pcsd/pcsd_file.rb | 15 + - pcsd/pcsd_remove_file.rb | 7 + - pcsd/remote.rb | 19 +- - pcsd/settings.rb | 1 + - pcsd/settings.rb.debian | 1 + - pylintrc | 2 +- - 55 files changed, 3964 insertions(+), 68 deletions(-) - create mode 100644 pcs/cli/dr.py - create mode 100644 pcs/cli/routing/dr.py - create mode 100644 pcs/common/dr.py - create mode 100644 pcs/lib/commands/dr.py - create mode 100644 pcs/lib/communication/status.py - create mode 100644 
pcs/lib/dr/__init__.py - create mode 100644 pcs/lib/dr/config/__init__.py - create mode 100644 pcs/lib/dr/config/facade.py - create mode 100644 pcs/lib/dr/env.py - create mode 100644 pcs_test/tier0/cli/test_dr.py - create mode 100644 pcs_test/tier0/common/test_dr.py - create mode 100644 pcs_test/tier0/lib/commands/dr/__init__.py - create mode 100644 pcs_test/tier0/lib/commands/dr/test_destroy.py - create mode 100644 pcs_test/tier0/lib/commands/dr/test_get_config.py - create mode 100644 pcs_test/tier0/lib/commands/dr/test_set_recovery_site.py - create mode 100644 pcs_test/tier0/lib/commands/dr/test_status.py - create mode 100644 pcs_test/tier0/lib/communication/test_status.py - create mode 100644 pcs_test/tier0/lib/dr/__init__.py - create mode 100644 pcs_test/tier0/lib/dr/test_facade.py - create mode 100644 pcs_test/tools/command_env/config_http_status.py - -diff --git a/CHANGELOG.md b/CHANGELOG.md -index 69e6da44..889436c3 100644 ---- a/CHANGELOG.md -+++ b/CHANGELOG.md -@@ -1,5 +1,14 @@ - # Change Log - -+## [Unreleased] -+ -+### Added -+- It is possible to configure a disaster-recovery site and display its status -+ ([rhbz#1676431]) -+ -+[rhbz#1676431]: https://bugzilla.redhat.com/show_bug.cgi?id=1676431 -+ -+ - ## [0.10.4] - 2019-11-28 - - ### Added -diff --git a/pcs/app.py b/pcs/app.py -index 8df07c1d..defc4055 100644 ---- a/pcs/app.py -+++ b/pcs/app.py -@@ -25,6 +25,7 @@ from pcs.cli.routing import ( - cluster, - config, - constraint, -+ dr, - host, - node, - pcsd, -@@ -245,6 +246,7 @@ def main(argv=None): - "booth": booth.booth_cmd, - "host": host.host_cmd, - "client": client.client_cmd, -+ "dr": dr.dr_cmd, - "help": lambda lib, argv, modifiers: usage.main(), - } - try: -diff --git a/pcs/cli/common/console_report.py b/pcs/cli/common/console_report.py -index 0a730cfa..d349c823 100644 ---- a/pcs/cli/common/console_report.py -+++ b/pcs/cli/common/console_report.py -@@ -2,6 +2,7 @@ - from collections import defaultdict - from collections.abc import Iterable - from functools import partial -+from typing import Mapping - import sys - - from pcs.common import ( -@@ -46,6 +47,7 @@ _file_role_translation = { - file_type_codes.BOOTH_CONFIG: "Booth configuration", - file_type_codes.BOOTH_KEY: "Booth key", - file_type_codes.COROSYNC_AUTHKEY: "Corosync authkey", -+ file_type_codes.PCS_DR_CONFIG: "disaster-recovery configuration", - file_type_codes.PACEMAKER_AUTHKEY: "Pacemaker authkey", - file_type_codes.PCSD_ENVIRONMENT_CONFIG: "pcsd configuration", - file_type_codes.PCSD_SSL_CERT: "pcsd SSL certificate", -@@ -53,7 +55,7 @@ _file_role_translation = { - file_type_codes.PCS_KNOWN_HOSTS: "known-hosts", - file_type_codes.PCS_SETTINGS_CONF: "pcs configuration", - } --_file_role_to_option_translation = { -+_file_role_to_option_translation: Mapping[str, str] = { - file_type_codes.BOOTH_CONFIG: "--booth-conf", - file_type_codes.BOOTH_KEY: "--booth-key", - file_type_codes.CIB: "-f", -@@ -2284,4 +2286,16 @@ CODE_TO_MESSAGE_BUILDER_MAP = { - "resources\n\n{crm_simulate_plaintext_output}" - ).format(**info) - , -+ -+ codes.DR_CONFIG_ALREADY_EXIST: lambda info: ( -+ "Disaster-recovery already configured" -+ ).format(**info), -+ -+ codes.DR_CONFIG_DOES_NOT_EXIST: lambda info: ( -+ "Disaster-recovery is not configured" -+ ).format(**info), -+ -+ codes.NODE_IN_LOCAL_CLUSTER: lambda info: ( -+ "Node '{node}' is part of local cluster" -+ ).format(**info), - } -diff --git a/pcs/cli/common/lib_wrapper.py b/pcs/cli/common/lib_wrapper.py -index 27b7d8b1..4ef6bf2f 100644 ---- a/pcs/cli/common/lib_wrapper.py -+++ 
b/pcs/cli/common/lib_wrapper.py -@@ -9,6 +9,7 @@ from pcs.lib.commands import ( - booth, - cib_options, - cluster, -+ dr, - fencing_topology, - node, - pcsd, -@@ -183,6 +184,18 @@ def load_module(env, middleware_factory, name): - } - ) - -+ if name == "dr": -+ return bind_all( -+ env, -+ middleware.build(middleware_factory.corosync_conf_existing), -+ { -+ "get_config": dr.get_config, -+ "destroy": dr.destroy, -+ "set_recovery_site": dr.set_recovery_site, -+ "status_all_sites_plaintext": dr.status_all_sites_plaintext, -+ } -+ ) -+ - if name == "remote_node": - return bind_all( - env, -diff --git a/pcs/cli/dr.py b/pcs/cli/dr.py -new file mode 100644 -index 00000000..c6830aa0 ---- /dev/null -+++ b/pcs/cli/dr.py -@@ -0,0 +1,138 @@ -+from typing import ( -+ Any, -+ List, -+ Sequence, -+) -+ -+from pcs.cli.common.console_report import error -+from pcs.cli.common.errors import CmdLineInputError -+from pcs.cli.common.parse_args import InputModifiers -+from pcs.common import report_codes -+from pcs.common.dr import ( -+ DrConfigDto, -+ DrConfigSiteDto, -+ DrSiteStatusDto, -+) -+from pcs.common.tools import indent -+ -+def config( -+ lib: Any, -+ argv: Sequence[str], -+ modifiers: InputModifiers, -+) -> None: -+ """ -+ Options: None -+ """ -+ modifiers.ensure_only_supported() -+ if argv: -+ raise CmdLineInputError() -+ config_raw = lib.dr.get_config() -+ try: -+ config_dto = DrConfigDto.from_dict(config_raw) -+ except (KeyError, TypeError, ValueError): -+ raise error( -+ "Unable to communicate with pcsd, received response:\n" -+ f"{config_raw}" -+ ) -+ -+ lines = ["Local site:"] -+ lines.extend(indent(_config_site_lines(config_dto.local_site))) -+ for site_dto in config_dto.remote_site_list: -+ lines.append("Remote site:") -+ lines.extend(indent(_config_site_lines(site_dto))) -+ print("\n".join(lines)) -+ -+def _config_site_lines(site_dto: DrConfigSiteDto) -> List[str]: -+ lines = [f"Role: {site_dto.site_role.capitalize()}"] -+ if site_dto.node_list: -+ lines.append("Nodes:") -+ lines.extend(indent(sorted([node.name for node in site_dto.node_list]))) -+ return lines -+ -+ -+def set_recovery_site( -+ lib: Any, -+ argv: Sequence[str], -+ modifiers: InputModifiers, -+) -> None: -+ """ -+ Options: -+ * --request-timeout - HTTP timeout for node authorization check -+ """ -+ modifiers.ensure_only_supported("--request-timeout") -+ if len(argv) != 1: -+ raise CmdLineInputError() -+ lib.dr.set_recovery_site(argv[0]) -+ -+def status( -+ lib: Any, -+ argv: Sequence[str], -+ modifiers: InputModifiers, -+) -> None: -+ """ -+ Options: -+ * --full - show full details, node attributes and failcount -+ * --hide-inactive - hide inactive resources -+ * --request-timeout - HTTP timeout for node authorization check -+ """ -+ modifiers.ensure_only_supported( -+ "--full", "--hide-inactive", "--request-timeout", -+ ) -+ if argv: -+ raise CmdLineInputError() -+ -+ status_list_raw = lib.dr.status_all_sites_plaintext( -+ hide_inactive_resources=modifiers.get("--hide-inactive"), -+ verbose=modifiers.get("--full"), -+ ) -+ try: -+ status_list = [ -+ DrSiteStatusDto.from_dict(status_raw) -+ for status_raw in status_list_raw -+ ] -+ except (KeyError, TypeError, ValueError): -+ raise error( -+ "Unable to communicate with pcsd, received response:\n" -+ f"{status_list_raw}" -+ ) -+ -+ has_errors = False -+ plaintext_parts = [] -+ for site_status in status_list: -+ plaintext_parts.append( -+ "--- {local_remote} cluster - {role} site ---".format( -+ local_remote=("Local" if site_status.local_site else "Remote"), -+ 
role=site_status.site_role.capitalize() -+ ) -+ ) -+ if site_status.status_successfully_obtained: -+ plaintext_parts.append(site_status.status_plaintext.strip()) -+ plaintext_parts.extend(["", ""]) -+ else: -+ has_errors = True -+ plaintext_parts.extend([ -+ "Error: Unable to get status of the cluster from any node", -+ "" -+ ]) -+ print("\n".join(plaintext_parts).strip()) -+ if has_errors: -+ raise error("Unable to get status of all sites") -+ -+ -+def destroy( -+ lib: Any, -+ argv: Sequence[str], -+ modifiers: InputModifiers, -+) -> None: -+ """ -+ Options: -+ * --skip-offline - skip unreachable nodes (including missing auth token) -+ * --request-timeout - HTTP timeout for node authorization check -+ """ -+ modifiers.ensure_only_supported("--skip-offline", "--request-timeout") -+ if argv: -+ raise CmdLineInputError() -+ force_flags = [] -+ if modifiers.get("--skip-offline"): -+ force_flags.append(report_codes.SKIP_OFFLINE_NODES) -+ lib.dr.destroy(force_flags=force_flags) -diff --git a/pcs/cli/routing/dr.py b/pcs/cli/routing/dr.py -new file mode 100644 -index 00000000..dbf44c1c ---- /dev/null -+++ b/pcs/cli/routing/dr.py -@@ -0,0 +1,15 @@ -+from pcs import usage -+from pcs.cli import dr -+from pcs.cli.common.routing import create_router -+ -+dr_cmd = create_router( -+ { -+ "help": lambda lib, argv, modifiers: usage.dr(argv), -+ "config": dr.config, -+ "destroy": dr.destroy, -+ "set-recovery-site": dr.set_recovery_site, -+ "status": dr.status, -+ }, -+ ["dr"], -+ default_cmd="help", -+) -diff --git a/pcs/cluster.py b/pcs/cluster.py -index 3a931b60..9473675f 100644 ---- a/pcs/cluster.py -+++ b/pcs/cluster.py -@@ -1209,6 +1209,7 @@ def cluster_destroy(lib, argv, modifiers): - settings.corosync_conf_file, - settings.corosync_authkey_file, - settings.pacemaker_authkey_file, -+ settings.pcsd_dr_config_location, - ]) - state_files = [ - "cib-*", -diff --git a/pcs/common/dr.py b/pcs/common/dr.py -new file mode 100644 -index 00000000..1648d93d ---- /dev/null -+++ b/pcs/common/dr.py -@@ -0,0 +1,109 @@ -+from enum import auto -+from typing import ( -+ Any, -+ Iterable, -+ Mapping, -+) -+ -+from pcs.common.interface.dto import DataTransferObject -+from pcs.common.tools import AutoNameEnum -+ -+ -+class DrRole(AutoNameEnum): -+ PRIMARY = auto() -+ RECOVERY = auto() -+ -+ -+class DrConfigNodeDto(DataTransferObject): -+ def __init__(self, name: str): -+ self.name = name -+ -+ def to_dict(self) -> Mapping[str, Any]: -+ return dict(name=self.name) -+ -+ @classmethod -+ def from_dict(cls, payload: Mapping[str, Any]) -> "DrConfigNodeDto": -+ return cls(payload["name"]) -+ -+ -+class DrConfigSiteDto(DataTransferObject): -+ def __init__( -+ self, -+ site_role: DrRole, -+ node_list: Iterable[DrConfigNodeDto] -+ ): -+ self.site_role = site_role -+ self.node_list = node_list -+ -+ def to_dict(self) -> Mapping[str, Any]: -+ return dict( -+ site_role=self.site_role.value, -+ node_list=[node.to_dict() for node in self.node_list] -+ ) -+ -+ @classmethod -+ def from_dict(cls, payload: Mapping[str, Any]) -> "DrConfigSiteDto": -+ return cls( -+ DrRole(payload["site_role"]), -+ [ -+ DrConfigNodeDto.from_dict(payload_node) -+ for payload_node in payload["node_list"] -+ ], -+ ) -+ -+ -+class DrConfigDto(DataTransferObject): -+ def __init__( -+ self, -+ local_site: DrConfigSiteDto, -+ remote_site_list: Iterable[DrConfigSiteDto] -+ ): -+ self.local_site = local_site -+ self.remote_site_list = remote_site_list -+ -+ def to_dict(self) -> Mapping[str, Any]: -+ return dict( -+ local_site=self.local_site.to_dict(), -+ 
remote_site_list=[site.to_dict() for site in self.remote_site_list], -+ ) -+ -+ @classmethod -+ def from_dict(cls, payload: Mapping[str, Any]) -> "DrConfigDto": -+ return cls( -+ DrConfigSiteDto.from_dict(payload["local_site"]), -+ [ -+ DrConfigSiteDto.from_dict(payload_site) -+ for payload_site in payload["remote_site_list"] -+ ], -+ ) -+ -+ -+class DrSiteStatusDto(DataTransferObject): -+ def __init__( -+ self, -+ local_site: bool, -+ site_role: DrRole, -+ status_plaintext: str, -+ status_successfully_obtained: bool -+ ): -+ self.local_site = local_site -+ self.site_role = site_role -+ self.status_plaintext = status_plaintext -+ self.status_successfully_obtained = status_successfully_obtained -+ -+ def to_dict(self) -> Mapping[str, Any]: -+ return dict( -+ local_site=self.local_site, -+ site_role=self.site_role.value, -+ status_plaintext=self.status_plaintext, -+ status_successfully_obtained=self.status_successfully_obtained, -+ ) -+ -+ @classmethod -+ def from_dict(cls, payload: Mapping[str, Any]) -> "DrSiteStatusDto": -+ return cls( -+ payload["local_site"], -+ DrRole(payload["site_role"]), -+ payload["status_plaintext"], -+ payload["status_successfully_obtained"], -+ ) -diff --git a/pcs/common/file_type_codes.py b/pcs/common/file_type_codes.py -index 9c801180..967aa76b 100644 ---- a/pcs/common/file_type_codes.py -+++ b/pcs/common/file_type_codes.py -@@ -1,11 +1,16 @@ --BOOTH_CONFIG = "BOOTH_CONFIG" --BOOTH_KEY = "BOOTH_KEY" --CIB = "CIB" --COROSYNC_AUTHKEY = "COROSYNC_AUTHKEY" --COROSYNC_CONF = "COROSYNC_CONF" --PACEMAKER_AUTHKEY = "PACEMAKER_AUTHKEY" --PCSD_ENVIRONMENT_CONFIG = "PCSD_ENVIRONMENT_CONFIG" --PCSD_SSL_CERT = "PCSD_SSL_CERT" --PCSD_SSL_KEY = "PCSD_SSL_KEY" --PCS_KNOWN_HOSTS = "PCS_KNOWN_HOSTS" --PCS_SETTINGS_CONF = "PCS_SETTINGS_CONF" -+from typing import NewType -+ -+FileTypeCode = NewType("FileTypeCode", str) -+ -+BOOTH_CONFIG = FileTypeCode("BOOTH_CONFIG") -+BOOTH_KEY = FileTypeCode("BOOTH_KEY") -+CIB = FileTypeCode("CIB") -+COROSYNC_AUTHKEY = FileTypeCode("COROSYNC_AUTHKEY") -+COROSYNC_CONF = FileTypeCode("COROSYNC_CONF") -+PACEMAKER_AUTHKEY = FileTypeCode("PACEMAKER_AUTHKEY") -+PCSD_ENVIRONMENT_CONFIG = FileTypeCode("PCSD_ENVIRONMENT_CONFIG") -+PCSD_SSL_CERT = FileTypeCode("PCSD_SSL_CERT") -+PCSD_SSL_KEY = FileTypeCode("PCSD_SSL_KEY") -+PCS_KNOWN_HOSTS = FileTypeCode("PCS_KNOWN_HOSTS") -+PCS_SETTINGS_CONF = FileTypeCode("PCS_SETTINGS_CONF") -+PCS_DR_CONFIG = FileTypeCode("PCS_DR_CONFIG") -diff --git a/pcs/common/report_codes.py b/pcs/common/report_codes.py -index 4e3433a8..514ac079 100644 ---- a/pcs/common/report_codes.py -+++ b/pcs/common/report_codes.py -@@ -141,6 +141,8 @@ COROSYNC_TRANSPORT_UNSUPPORTED_OPTIONS = "COROSYNC_TRANSPORT_UNSUPPORTED_OPTIONS - CRM_MON_ERROR = "CRM_MON_ERROR" - DEFAULTS_CAN_BE_OVERRIDEN = "DEFAULTS_CAN_BE_OVERRIDEN" - DEPRECATED_OPTION = "DEPRECATED_OPTION" -+DR_CONFIG_ALREADY_EXIST = "DR_CONFIG_ALREADY_EXIST" -+DR_CONFIG_DOES_NOT_EXIST = "DR_CONFIG_DOES_NOT_EXIST" - DUPLICATE_CONSTRAINTS_EXIST = "DUPLICATE_CONSTRAINTS_EXIST" - EMPTY_RESOURCE_SET_LIST = "EMPTY_RESOURCE_SET_LIST" - EMPTY_ID = "EMPTY_ID" -@@ -203,6 +205,7 @@ NONE_HOST_FOUND = "NONE_HOST_FOUND" - NODE_USED_AS_TIE_BREAKER = "NODE_USED_AS_TIE_BREAKER" - NODES_TO_REMOVE_UNREACHABLE = "NODES_TO_REMOVE_UNREACHABLE" - NODE_TO_CLEAR_IS_STILL_IN_CLUSTER = "NODE_TO_CLEAR_IS_STILL_IN_CLUSTER" -+NODE_IN_LOCAL_CLUSTER = "NODE_IN_LOCAL_CLUSTER" - OMITTING_NODE = "OMITTING_NODE" - OBJECT_WITH_ID_IN_UNEXPECTED_CONTEXT = "OBJECT_WITH_ID_IN_UNEXPECTED_CONTEXT" - 
PACEMAKER_LOCAL_NODE_NAME_NOT_FOUND = "PACEMAKER_LOCAL_NODE_NAME_NOT_FOUND" -diff --git a/pcs/lib/commands/cluster.py b/pcs/lib/commands/cluster.py -index 64015864..f30dcb25 100644 ---- a/pcs/lib/commands/cluster.py -+++ b/pcs/lib/commands/cluster.py -@@ -777,7 +777,7 @@ def add_nodes( - skip_wrong_config=force, - ) - -- # distribute corosync and pacemaker authkeys -+ # distribute corosync and pacemaker authkeys and other config files - files_action = {} - forceable_io_error_creator = reports.get_problem_creator( - report_codes.SKIP_FILE_DISTRIBUTION_ERRORS, force -@@ -814,6 +814,22 @@ def add_nodes( - file_path=settings.pacemaker_authkey_file, - )) - -+ if os.path.isfile(settings.pcsd_dr_config_location): -+ try: -+ files_action.update( -+ node_communication_format.pcs_dr_config_file( -+ open(settings.pcsd_dr_config_location, "rb").read() -+ ) -+ ) -+ except EnvironmentError as e: -+ report_processor.report(forceable_io_error_creator( -+ reports.file_io_error, -+ file_type_codes.PCS_DR_CONFIG, -+ RawFileError.ACTION_READ, -+ format_environment_error(e), -+ file_path=settings.pcsd_dr_config_location, -+ )) -+ - # pcs_settings.conf was previously synced using pcsdcli send_local_configs. - # This has been changed temporarily until new system for distribution and - # syncronization of configs will be introduced. -diff --git a/pcs/lib/commands/dr.py b/pcs/lib/commands/dr.py -new file mode 100644 -index 00000000..41ddb5cb ---- /dev/null -+++ b/pcs/lib/commands/dr.py -@@ -0,0 +1,316 @@ -+from typing import ( -+ Any, -+ Container, -+ Iterable, -+ List, -+ Mapping, -+ Tuple, -+) -+ -+from pcs.common import file_type_codes, report_codes -+from pcs.common.dr import ( -+ DrConfigDto, -+ DrConfigNodeDto, -+ DrConfigSiteDto, -+ DrSiteStatusDto, -+) -+from pcs.common.file import RawFileError -+from pcs.common.node_communicator import RequestTarget -+from pcs.common.reports import SimpleReportProcessor -+ -+from pcs.lib import node_communication_format, reports -+from pcs.lib.communication.corosync import GetCorosyncConf -+from pcs.lib.communication.nodes import ( -+ DistributeFilesWithoutForces, -+ RemoveFilesWithoutForces, -+) -+from pcs.lib.communication.status import GetFullClusterStatusPlaintext -+from pcs.lib.communication.tools import ( -+ run as run_com_cmd, -+ run_and_raise, -+) -+from pcs.lib.corosync.config_facade import ConfigFacade as CorosyncConfigFacade -+from pcs.lib.dr.config.facade import ( -+ DrRole, -+ Facade as DrConfigFacade, -+) -+from pcs.lib.env import LibraryEnvironment -+from pcs.lib.errors import LibraryError, ReportItemList -+from pcs.lib.file.instance import FileInstance -+from pcs.lib.file.raw_file import raw_file_error_report -+from pcs.lib.file.toolbox import for_file_type as get_file_toolbox -+from pcs.lib.interface.config import ParserErrorException -+from pcs.lib.node import get_existing_nodes_names -+ -+ -+def get_config(env: LibraryEnvironment) -> Mapping[str, Any]: -+ """ -+ Return local disaster recovery config -+ -+ env -- LibraryEnvironment -+ """ -+ report_processor = SimpleReportProcessor(env.report_processor) -+ report_list, dr_config = _load_dr_config(env.get_dr_env().config) -+ report_processor.report_list(report_list) -+ if report_processor.has_errors: -+ raise LibraryError() -+ -+ return DrConfigDto( -+ DrConfigSiteDto( -+ dr_config.local_role, -+ [] -+ ), -+ [ -+ DrConfigSiteDto( -+ site.role, -+ [DrConfigNodeDto(name) for name in site.node_name_list] -+ ) -+ for site in dr_config.get_remote_site_list() -+ ] -+ ).to_dict() -+ -+ -+def 
set_recovery_site(env: LibraryEnvironment, node_name: str) -> None: -+ """ -+ Set up disaster recovery with the local cluster being the primary site -+ -+ env -+ node_name -- a known host from the recovery site -+ """ -+ if env.ghost_file_codes: -+ raise LibraryError( -+ reports.live_environment_required(env.ghost_file_codes) -+ ) -+ report_processor = SimpleReportProcessor(env.report_processor) -+ dr_env = env.get_dr_env() -+ if dr_env.config.raw_file.exists(): -+ report_processor.report(reports.dr_config_already_exist()) -+ target_factory = env.get_node_target_factory() -+ -+ local_nodes, report_list = get_existing_nodes_names( -+ env.get_corosync_conf(), -+ error_on_missing_name=True -+ ) -+ report_processor.report_list(report_list) -+ -+ if node_name in local_nodes: -+ report_processor.report(reports.node_in_local_cluster(node_name)) -+ -+ report_list, local_targets = target_factory.get_target_list_with_reports( -+ local_nodes, allow_skip=False, report_none_host_found=False -+ ) -+ report_processor.report_list(report_list) -+ -+ report_list, remote_targets = ( -+ target_factory.get_target_list_with_reports( -+ [node_name], allow_skip=False, report_none_host_found=False -+ ) -+ ) -+ report_processor.report_list(report_list) -+ -+ if report_processor.has_errors: -+ raise LibraryError() -+ -+ com_cmd = GetCorosyncConf(env.report_processor) -+ com_cmd.set_targets(remote_targets) -+ remote_cluster_nodes, report_list = get_existing_nodes_names( -+ CorosyncConfigFacade.from_string( -+ run_and_raise(env.get_node_communicator(), com_cmd) -+ ), -+ error_on_missing_name=True -+ ) -+ if report_processor.report_list(report_list): -+ raise LibraryError() -+ -+ # ensure we have tokens for all nodes of remote cluster -+ report_list, remote_targets = target_factory.get_target_list_with_reports( -+ remote_cluster_nodes, allow_skip=False, report_none_host_found=False -+ ) -+ if report_processor.report_list(report_list): -+ raise LibraryError() -+ dr_config_exporter = ( -+ get_file_toolbox(file_type_codes.PCS_DR_CONFIG).exporter -+ ) -+ # create dr config for remote cluster -+ remote_dr_cfg = dr_env.create_facade(DrRole.RECOVERY) -+ remote_dr_cfg.add_site(DrRole.PRIMARY, local_nodes) -+ # send config to all node of remote cluster -+ distribute_file_cmd = DistributeFilesWithoutForces( -+ env.report_processor, -+ node_communication_format.pcs_dr_config_file( -+ dr_config_exporter.export(remote_dr_cfg.config) -+ ) -+ ) -+ distribute_file_cmd.set_targets(remote_targets) -+ run_and_raise(env.get_node_communicator(), distribute_file_cmd) -+ # create new dr config, with local cluster as primary site -+ local_dr_cfg = dr_env.create_facade(DrRole.PRIMARY) -+ local_dr_cfg.add_site(DrRole.RECOVERY, remote_cluster_nodes) -+ distribute_file_cmd = DistributeFilesWithoutForces( -+ env.report_processor, -+ node_communication_format.pcs_dr_config_file( -+ dr_config_exporter.export(local_dr_cfg.config) -+ ) -+ ) -+ distribute_file_cmd.set_targets(local_targets) -+ run_and_raise(env.get_node_communicator(), distribute_file_cmd) -+ # Note: No token sync across multiple clusters. Most probably they are in -+ # different subnetworks. 
-+ -+ -+def status_all_sites_plaintext( -+ env: LibraryEnvironment, -+ hide_inactive_resources: bool = False, -+ verbose: bool = False, -+) -> List[Mapping[str, Any]]: -+ """ -+ Return local site's and all remote sites' status as plaintext -+ -+ env -- LibraryEnvironment -+ hide_inactive_resources -- if True, do not display non-running resources -+ verbose -- if True, display more info -+ """ -+ # The command does not provide an option to skip offline / unreacheable / -+ # misbehaving nodes. -+ # The point of such skipping is to stop a command if it is unable to make -+ # changes on all nodes. The user can then decide to proceed anyway and -+ # make changes on the skipped nodes later manually. -+ # This command only reads from nodes so it automatically asks other nodes -+ # if one is offline / misbehaving. -+ class SiteData(): -+ local: bool -+ role: DrRole -+ target_list: Iterable[RequestTarget] -+ status_loaded: bool -+ status_plaintext: str -+ -+ def __init__(self, local, role, target_list): -+ self.local = local -+ self.role = role -+ self.target_list = target_list -+ self.status_loaded = False -+ self.status_plaintext = "" -+ -+ -+ if env.ghost_file_codes: -+ raise LibraryError( -+ reports.live_environment_required(env.ghost_file_codes) -+ ) -+ -+ report_processor = SimpleReportProcessor(env.report_processor) -+ report_list, dr_config = _load_dr_config(env.get_dr_env().config) -+ report_processor.report_list(report_list) -+ if report_processor.has_errors: -+ raise LibraryError() -+ -+ site_data_list = [] -+ target_factory = env.get_node_target_factory() -+ -+ # get local nodes -+ local_nodes, report_list = get_existing_nodes_names(env.get_corosync_conf()) -+ report_processor.report_list(report_list) -+ report_list, local_targets = target_factory.get_target_list_with_reports( -+ local_nodes, -+ skip_non_existing=True, -+ ) -+ report_processor.report_list(report_list) -+ site_data_list.append(SiteData(True, dr_config.local_role, local_targets)) -+ -+ # get remote sites' nodes -+ for conf_remote_site in dr_config.get_remote_site_list(): -+ report_list, remote_targets = ( -+ target_factory.get_target_list_with_reports( -+ conf_remote_site.node_name_list, -+ skip_non_existing=True, -+ ) -+ ) -+ report_processor.report_list(report_list) -+ site_data_list.append( -+ SiteData(False, conf_remote_site.role, remote_targets) -+ ) -+ if report_processor.has_errors: -+ raise LibraryError() -+ -+ # get all statuses -+ for site_data in site_data_list: -+ com_cmd = GetFullClusterStatusPlaintext( -+ report_processor, -+ hide_inactive_resources=hide_inactive_resources, -+ verbose=verbose, -+ ) -+ com_cmd.set_targets(site_data.target_list) -+ site_data.status_loaded, site_data.status_plaintext = run_com_cmd( -+ env.get_node_communicator(), com_cmd -+ ) -+ -+ return [ -+ DrSiteStatusDto( -+ site_data.local, -+ site_data.role, -+ site_data.status_plaintext, -+ site_data.status_loaded, -+ ).to_dict() -+ for site_data in site_data_list -+ ] -+ -+def _load_dr_config( -+ config_file: FileInstance, -+) -> Tuple[ReportItemList, DrConfigFacade]: -+ if not config_file.raw_file.exists(): -+ return [reports.dr_config_does_not_exist()], DrConfigFacade.empty() -+ try: -+ return [], config_file.read_to_facade() -+ except RawFileError as e: -+ return [raw_file_error_report(e)], DrConfigFacade.empty() -+ except ParserErrorException as e: -+ return ( -+ config_file.parser_exception_to_report_list(e), -+ DrConfigFacade.empty() -+ ) -+ -+ -+def destroy(env: LibraryEnvironment, force_flags: Container[str] = ()) -> None: -+ 
""" -+ Destroy disaster-recovery configuration on all sites -+ """ -+ if env.ghost_file_codes: -+ raise LibraryError( -+ reports.live_environment_required(env.ghost_file_codes) -+ ) -+ -+ report_processor = SimpleReportProcessor(env.report_processor) -+ skip_offline = report_codes.SKIP_OFFLINE_NODES in force_flags -+ -+ report_list, dr_config = _load_dr_config(env.get_dr_env().config) -+ report_processor.report_list(report_list) -+ -+ if report_processor.has_errors: -+ raise LibraryError() -+ -+ local_nodes, report_list = get_existing_nodes_names(env.get_corosync_conf()) -+ report_processor.report_list(report_list) -+ -+ if report_processor.has_errors: -+ raise LibraryError() -+ -+ remote_nodes: List[str] = [] -+ for conf_remote_site in dr_config.get_remote_site_list(): -+ remote_nodes.extend(conf_remote_site.node_name_list) -+ -+ target_factory = env.get_node_target_factory() -+ report_list, targets = target_factory.get_target_list_with_reports( -+ remote_nodes + local_nodes, skip_non_existing=skip_offline, -+ ) -+ report_processor.report_list(report_list) -+ if report_processor.has_errors: -+ raise LibraryError() -+ -+ com_cmd = RemoveFilesWithoutForces( -+ env.report_processor, { -+ "pcs disaster-recovery config": { -+ "type": "pcs_disaster_recovery_conf", -+ }, -+ }, -+ ) -+ com_cmd.set_targets(targets) -+ run_and_raise(env.get_node_communicator(), com_cmd) -diff --git a/pcs/lib/communication/corosync.py b/pcs/lib/communication/corosync.py -index 0f3c3787..1a78e0de 100644 ---- a/pcs/lib/communication/corosync.py -+++ b/pcs/lib/communication/corosync.py -@@ -138,3 +138,31 @@ class ReloadCorosyncConf( - def on_complete(self): - if not self.__was_successful and self.__has_failures: - self._report(reports.unable_to_perform_operation_on_any_node()) -+ -+ -+class GetCorosyncConf( -+ AllSameDataMixin, OneByOneStrategyMixin, RunRemotelyBase -+): -+ __was_successful = False -+ __has_failures = False -+ __corosync_conf = None -+ -+ def _get_request_data(self): -+ return RequestData("remote/get_corosync_conf") -+ -+ def _process_response(self, response): -+ report = response_to_report_item( -+ response, severity=ReportItemSeverity.WARNING -+ ) -+ if report is not None: -+ self.__has_failures = True -+ self._report(report) -+ return self._get_next_list() -+ self.__corosync_conf = response.data -+ self.__was_successful = True -+ return [] -+ -+ def on_complete(self): -+ if not self.__was_successful and self.__has_failures: -+ self._report(reports.unable_to_perform_operation_on_any_node()) -+ return self.__corosync_conf -diff --git a/pcs/lib/communication/status.py b/pcs/lib/communication/status.py -new file mode 100644 -index 00000000..3470415a ---- /dev/null -+++ b/pcs/lib/communication/status.py -@@ -0,0 +1,97 @@ -+import json -+from typing import Tuple -+ -+from pcs.common.node_communicator import RequestData -+from pcs.lib import reports -+from pcs.lib.communication.tools import ( -+ AllSameDataMixin, -+ OneByOneStrategyMixin, -+ RunRemotelyBase, -+) -+from pcs.lib.errors import ReportItemSeverity -+from pcs.lib.node_communication import response_to_report_item -+ -+ -+class GetFullClusterStatusPlaintext( -+ AllSameDataMixin, OneByOneStrategyMixin, RunRemotelyBase -+): -+ def __init__( -+ self, report_processor, hide_inactive_resources=False, verbose=False -+ ): -+ super().__init__(report_processor) -+ self._hide_inactive_resources = hide_inactive_resources -+ self._verbose = verbose -+ self._cluster_status = "" -+ self._was_successful = False -+ -+ def _get_request_data(self): -+ return 
RequestData( -+ "remote/cluster_status_plaintext", -+ [ -+ ( -+ "data_json", -+ json.dumps(dict( -+ hide_inactive_resources=self._hide_inactive_resources, -+ verbose=self._verbose, -+ )) -+ ) -+ ], -+ ) -+ -+ def _process_response(self, response): -+ report = response_to_report_item( -+ response, severity=ReportItemSeverity.WARNING -+ ) -+ if report is not None: -+ self._report(report) -+ return self._get_next_list() -+ -+ node = response.request.target.label -+ try: -+ output = json.loads(response.data) -+ if output["status"] == "success": -+ self._was_successful = True -+ self._cluster_status = output["data"] -+ return [] -+ if output["status_msg"]: -+ self._report( -+ reports.node_communication_command_unsuccessful( -+ node, -+ response.request.action, -+ output["status_msg"] -+ ) -+ ) -+ # TODO Node name should be added to each received report item and -+ # those modified report itemss should be reported. That, however, -+ # requires reports overhaul which would add posibility to add a -+ # node name to any report item. Also, infos and warnings should not -+ # be ignored. -+ if output["report_list"]: -+ for report_data in output["report_list"]: -+ if ( -+ report_data["severity"] == ReportItemSeverity.ERROR -+ and -+ report_data["report_text"] -+ ): -+ self._report( -+ reports.node_communication_command_unsuccessful( -+ node, -+ response.request.action, -+ report_data["report_text"] -+ ) -+ ) -+ except (ValueError, LookupError, TypeError): -+ self._report(reports.invalid_response_format( -+ node, -+ severity=ReportItemSeverity.WARNING, -+ )) -+ -+ return self._get_next_list() -+ -+ def on_complete(self) -> Tuple[bool, str]: -+ # Usually, reports.unable_to_perform_operation_on_any_node is reported -+ # when the operation was unsuccessful and failed on at least one node. -+ # The only use case this communication command is used does not need -+ # that report and on top of that the report causes confusing ouptut for -+ # the user. The report may be added in a future if needed. 
-+ return self._was_successful, self._cluster_status -diff --git a/pcs/lib/dr/__init__.py b/pcs/lib/dr/__init__.py -new file mode 100644 -index 00000000..e69de29b -diff --git a/pcs/lib/dr/config/__init__.py b/pcs/lib/dr/config/__init__.py -new file mode 100644 -index 00000000..e69de29b -diff --git a/pcs/lib/dr/config/facade.py b/pcs/lib/dr/config/facade.py -new file mode 100644 -index 00000000..f3187ba5 ---- /dev/null -+++ b/pcs/lib/dr/config/facade.py -@@ -0,0 +1,49 @@ -+from typing import ( -+ Iterable, -+ List, -+ NamedTuple, -+) -+ -+from pcs.common.dr import DrRole -+from pcs.lib.interface.config import FacadeInterface -+ -+ -+class DrSite(NamedTuple): -+ role: DrRole -+ node_name_list: List[str] -+ -+ -+class Facade(FacadeInterface): -+ @classmethod -+ def create(cls, local_role: DrRole) -> "Facade": -+ return cls(dict( -+ local=dict( -+ role=local_role.value, -+ ), -+ remote_sites=[], -+ )) -+ -+ @classmethod -+ def empty(cls) -> "Facade": -+ return cls(dict()) -+ -+ @property -+ def local_role(self) -> DrRole: -+ return DrRole(self._config["local"]["role"]) -+ -+ def add_site(self, role: DrRole, node_list: Iterable[str]) -> None: -+ self._config["remote_sites"].append( -+ dict( -+ role=role.value, -+ nodes=[dict(name=node) for node in node_list], -+ ) -+ ) -+ -+ def get_remote_site_list(self) -> List[DrSite]: -+ return [ -+ DrSite( -+ DrRole(conf_site["role"]), -+ [node["name"] for node in conf_site["nodes"]] -+ ) -+ for conf_site in self._config.get("remote_sites", []) -+ ] -diff --git a/pcs/lib/dr/env.py b/pcs/lib/dr/env.py -new file mode 100644 -index 00000000..c73ee622 ---- /dev/null -+++ b/pcs/lib/dr/env.py -@@ -0,0 +1,28 @@ -+from pcs.common import file_type_codes -+ -+from pcs.lib.file.instance import FileInstance -+from pcs.lib.file.toolbox import ( -+ for_file_type as get_file_toolbox, -+ FileToolbox, -+) -+ -+from .config.facade import ( -+ DrRole, -+ Facade, -+) -+ -+class DrEnv: -+ def __init__(self): -+ self._config_file = FileInstance.for_dr_config() -+ -+ @staticmethod -+ def create_facade(role: DrRole) -> Facade: -+ return Facade.create(role) -+ -+ @property -+ def config(self) -> FileInstance: -+ return self._config_file -+ -+ @staticmethod -+ def get_config_toolbox() -> FileToolbox: -+ return get_file_toolbox(file_type_codes.PCS_DR_CONFIG) -diff --git a/pcs/lib/env.py b/pcs/lib/env.py -index 66f7b1a4..0b12103e 100644 ---- a/pcs/lib/env.py -+++ b/pcs/lib/env.py -@@ -3,11 +3,13 @@ from typing import ( - ) - from xml.etree.ElementTree import Element - -+from pcs.common import file_type_codes - from pcs.common.node_communicator import Communicator, NodeCommunicatorFactory - from pcs.common.tools import Version - from pcs.lib import reports - from pcs.lib.booth.env import BoothEnv - from pcs.lib.cib.tools import get_cib_crm_feature_set -+from pcs.lib.dr.env import DrEnv - from pcs.lib.node import get_existing_nodes_names - from pcs.lib.communication import qdevice - from pcs.lib.communication.corosync import ( -@@ -89,6 +91,7 @@ class LibraryEnvironment: - self._request_timeout - ) - self.__loaded_booth_env = None -+ self.__loaded_dr_env = None - - self.__timeout_cache = {} - -@@ -108,6 +111,15 @@ class LibraryEnvironment: - def user_groups(self): - return self._user_groups - -+ @property -+ def ghost_file_codes(self): -+ codes = set() -+ if not self.is_cib_live: -+ codes.add(file_type_codes.CIB) -+ if not self.is_corosync_conf_live: -+ codes.add(file_type_codes.COROSYNC_CONF) -+ return codes -+ - def get_cib(self, minimal_version: Optional[Version] = None) -> Element: 
- if self.__loaded_cib_diff_source is not None: - raise AssertionError("CIB has already been loaded") -@@ -412,3 +424,8 @@ class LibraryEnvironment: - if self.__loaded_booth_env is None: - self.__loaded_booth_env = BoothEnv(name, self._booth_files_data) - return self.__loaded_booth_env -+ -+ def get_dr_env(self) -> DrEnv: -+ if self.__loaded_dr_env is None: -+ self.__loaded_dr_env = DrEnv() -+ return self.__loaded_dr_env -diff --git a/pcs/lib/file/instance.py b/pcs/lib/file/instance.py -index da6b760c..f0812c2d 100644 ---- a/pcs/lib/file/instance.py -+++ b/pcs/lib/file/instance.py -@@ -51,18 +51,27 @@ class FileInstance(): - """ - Factory for known-hosts file - """ -- file_type_code = file_type_codes.PCS_KNOWN_HOSTS -- return cls( -- raw_file.RealFile(metadata.for_file_type(file_type_code)), -- toolbox.for_file_type(file_type_code) -- ) -+ return cls._for_common(file_type_codes.PCS_KNOWN_HOSTS) - - @classmethod - def for_pacemaker_key(cls): - """ - Factory for pacemaker key file - """ -- file_type_code = file_type_codes.PACEMAKER_AUTHKEY -+ return cls._for_common(file_type_codes.PACEMAKER_AUTHKEY) -+ -+ @classmethod -+ def for_dr_config(cls) -> "FileInstance": -+ """ -+ Factory for disaster-recovery config file -+ """ -+ return cls._for_common(file_type_codes.PCS_DR_CONFIG) -+ -+ @classmethod -+ def _for_common( -+ cls, -+ file_type_code: file_type_codes.FileTypeCode, -+ ) -> "FileInstance": - return cls( - raw_file.RealFile(metadata.for_file_type(file_type_code)), - toolbox.for_file_type(file_type_code) -diff --git a/pcs/lib/file/metadata.py b/pcs/lib/file/metadata.py -index 175e5ac1..72701aed 100644 ---- a/pcs/lib/file/metadata.py -+++ b/pcs/lib/file/metadata.py -@@ -50,6 +50,14 @@ _metadata = { - permissions=0o600, - is_binary=False, - ), -+ code.PCS_DR_CONFIG: lambda: FileMetadata( -+ file_type_code=code.PCS_DR_CONFIG, -+ path=settings.pcsd_dr_config_location, -+ owner_user_name="root", -+ owner_group_name="root", -+ permissions=0o600, -+ is_binary=False, -+ ) - } - - def for_file_type(file_type_code, *args, **kwargs): -diff --git a/pcs/lib/file/toolbox.py b/pcs/lib/file/toolbox.py -index 5d827887..db852617 100644 ---- a/pcs/lib/file/toolbox.py -+++ b/pcs/lib/file/toolbox.py -@@ -1,4 +1,9 @@ --from collections import namedtuple -+from typing import ( -+ Any, -+ Dict, -+ NamedTuple, -+ Type, -+) - import json - - from pcs.common import file_type_codes as code -@@ -8,6 +13,8 @@ from pcs.lib.booth.config_parser import ( - Exporter as BoothConfigExporter, - Parser as BoothConfigParser, - ) -+from pcs.lib.dr.config.facade import Facade as DrConfigFacade -+from pcs.lib.errors import ReportItemList - from pcs.lib.interface.config import ( - ExporterInterface, - FacadeInterface, -@@ -16,27 +23,23 @@ from pcs.lib.interface.config import ( - ) - - --FileToolbox = namedtuple( -- "FileToolbox", -- [ -- # File type code the toolbox belongs to -- "file_type_code", -- # Provides an easy access for reading and modifying data -- "facade", -- # Turns raw data into a structure which the facade is able to process -- "parser", -- # Turns a structure produced by the parser and the facade to raw data -- "exporter", -- # Checks that the structure is valid -- "validator", -- # Provides means for file syncing based on the file's version -- "version_controller", -- ] --) -+class FileToolbox(NamedTuple): -+ # File type code the toolbox belongs to -+ file_type_code: code.FileTypeCode -+ # Provides an easy access for reading and modifying data -+ facade: Type[FacadeInterface] -+ # Turns raw data into a structure 
which the facade is able to process -+ parser: Type[ParserInterface] -+ # Turns a structure produced by the parser and the facade to raw data -+ exporter: Type[ExporterInterface] -+ # Checks that the structure is valid -+ validator: None # TBI -+ # Provides means for file syncing based on the file's version -+ version_controller: None # TBI - - - class JsonParserException(ParserErrorException): -- def __init__(self, json_exception): -+ def __init__(self, json_exception: json.JSONDecodeError): - super().__init__() - self.json_exception = json_exception - -@@ -45,7 +48,7 @@ class JsonParser(ParserInterface): - Adapts standard json parser to our interfaces - """ - @staticmethod -- def parse(raw_file_data): -+ def parse(raw_file_data: bytes) -> Dict[str, Any]: - try: - # json.loads handles bytes, it expects utf-8, 16 or 32 encoding - return json.loads(raw_file_data) -@@ -54,8 +57,12 @@ class JsonParser(ParserInterface): - - @staticmethod - def exception_to_report_list( -- exception, file_type_code, file_path, force_code, is_forced_or_warning -- ): -+ exception: JsonParserException, -+ file_type_code: code.FileTypeCode, -+ file_path: str, -+ force_code: str, # TODO: fix -+ is_forced_or_warning: bool -+ ) -> ReportItemList: - report_creator = reports.get_problem_creator( - force_code=force_code, is_forced=is_forced_or_warning - ) -@@ -80,7 +87,7 @@ class JsonExporter(ExporterInterface): - Adapts standard json exporter to our interfaces - """ - @staticmethod -- def export(config_structure): -+ def export(config_structure: Dict[str, Any])-> bytes: - return json.dumps( - config_structure, indent=4, sort_keys=True, - ).encode("utf-8") -@@ -88,23 +95,27 @@ class JsonExporter(ExporterInterface): - - class NoopParser(ParserInterface): - @staticmethod -- def parse(raw_file_data): -+ def parse(raw_file_data: bytes) -> bytes: - return raw_file_data - - @staticmethod - def exception_to_report_list( -- exception, file_type_code, file_path, force_code, is_forced_or_warning -- ): -+ exception: ParserErrorException, -+ file_type_code: code.FileTypeCode, -+ file_path: str, -+ force_code: str, # TODO: fix -+ is_forced_or_warning: bool -+ ) -> ReportItemList: - return [] - - class NoopExporter(ExporterInterface): - @staticmethod -- def export(config_structure): -+ def export(config_structure: bytes) -> bytes: - return config_structure - - class NoopFacade(FacadeInterface): - @classmethod -- def create(cls): -+ def create(cls) -> "NoopFacade": - return cls(bytes()) - - -@@ -135,7 +146,16 @@ _toolboxes = { - ), - code.PCS_KNOWN_HOSTS: FileToolbox( - file_type_code=code.PCS_KNOWN_HOSTS, -- facade=None, # TODO needed for 'auth' and 'deauth' commands -+ # TODO needed for 'auth' and 'deauth' commands -+ facade=None, # type: ignore -+ parser=JsonParser, -+ exporter=JsonExporter, -+ validator=None, # TODO needed for files syncing -+ version_controller=None, # TODO needed for files syncing -+ ), -+ code.PCS_DR_CONFIG: FileToolbox( -+ file_type_code=code.PCS_DR_CONFIG, -+ facade=DrConfigFacade, - parser=JsonParser, - exporter=JsonExporter, - validator=None, # TODO needed for files syncing -@@ -143,5 +163,5 @@ _toolboxes = { - ), - } - --def for_file_type(file_type_code): -+def for_file_type(file_type_code: code.FileTypeCode) -> FileToolbox: - return _toolboxes[file_type_code] -diff --git a/pcs/lib/node.py b/pcs/lib/node.py -index 1930ffa8..09543c8e 100644 ---- a/pcs/lib/node.py -+++ b/pcs/lib/node.py -@@ -1,5 +1,6 @@ - from typing import ( - Iterable, -+ List, - Optional, - Tuple, - ) -@@ -18,7 +19,7 @@ def 
get_existing_nodes_names( - corosync_conf: Optional[CorosyncConfigFacade] = None, - cib: Optional[Element] = None, - error_on_missing_name: bool = False --) -> Tuple[Iterable[str], ReportItemList]: -+) -> Tuple[List[str], ReportItemList]: - return __get_nodes_names( - *__get_nodes(corosync_conf, cib), - error_on_missing_name -@@ -56,7 +57,7 @@ def __get_nodes_names( - corosync_nodes: Iterable[CorosyncNode], - remote_and_guest_nodes: Iterable[PacemakerNode], - error_on_missing_name: bool = False --) -> Tuple[Iterable[str], ReportItemList]: -+) -> Tuple[List[str], ReportItemList]: - report_list = [] - corosync_names = [] - name_missing_in_corosync = False -diff --git a/pcs/lib/node_communication_format.py b/pcs/lib/node_communication_format.py -index 6134c66d..1cef35b4 100644 ---- a/pcs/lib/node_communication_format.py -+++ b/pcs/lib/node_communication_format.py -@@ -1,5 +1,9 @@ - import base64 - from collections import namedtuple -+from typing import ( -+ Any, -+ Dict, -+) - - from pcs.lib import reports - from pcs.lib.errors import LibraryError -@@ -55,6 +59,18 @@ def corosync_conf_file(corosync_conf_content): - "corosync.conf": corosync_conf_format(corosync_conf_content) - } - -+def pcs_dr_config_format(dr_conf_content: bytes) -> Dict[str, Any]: -+ return { -+ "type": "pcs_disaster_recovery_conf", -+ "data": base64.b64encode(dr_conf_content).decode("utf-8"), -+ "rewrite_existing": True, -+ } -+ -+def pcs_dr_config_file(dr_conf_content: bytes) -> Dict[str, Any]: -+ return { -+ "disaster-recovery config": pcs_dr_config_format(dr_conf_content) -+ } -+ - def pcs_settings_conf_format(content): - return { - "data": content, -diff --git a/pcs/lib/reports.py b/pcs/lib/reports.py -index e83737b0..1f081007 100644 ---- a/pcs/lib/reports.py -+++ b/pcs/lib/reports.py -@@ -4221,3 +4221,34 @@ def resource_disable_affects_other_resources( - "crm_simulate_plaintext_output": crm_simulate_plaintext_output, - } - ) -+ -+ -+def dr_config_already_exist(): -+ """ -+ Disaster recovery config exists when the opposite was expected -+ """ -+ return ReportItem.error( -+ report_codes.DR_CONFIG_ALREADY_EXIST, -+ ) -+ -+def dr_config_does_not_exist(): -+ """ -+ Disaster recovery config does not exist when the opposite was expected -+ """ -+ return ReportItem.error( -+ report_codes.DR_CONFIG_DOES_NOT_EXIST, -+ ) -+ -+def node_in_local_cluster(node): -+ """ -+ Node is part of local cluster and it cannot be used for example to set up -+ disaster-recovery site -+ -+ node -- node which is part of local cluster -+ """ -+ return ReportItem.error( -+ report_codes.NODE_IN_LOCAL_CLUSTER, -+ info=dict( -+ node=node, -+ ), -+ ) -diff --git a/pcs/pcs.8 b/pcs/pcs.8 -index 5765c6b5..651fda83 100644 ---- a/pcs/pcs.8 -+++ b/pcs/pcs.8 -@@ -75,6 +75,9 @@ alert - .TP - client - Manage pcsd client configuration. -+.TP -+dr -+ Manage disaster recovery configuration. - .SS "resource" - .TP - [status [\fB\-\-hide\-inactive\fR]] -@@ -887,7 +890,7 @@ stop - Stop booth arbitrator service. - .SS "status" - .TP --[status] [\fB\-\-full\fR | \fB\-\-hide\-inactive\fR] -+[status] [\fB\-\-full\fR] [\fB\-\-hide\-inactive\fR] - View all information about the cluster and resources (\fB\-\-full\fR provides more details, \fB\-\-hide\-inactive\fR hides inactive resources). - .TP - resources [\fB\-\-hide\-inactive\fR] -@@ -1015,6 +1018,19 @@ Remove specified recipients. - .TP - local-auth [] [\-u ] [\-p ] - Authenticate current user to local pcsd. This is required to run some pcs commands which may require permissions of root user such as 'pcs cluster start'. 
-+.SS "dr" -+.TP -+config -+Display disaster-recovery configuration from the local node. -+.TP -+status [\fB\-\-full\fR] [\fB\-\-hide\-inactive\fR] -+Display status of the local and the remote site cluster (\fB\-\-full\fR provides more details, \fB\-\-hide\-inactive\fR hides inactive resources). -+.TP -+set\-recovery\-site -+Set up disaster\-recovery with the local cluster being the primary site. The recovery site is defined by a name of one of its nodes. -+.TP -+destroy -+Permanently destroy disaster-recovery configuration on all sites. - .SH EXAMPLES - .TP - Show all resources -diff --git a/pcs/pcs_internal.py b/pcs/pcs_internal.py -index fecdc8d5..d956d71e 100644 ---- a/pcs/pcs_internal.py -+++ b/pcs/pcs_internal.py -@@ -22,6 +22,7 @@ SUPPORTED_COMMANDS = { - "cluster.setup", - "cluster.add_nodes", - "cluster.remove_nodes", -+ "status.full_cluster_status_plaintext", - } - - -diff --git a/pcs/settings_default.py b/pcs/settings_default.py -index ab61b20b..6d8f33ac 100644 ---- a/pcs/settings_default.py -+++ b/pcs/settings_default.py -@@ -50,6 +50,7 @@ pcsd_users_conf_location = os.path.join(pcsd_var_location, "pcs_users.conf") - pcsd_settings_conf_location = os.path.join( - pcsd_var_location, "pcs_settings.conf" - ) -+pcsd_dr_config_location = os.path.join(pcsd_var_location, "disaster-recovery") - pcsd_exec_location = "/usr/lib/pcsd/" - pcsd_log_location = "/var/log/pcsd/pcsd.log" - pcsd_default_port = 2224 -diff --git a/pcs/usage.py b/pcs/usage.py -index 0b16289e..e4f5af32 100644 ---- a/pcs/usage.py -+++ b/pcs/usage.py -@@ -22,6 +22,7 @@ def full_usage(): - out += strip_extras(host([], False)) - out += strip_extras(alert([], False)) - out += strip_extras(client([], False)) -+ out += strip_extras(dr([], False)) - print(out.strip()) - print("Examples:\n" + examples.replace(r" \ ", "")) - -@@ -124,6 +125,7 @@ def generate_completion_tree_from_usage(): - tree["alert"] = generate_tree(alert([], False)) - tree["booth"] = generate_tree(booth([], False)) - tree["client"] = generate_tree(client([], False)) -+ tree["dr"] = generate_tree(dr([], False)) - return tree - - def generate_tree(usage_txt): -@@ -194,6 +196,7 @@ Commands: - node Manage cluster nodes. - alert Manage pacemaker alerts. - client Manage pcsd client configuration. -+ dr Manage disaster recovery configuration. - """ - # Advanced usage to possibly add later - # --corosync_conf= Specify alternative corosync.conf file -@@ -1517,7 +1520,7 @@ def status(args=(), pout=True): - Usage: pcs status [commands]... - View current cluster and resource status - Commands: -- [status] [--full | --hide-inactive] -+ [status] [--full] [--hide-inactive] - View all information about the cluster and resources (--full provides - more details, --hide-inactive hides inactive resources). - -@@ -2019,6 +2022,32 @@ Commands: - return output - - -+def dr(args=(), pout=True): -+ output = """ -+Usage: pcs dr -+Manage disaster recovery configuration. -+ -+Commands: -+ config -+ Display disaster-recovery configuration from the local node. -+ -+ status [--full] [--hide-inactive] -+ Display status of the local and the remote site cluster (--full -+ provides more details, --hide-inactive hides inactive resources). -+ -+ set-recovery-site -+ Set up disaster-recovery with the local cluster being the primary site. -+ The recovery site is defined by a name of one of its nodes. -+ -+ destroy -+ Permanently destroy disaster-recovery configuration on all sites. 
-+""" -+ if pout: -+ print(sub_usage(args, output)) -+ return None -+ return output -+ -+ - def show(main_usage_name, rest_usage_names): - usage_map = { - "acl": acl, -@@ -2028,6 +2057,7 @@ def show(main_usage_name, rest_usage_names): - "cluster": cluster, - "config": config, - "constraint": constraint, -+ "dr": dr, - "host": host, - "node": node, - "pcsd": pcsd, -diff --git a/pcs_test/tier0/cli/common/test_console_report.py b/pcs_test/tier0/cli/common/test_console_report.py -index 2deb896d..0d0c2457 100644 ---- a/pcs_test/tier0/cli/common/test_console_report.py -+++ b/pcs_test/tier0/cli/common/test_console_report.py -@@ -4489,3 +4489,27 @@ class ResourceDisableAffectsOtherResources(NameBuildTest): - "crm_simulate output", - ) - ) -+ -+ -+class DrConfigAlreadyExist(NameBuildTest): -+ def test_success(self): -+ self.assert_message_from_report( -+ "Disaster-recovery already configured", -+ reports.dr_config_already_exist() -+ ) -+ -+ -+class DrConfigDoesNotExist(NameBuildTest): -+ def test_success(self): -+ self.assert_message_from_report( -+ "Disaster-recovery is not configured", -+ reports.dr_config_does_not_exist() -+ ) -+ -+ -+class NodeInLocalCluster(NameBuildTest): -+ def test_success(self): -+ self.assert_message_from_report( -+ "Node 'node-name' is part of local cluster", -+ reports.node_in_local_cluster("node-name") -+ ) -diff --git a/pcs_test/tier0/cli/test_dr.py b/pcs_test/tier0/cli/test_dr.py -new file mode 100644 -index 00000000..4422cdc4 ---- /dev/null -+++ b/pcs_test/tier0/cli/test_dr.py -@@ -0,0 +1,293 @@ -+from textwrap import dedent -+from unittest import mock, TestCase -+ -+from pcs_test.tools.misc import dict_to_modifiers -+ -+from pcs.common import report_codes -+ -+from pcs.cli import dr -+from pcs.cli.common.errors import CmdLineInputError -+ -+ -+@mock.patch("pcs.cli.dr.print") -+class Config(TestCase): -+ def setUp(self): -+ self.lib = mock.Mock(spec_set=["dr"]) -+ self.lib.dr = mock.Mock(spec_set=["get_config"]) -+ -+ def _call_cmd(self, argv=None): -+ dr.config(self.lib, argv or [], dict_to_modifiers({})) -+ -+ def test_argv(self, mock_print): -+ with self.assertRaises(CmdLineInputError) as cm: -+ self._call_cmd(["x"]) -+ self.assertIsNone(cm.exception.message) -+ mock_print.assert_not_called() -+ -+ def test_success(self, mock_print): -+ self.lib.dr.get_config.return_value = { -+ "local_site": { -+ "node_list": [], -+ "site_role": "RECOVERY", -+ }, -+ "remote_site_list": [ -+ { -+ "node_list": [ -+ {"name": "nodeA2"}, -+ {"name": "nodeA1"}, -+ ], -+ "site_role": "PRIMARY", -+ }, -+ { -+ "node_list": [ -+ {"name": "nodeB1"}, -+ ], -+ "site_role": "RECOVERY", -+ } -+ ], -+ } -+ self._call_cmd([]) -+ self.lib.dr.get_config.assert_called_once_with() -+ mock_print.assert_called_once_with(dedent("""\ -+ Local site: -+ Role: Recovery -+ Remote site: -+ Role: Primary -+ Nodes: -+ nodeA1 -+ nodeA2 -+ Remote site: -+ Role: Recovery -+ Nodes: -+ nodeB1""")) -+ -+ @mock.patch("pcs.cli.common.console_report.sys.stderr.write") -+ def test_invalid_response(self, mock_stderr, mock_print): -+ self.lib.dr.get_config.return_value = [ -+ "wrong response", -+ {"x": "y"}, -+ ] -+ with self.assertRaises(SystemExit) as cm: -+ self._call_cmd([]) -+ self.assertEqual(cm.exception.code, 1) -+ self.lib.dr.get_config.assert_called_once_with() -+ mock_print.assert_not_called() -+ mock_stderr.assert_called_once_with( -+ "Error: Unable to communicate with pcsd, received response:\n" -+ "['wrong response', {'x': 'y'}]\n" -+ ) -+ -+ -+class SetRecoverySite(TestCase): -+ def setUp(self): -+ self.lib 
= mock.Mock(spec_set=["dr"]) -+ self.dr = mock.Mock(spec_set=["set_recovery_site"]) -+ self.lib.dr = self.dr -+ -+ def call_cmd(self, argv): -+ dr.set_recovery_site(self.lib, argv, dict_to_modifiers({})) -+ -+ def test_no_node(self): -+ with self.assertRaises(CmdLineInputError) as cm: -+ self.call_cmd([]) -+ self.assertIsNone(cm.exception.message) -+ -+ def test_multiple_nodes(self): -+ with self.assertRaises(CmdLineInputError) as cm: -+ self.call_cmd(["node1", "node2"]) -+ self.assertIsNone(cm.exception.message) -+ -+ def test_success(self): -+ node = "node" -+ self.call_cmd([node]) -+ self.dr.set_recovery_site.assert_called_once_with(node) -+ -+ -+@mock.patch("pcs.cli.dr.print") -+class Status(TestCase): -+ def setUp(self): -+ self.lib = mock.Mock(spec_set=["dr"]) -+ self.lib.dr = mock.Mock(spec_set=["status_all_sites_plaintext"]) -+ -+ def _call_cmd(self, argv, modifiers=None): -+ dr.status(self.lib, argv, dict_to_modifiers(modifiers or {})) -+ -+ def _fixture_response(self, local_success=True, remote_success=True): -+ self.lib.dr.status_all_sites_plaintext.return_value = [ -+ { -+ "local_site": True, -+ "site_role": "PRIMARY", -+ "status_plaintext": ( -+ "local cluster\nstatus" if local_success -+ else "this should never be displayed" -+ ), -+ "status_successfully_obtained": local_success, -+ }, -+ { -+ "local_site": False, -+ "site_role": "RECOVERY", -+ "status_plaintext": ( -+ "remote cluster\nstatus" if remote_success -+ else "this should never be displayed" -+ ), -+ "status_successfully_obtained": remote_success, -+ }, -+ ] -+ -+ @staticmethod -+ def _fixture_print(): -+ return dedent("""\ -+ --- Local cluster - Primary site --- -+ local cluster -+ status -+ -+ -+ --- Remote cluster - Recovery site --- -+ remote cluster -+ status""" -+ ) -+ -+ def test_argv(self, mock_print): -+ with self.assertRaises(CmdLineInputError) as cm: -+ self._call_cmd(["x"]) -+ self.assertIsNone(cm.exception.message) -+ mock_print.assert_not_called() -+ -+ def test_success(self, mock_print): -+ self._fixture_response() -+ self._call_cmd([]) -+ self.lib.dr.status_all_sites_plaintext.assert_called_once_with( -+ hide_inactive_resources=False, verbose=False -+ ) -+ mock_print.assert_called_once_with(self._fixture_print()) -+ -+ def test_success_full(self, mock_print): -+ self._fixture_response() -+ self._call_cmd([], {"full": True}) -+ self.lib.dr.status_all_sites_plaintext.assert_called_once_with( -+ hide_inactive_resources=False, verbose=True -+ ) -+ mock_print.assert_called_once_with(self._fixture_print()) -+ -+ def test_success_hide_inactive(self, mock_print): -+ self._fixture_response() -+ self._call_cmd([], {"hide-inactive": True}) -+ self.lib.dr.status_all_sites_plaintext.assert_called_once_with( -+ hide_inactive_resources=True, verbose=False -+ ) -+ mock_print.assert_called_once_with(self._fixture_print()) -+ -+ def test_success_all_flags(self, mock_print): -+ self._fixture_response() -+ self._call_cmd([], {"full": True, "hide-inactive": True}) -+ self.lib.dr.status_all_sites_plaintext.assert_called_once_with( -+ hide_inactive_resources=True, verbose=True -+ ) -+ mock_print.assert_called_once_with(self._fixture_print()) -+ -+ @mock.patch("pcs.cli.common.console_report.sys.stderr.write") -+ def test_error_local(self, mock_stderr, mock_print): -+ self._fixture_response(local_success=False) -+ with self.assertRaises(SystemExit) as cm: -+ self._call_cmd([]) -+ self.assertEqual(cm.exception.code, 1) -+ self.lib.dr.status_all_sites_plaintext.assert_called_once_with( -+ hide_inactive_resources=False, 
verbose=False -+ ) -+ mock_print.assert_called_once_with(dedent("""\ -+ --- Local cluster - Primary site --- -+ Error: Unable to get status of the cluster from any node -+ -+ --- Remote cluster - Recovery site --- -+ remote cluster -+ status""" -+ )) -+ mock_stderr.assert_called_once_with( -+ "Error: Unable to get status of all sites\n" -+ ) -+ -+ @mock.patch("pcs.cli.common.console_report.sys.stderr.write") -+ def test_error_remote(self, mock_stderr, mock_print): -+ self._fixture_response(remote_success=False) -+ with self.assertRaises(SystemExit) as cm: -+ self._call_cmd([]) -+ self.assertEqual(cm.exception.code, 1) -+ self.lib.dr.status_all_sites_plaintext.assert_called_once_with( -+ hide_inactive_resources=False, verbose=False -+ ) -+ mock_print.assert_called_once_with(dedent("""\ -+ --- Local cluster - Primary site --- -+ local cluster -+ status -+ -+ -+ --- Remote cluster - Recovery site --- -+ Error: Unable to get status of the cluster from any node""" -+ )) -+ mock_stderr.assert_called_once_with( -+ "Error: Unable to get status of all sites\n" -+ ) -+ -+ @mock.patch("pcs.cli.common.console_report.sys.stderr.write") -+ def test_error_both(self, mock_stderr, mock_print): -+ self._fixture_response(local_success=False, remote_success=False) -+ with self.assertRaises(SystemExit) as cm: -+ self._call_cmd([]) -+ self.assertEqual(cm.exception.code, 1) -+ self.lib.dr.status_all_sites_plaintext.assert_called_once_with( -+ hide_inactive_resources=False, verbose=False -+ ) -+ mock_print.assert_called_once_with(dedent("""\ -+ --- Local cluster - Primary site --- -+ Error: Unable to get status of the cluster from any node -+ -+ --- Remote cluster - Recovery site --- -+ Error: Unable to get status of the cluster from any node""" -+ )) -+ mock_stderr.assert_called_once_with( -+ "Error: Unable to get status of all sites\n" -+ ) -+ -+ @mock.patch("pcs.cli.common.console_report.sys.stderr.write") -+ def test_invalid_response(self, mock_stderr, mock_print): -+ self.lib.dr.status_all_sites_plaintext.return_value = [ -+ "wrong response", -+ {"x": "y"}, -+ ] -+ with self.assertRaises(SystemExit) as cm: -+ self._call_cmd([]) -+ self.assertEqual(cm.exception.code, 1) -+ self.lib.dr.status_all_sites_plaintext.assert_called_once_with( -+ hide_inactive_resources=False, verbose=False -+ ) -+ mock_print.assert_not_called() -+ mock_stderr.assert_called_once_with( -+ "Error: Unable to communicate with pcsd, received response:\n" -+ "['wrong response', {'x': 'y'}]\n" -+ ) -+ -+ -+class Destroy(TestCase): -+ def setUp(self): -+ self.lib = mock.Mock(spec_set=["dr"]) -+ self.dr = mock.Mock(spec_set=["destroy"]) -+ self.lib.dr = self.dr -+ -+ def call_cmd(self, argv, modifiers=None): -+ modifiers = modifiers or {} -+ dr.destroy(self.lib, argv, dict_to_modifiers(modifiers)) -+ -+ def test_some_args(self): -+ with self.assertRaises(CmdLineInputError) as cm: -+ self.call_cmd(["arg"]) -+ self.assertIsNone(cm.exception.message) -+ -+ def test_success(self): -+ self.call_cmd([]) -+ self.dr.destroy.assert_called_once_with(force_flags=[]) -+ -+ def test_skip_offline(self): -+ self.call_cmd([], modifiers={"skip-offline": True}) -+ self.dr.destroy.assert_called_once_with( -+ force_flags=[report_codes.SKIP_OFFLINE_NODES] -+ ) -diff --git a/pcs_test/tier0/common/test_dr.py b/pcs_test/tier0/common/test_dr.py -new file mode 100644 -index 00000000..2ef12855 ---- /dev/null -+++ b/pcs_test/tier0/common/test_dr.py -@@ -0,0 +1,167 @@ -+from unittest import TestCase -+ -+from pcs.common import dr -+ -+ -+class DrConfigNodeDto(TestCase): 
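-+    # Each DTO test below is a serialization round trip: to_dict() is
-+    # expected to reproduce the plain-dict form, and from_dict() to rebuild
-+    # an equivalent DTO from that same dict.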
-+ def setUp(self): -+ self.name = "node-name" -+ -+ def _fixture_dto(self): -+ return dr.DrConfigNodeDto(self.name) -+ -+ def _fixture_dict(self): -+ return dict(name=self.name) -+ -+ def test_to_dict(self): -+ self.assertEqual( -+ self._fixture_dict(), -+ self._fixture_dto().to_dict() -+ ) -+ -+ def test_from_dict(self): -+ dto = dr.DrConfigNodeDto.from_dict(self._fixture_dict()) -+ self.assertEqual(dto.name, self.name) -+ -+ -+class DrConfigSiteDto(TestCase): -+ def setUp(self): -+ self.role = dr.DrRole.PRIMARY -+ self.node_name_list = ["node1", "node2"] -+ -+ def _fixture_dto(self): -+ return dr.DrConfigSiteDto( -+ self.role, -+ [dr.DrConfigNodeDto(name) for name in self.node_name_list] -+ ) -+ -+ def _fixture_dict(self): -+ return dict( -+ site_role=self.role, -+ node_list=[dict(name=name) for name in self.node_name_list] -+ ) -+ -+ def test_to_dict(self): -+ self.assertEqual( -+ self._fixture_dict(), -+ self._fixture_dto().to_dict() -+ ) -+ -+ def test_from_dict(self): -+ dto = dr.DrConfigSiteDto.from_dict(self._fixture_dict()) -+ self.assertEqual(dto.site_role, self.role) -+ self.assertEqual(len(dto.node_list), len(self.node_name_list)) -+ for i, dto_node in enumerate(dto.node_list): -+ self.assertEqual( -+ dto_node.name, -+ self.node_name_list[i], -+ f"index: {i}" -+ ) -+ -+ -+class DrConfig(TestCase): -+ @staticmethod -+ def _fixture_site_dto(role, node_name_list): -+ return dr.DrConfigSiteDto( -+ role, -+ [dr.DrConfigNodeDto(name) for name in node_name_list] -+ ) -+ -+ @staticmethod -+ def _fixture_dict(): -+ return { -+ "local_site": { -+ "node_list": [], -+ "site_role": "RECOVERY", -+ }, -+ "remote_site_list": [ -+ { -+ "node_list": [ -+ {"name": "nodeA1"}, -+ {"name": "nodeA2"}, -+ ], -+ "site_role": "PRIMARY", -+ }, -+ { -+ "node_list": [ -+ {"name": "nodeB1"}, -+ ], -+ "site_role": "RECOVERY", -+ } -+ ], -+ } -+ -+ def test_to_dict(self): -+ self.assertEqual( -+ self._fixture_dict(), -+ dr.DrConfigDto( -+ self._fixture_site_dto(dr.DrRole.RECOVERY, []), -+ [ -+ self._fixture_site_dto( -+ dr.DrRole.PRIMARY, -+ ["nodeA1", "nodeA2"] -+ ), -+ self._fixture_site_dto( -+ dr.DrRole.RECOVERY, -+ ["nodeB1"] -+ ), -+ ] -+ ).to_dict() -+ ) -+ -+ def test_from_dict(self): -+ dto = dr.DrConfigDto.from_dict(self._fixture_dict()) -+ self.assertEqual( -+ dto.local_site.to_dict(), -+ self._fixture_site_dto(dr.DrRole.RECOVERY, []).to_dict() -+ ) -+ self.assertEqual(len(dto.remote_site_list), 2) -+ self.assertEqual( -+ dto.remote_site_list[0].to_dict(), -+ self._fixture_site_dto( -+ dr.DrRole.PRIMARY, ["nodeA1", "nodeA2"] -+ ).to_dict() -+ ) -+ self.assertEqual( -+ dto.remote_site_list[1].to_dict(), -+ self._fixture_site_dto(dr.DrRole.RECOVERY, ["nodeB1"]).to_dict() -+ ) -+ -+class DrSiteStatusDto(TestCase): -+ def setUp(self): -+ self.local = False -+ self.role = dr.DrRole.PRIMARY -+ self.status_plaintext = "plaintext status" -+ self.status_successfully_obtained = True -+ -+ def dto_fixture(self): -+ return dr.DrSiteStatusDto( -+ self.local, -+ self.role, -+ self.status_plaintext, -+ self.status_successfully_obtained, -+ ) -+ -+ def dict_fixture(self): -+ return dict( -+ local_site=self.local, -+ site_role=self.role.value, -+ status_plaintext=self.status_plaintext, -+ status_successfully_obtained=self.status_successfully_obtained, -+ ) -+ -+ def test_to_dict(self): -+ self.assertEqual( -+ self.dict_fixture(), -+ self.dto_fixture().to_dict() -+ ) -+ -+ def test_from_dict(self): -+ dto = dr.DrSiteStatusDto.from_dict(self.dict_fixture()) -+ self.assertEqual(dto.local_site, self.local) -+ 
self.assertEqual(dto.site_role, self.role) -+ self.assertEqual(dto.status_plaintext, self.status_plaintext) -+ self.assertEqual( -+ dto.status_successfully_obtained, -+ self.status_successfully_obtained -+ ) -diff --git a/pcs_test/tier0/lib/commands/cluster/test_add_nodes.py b/pcs_test/tier0/lib/commands/cluster/test_add_nodes.py -index a570d67e..295c1e6a 100644 ---- a/pcs_test/tier0/lib/commands/cluster/test_add_nodes.py -+++ b/pcs_test/tier0/lib/commands/cluster/test_add_nodes.py -@@ -470,6 +470,11 @@ class LocalConfig(): - return_value=False, - name=f"{local_prefix}fs.isfile.pacemaker_authkey" - ) -+ .fs.isfile( -+ settings.pcsd_dr_config_location, -+ return_value=False, -+ name=f"{local_prefix}fs.isfile.pcsd_disaster_recovery" -+ ) - .fs.isfile( - settings.pcsd_settings_conf_location, - return_value=False, -@@ -480,10 +485,12 @@ class LocalConfig(): - def files_sync(self, node_labels): - corosync_authkey_content = b"corosync authfile" - pcmk_authkey_content = b"pcmk authfile" -- pcs_settings_content = "pcs_settigns.conf data" -+ pcs_disaster_recovery_content = b"disaster recovery config data" -+ pcs_settings_content = "pcs_settings.conf data" - file_list = [ - "corosync authkey", - "pacemaker authkey", -+ "disaster-recovery config", - "pcs_settings.conf", - ] - local_prefix = "local.files_sync." -@@ -512,6 +519,19 @@ class LocalConfig(): - mode="rb", - name=f"{local_prefix}fs.open.pcmk_authkey_read", - ) -+ .fs.isfile( -+ settings.pcsd_dr_config_location, -+ return_value=True, -+ name=f"{local_prefix}fs.isfile.pcsd_disaster_recovery" -+ ) -+ .fs.open( -+ settings.pcsd_dr_config_location, -+ return_value=( -+ mock.mock_open(read_data=pcs_disaster_recovery_content)() -+ ), -+ mode="rb", -+ name=f"{local_prefix}fs.open.pcsd_disaster_recovery_read", -+ ) - .fs.isfile( - settings.pcsd_settings_conf_location, - return_value=True, -@@ -526,6 +546,7 @@ class LocalConfig(): - node_labels=node_labels, - pcmk_authkey=pcmk_authkey_content, - corosync_authkey=corosync_authkey_content, -+ pcs_disaster_recovery_conf=pcs_disaster_recovery_content, - pcs_settings_conf=pcs_settings_content, - name=f"{local_prefix}http.files.put_files", - ) -@@ -2105,13 +2126,16 @@ class FailureFilesDistribution(TestCase): - self.expected_reports = [] - self.pcmk_authkey_content = b"pcmk authkey content" - self.corosync_authkey_content = b"corosync authkey content" -+ self.pcsd_dr_config_content = b"disaster recovery config data" - self.pcmk_authkey_file_id = "pacemaker_remote authkey" - self.corosync_authkey_file_id = "corosync authkey" -+ self.pcsd_dr_config_file_id = "disaster-recovery config" - self.unsuccessful_nodes = self.new_nodes[:1] - self.successful_nodes = self.new_nodes[1:] - self.err_msg = "an error message" - self.corosync_key_open_before_position = "fs.isfile.pacemaker_authkey" -- self.pacemaker_key_open_before_position = "fs.isfile.pcsd_settings" -+ self.pacemaker_key_open_before_position = "fs.isfile.pcsd_dr_config" -+ self.pcsd_dr_config_open_before_position = "fs.isfile.pcsd_settings" - patch_getaddrinfo(self, self.new_nodes) - self.existing_corosync_nodes = [ - node_fixture(node, node_id) -@@ -2149,9 +2173,14 @@ class FailureFilesDistribution(TestCase): - ) - # open will be inserted here - .fs.isfile( -- settings.pcsd_settings_conf_location, return_value=False, -+ settings.pcsd_dr_config_location, return_value=True, - name=self.pacemaker_key_open_before_position - ) -+ # open will be inserted here -+ .fs.isfile( -+ settings.pcsd_settings_conf_location, return_value=False, -+ 
name=self.pcsd_dr_config_open_before_position -+ ) - ) - self.expected_reports.extend( - [ -@@ -2165,7 +2194,11 @@ class FailureFilesDistribution(TestCase): - self.distribution_started_reports = [ - fixture.info( - report_codes.FILES_DISTRIBUTION_STARTED, -- file_list=["corosync authkey", "pacemaker authkey"], -+ file_list=[ -+ self.corosync_authkey_file_id, -+ "pacemaker authkey", -+ self.pcsd_dr_config_file_id, -+ ], - node_list=self.new_nodes, - ) - ] -@@ -2181,6 +2214,12 @@ class FailureFilesDistribution(TestCase): - node=node, - file_description="pacemaker authkey", - ) for node in self.successful_nodes -+ ] + [ -+ fixture.info( -+ report_codes.FILE_DISTRIBUTION_SUCCESS, -+ node=node, -+ file_description=self.pcsd_dr_config_file_id, -+ ) for node in self.successful_nodes - ] - - def _add_nodes_with_lib_error(self): -@@ -2210,6 +2249,15 @@ class FailureFilesDistribution(TestCase): - name="fs.open.pacemaker_authkey", - before=self.pacemaker_key_open_before_position, - ) -+ self.config.fs.open( -+ settings.pcsd_dr_config_location, -+ mode="rb", -+ side_effect=EnvironmentError( -+ 1, self.err_msg, settings.pcsd_dr_config_location -+ ), -+ name="fs.open.pcsd_dr_config", -+ before=self.pcsd_dr_config_open_before_position, -+ ) - - self._add_nodes_with_lib_error() - -@@ -2236,7 +2284,17 @@ class FailureFilesDistribution(TestCase): - f"{self.err_msg}: '{settings.pacemaker_authkey_file}'" - ), - operation=RawFileError.ACTION_READ, -- ) -+ ), -+ fixture.error( -+ report_codes.FILE_IO_ERROR, -+ force_code=report_codes.SKIP_FILE_DISTRIBUTION_ERRORS, -+ file_type_code=file_type_codes.PCS_DR_CONFIG, -+ file_path=settings.pcsd_dr_config_location, -+ reason=( -+ f"{self.err_msg}: '{settings.pcsd_dr_config_location}'" -+ ), -+ operation=RawFileError.ACTION_READ, -+ ), - ] - ) - -@@ -2260,6 +2318,15 @@ class FailureFilesDistribution(TestCase): - name="fs.open.pacemaker_authkey", - before=self.pacemaker_key_open_before_position, - ) -+ .fs.open( -+ settings.pcsd_dr_config_location, -+ mode="rb", -+ side_effect=EnvironmentError( -+ 1, self.err_msg, settings.pcsd_dr_config_location -+ ), -+ name="fs.open.pcsd_dr_config", -+ before=self.pcsd_dr_config_open_before_position, -+ ) - .local.distribute_and_reload_corosync_conf( - corosync_conf_fixture( - self.existing_corosync_nodes + [ -@@ -2301,7 +2368,16 @@ class FailureFilesDistribution(TestCase): - f"{self.err_msg}: '{settings.pacemaker_authkey_file}'" - ), - operation=RawFileError.ACTION_READ, -- ) -+ ), -+ fixture.warn( -+ report_codes.FILE_IO_ERROR, -+ file_type_code=file_type_codes.PCS_DR_CONFIG, -+ file_path=settings.pcsd_dr_config_location, -+ reason=( -+ f"{self.err_msg}: '{settings.pcsd_dr_config_location}'" -+ ), -+ operation=RawFileError.ACTION_READ, -+ ), - ] - ) - -@@ -2325,9 +2401,19 @@ class FailureFilesDistribution(TestCase): - name="fs.open.pacemaker_authkey", - before=self.pacemaker_key_open_before_position, - ) -+ .fs.open( -+ settings.pcsd_dr_config_location, -+ return_value=mock.mock_open( -+ read_data=self.pcsd_dr_config_content -+ )(), -+ mode="rb", -+ name="fs.open.pcsd_dr_config", -+ before=self.pcsd_dr_config_open_before_position, -+ ) - .http.files.put_files( - pcmk_authkey=self.pcmk_authkey_content, - corosync_authkey=self.corosync_authkey_content, -+ pcs_disaster_recovery_conf=self.pcsd_dr_config_content, - communication_list=[ - dict( - label=node, -@@ -2339,7 +2425,11 @@ class FailureFilesDistribution(TestCase): - self.pcmk_authkey_file_id: dict( - code="unexpected", - message=self.err_msg -- ) -+ ), -+ 
self.pcsd_dr_config_file_id: dict( -+ code="unexpected", -+ message=self.err_msg -+ ), - })) - ) for node in self.unsuccessful_nodes - ] + [ -@@ -2374,6 +2464,15 @@ class FailureFilesDistribution(TestCase): - reason=self.err_msg, - ) for node in self.unsuccessful_nodes - ] -+ + -+ [ -+ fixture.error( -+ report_codes.FILE_DISTRIBUTION_ERROR, -+ node=node, -+ file_description=self.pcsd_dr_config_file_id, -+ reason=self.err_msg, -+ ) for node in self.unsuccessful_nodes -+ ] - ) - - def test_communication_failure(self): -@@ -2396,9 +2495,19 @@ class FailureFilesDistribution(TestCase): - name="fs.open.pacemaker_authkey", - before=self.pacemaker_key_open_before_position, - ) -+ .fs.open( -+ settings.pcsd_dr_config_location, -+ return_value=mock.mock_open( -+ read_data=self.pcsd_dr_config_content -+ )(), -+ mode="rb", -+ name="fs.open.pcsd_dr_config", -+ before=self.pcsd_dr_config_open_before_position, -+ ) - .http.files.put_files( - pcmk_authkey=self.pcmk_authkey_content, - corosync_authkey=self.corosync_authkey_content, -+ pcs_disaster_recovery_conf=self.pcsd_dr_config_content, - communication_list=[ - dict( - label=node, -@@ -2450,9 +2559,19 @@ class FailureFilesDistribution(TestCase): - name="fs.open.pacemaker_authkey", - before=self.pacemaker_key_open_before_position, - ) -+ .fs.open( -+ settings.pcsd_dr_config_location, -+ return_value=mock.mock_open( -+ read_data=self.pcsd_dr_config_content -+ )(), -+ mode="rb", -+ name="fs.open.pcsd_dr_config", -+ before=self.pcsd_dr_config_open_before_position, -+ ) - .http.files.put_files( - pcmk_authkey=self.pcmk_authkey_content, - corosync_authkey=self.corosync_authkey_content, -+ pcs_disaster_recovery_conf=self.pcsd_dr_config_content, - communication_list=[ - dict( - label=node, -@@ -2501,9 +2620,19 @@ class FailureFilesDistribution(TestCase): - name="fs.open.pacemaker_authkey", - before=self.pacemaker_key_open_before_position, - ) -+ .fs.open( -+ settings.pcsd_dr_config_location, -+ return_value=mock.mock_open( -+ read_data=self.pcsd_dr_config_content -+ )(), -+ mode="rb", -+ name="fs.open.pcsd_dr_config", -+ before=self.pcsd_dr_config_open_before_position, -+ ) - .http.files.put_files( - pcmk_authkey=self.pcmk_authkey_content, - corosync_authkey=self.corosync_authkey_content, -+ pcs_disaster_recovery_conf=self.pcsd_dr_config_content, - communication_list=[ - dict( - label=node, -diff --git a/pcs_test/tier0/lib/commands/dr/__init__.py b/pcs_test/tier0/lib/commands/dr/__init__.py -new file mode 100644 -index 00000000..e69de29b -diff --git a/pcs_test/tier0/lib/commands/dr/test_destroy.py b/pcs_test/tier0/lib/commands/dr/test_destroy.py -new file mode 100644 -index 00000000..de50b21c ---- /dev/null -+++ b/pcs_test/tier0/lib/commands/dr/test_destroy.py -@@ -0,0 +1,342 @@ -+import json -+from unittest import TestCase -+ -+from pcs_test.tools import fixture -+from pcs_test.tools.command_env import get_env_tools -+ -+from pcs import settings -+from pcs.common import ( -+ file_type_codes, -+ report_codes, -+) -+from pcs.common.file import RawFileError -+from pcs.lib.commands import dr -+ -+ -+DR_CONF = "pcs disaster-recovery config" -+REASON = "error msg" -+ -+ -+def generate_nodes(nodes_num, prefix=""): -+ return [f"{prefix}node{i}" for i in range(1, nodes_num + 1)] -+ -+ -+class CheckLive(TestCase): -+ def setUp(self): -+ self.env_assist, self.config = get_env_tools(self) -+ -+ def assert_live_required(self, forbidden_options): -+ self.env_assist.assert_raise_library_error( -+ lambda: dr.destroy(self.env_assist.get_env()), -+ [ -+ fixture.error( -+ 
report_codes.LIVE_ENVIRONMENT_REQUIRED, -+ forbidden_options=forbidden_options -+ ) -+ ], -+ expected_in_processor=False -+ ) -+ -+ def test_mock_corosync(self): -+ self.config.env.set_corosync_conf_data("corosync conf data") -+ self.assert_live_required([file_type_codes.COROSYNC_CONF]) -+ -+ def test_mock_cib(self): -+ self.config.env.set_cib_data("") -+ self.assert_live_required([file_type_codes.CIB]) -+ -+ def test_mock(self): -+ self.config.env.set_corosync_conf_data("corosync conf data") -+ self.config.env.set_cib_data("") -+ self.assert_live_required([ -+ file_type_codes.CIB, -+ file_type_codes.COROSYNC_CONF, -+ ]) -+ -+ -+class FixtureMixin: -+ def _fixture_load_configs(self): -+ self.config.raw_file.exists( -+ file_type_codes.PCS_DR_CONFIG, -+ settings.pcsd_dr_config_location, -+ ) -+ self.config.raw_file.read( -+ file_type_codes.PCS_DR_CONFIG, -+ settings.pcsd_dr_config_location, -+ content=""" -+ {{ -+ "local": {{ -+ "role": "PRIMARY" -+ }}, -+ "remote_sites": [ -+ {{ -+ "nodes": [{nodes}], -+ "role": "RECOVERY" -+ }} -+ ] -+ }} -+ """.format( -+ nodes=", ".join([ -+ json.dumps(dict(name=node)) -+ for node in self.remote_nodes -+ ]) -+ ) -+ ) -+ self.config.corosync_conf.load(node_name_list=self.local_nodes) -+ -+ def _success_reports(self): -+ return [ -+ fixture.info( -+ report_codes.FILES_REMOVE_FROM_NODES_STARTED, -+ file_list=[DR_CONF], -+ node_list=self.remote_nodes + self.local_nodes, -+ ) -+ ] + [ -+ fixture.info( -+ report_codes.FILE_REMOVE_FROM_NODE_SUCCESS, -+ file_description=DR_CONF, -+ node=node, -+ ) for node in (self.remote_nodes + self.local_nodes) -+ ] -+ -+ -+class Success(FixtureMixin, TestCase): -+ def setUp(self): -+ self.env_assist, self.config = get_env_tools(self) -+ self.local_nodes = generate_nodes(5) -+ self.remote_nodes = generate_nodes(3, prefix="remote-") -+ self.config.env.set_known_nodes(self.local_nodes + self.remote_nodes) -+ -+ def test_minimal(self): -+ self._fixture_load_configs() -+ self.config.http.files.remove_files( -+ node_labels=self.remote_nodes + self.local_nodes, -+ pcs_disaster_recovery_conf=True, -+ ) -+ dr.destroy(self.env_assist.get_env()) -+ self.env_assist.assert_reports(self._success_reports()) -+ -+ -+class FatalConfigIssue(FixtureMixin, TestCase): -+ def setUp(self): -+ self.env_assist, self.config = get_env_tools(self) -+ self.local_nodes = generate_nodes(5) -+ self.remote_nodes = generate_nodes(3, prefix="remote-") -+ -+ def test_config_missing(self): -+ self.config.raw_file.exists( -+ file_type_codes.PCS_DR_CONFIG, -+ settings.pcsd_dr_config_location, -+ exists=False, -+ ) -+ -+ self.env_assist.assert_raise_library_error( -+ lambda: dr.destroy(self.env_assist.get_env()), -+ ) -+ self.env_assist.assert_reports([ -+ fixture.error( -+ report_codes.DR_CONFIG_DOES_NOT_EXIST, -+ ), -+ ]) -+ -+ def test_config_read_error(self): -+ self.config.raw_file.exists( -+ file_type_codes.PCS_DR_CONFIG, -+ settings.pcsd_dr_config_location, -+ ) -+ self.config.raw_file.read( -+ file_type_codes.PCS_DR_CONFIG, -+ settings.pcsd_dr_config_location, -+ exception_msg=REASON, -+ ) -+ -+ self.env_assist.assert_raise_library_error( -+ lambda: dr.destroy(self.env_assist.get_env()), -+ ) -+ self.env_assist.assert_reports([ -+ fixture.error( -+ report_codes.FILE_IO_ERROR, -+ file_type_code=file_type_codes.PCS_DR_CONFIG, -+ file_path=settings.pcsd_dr_config_location, -+ operation=RawFileError.ACTION_READ, -+ reason=REASON, -+ ), -+ ]) -+ -+ def test_config_parse_error(self): -+ self.config.raw_file.exists( -+ file_type_codes.PCS_DR_CONFIG, -+ 
settings.pcsd_dr_config_location, -+ ) -+ self.config.raw_file.read( -+ file_type_codes.PCS_DR_CONFIG, -+ settings.pcsd_dr_config_location, -+ content="bad content", -+ ) -+ -+ self.env_assist.assert_raise_library_error( -+ lambda: dr.destroy(self.env_assist.get_env()), -+ ) -+ self.env_assist.assert_reports([ -+ fixture.error( -+ report_codes.PARSE_ERROR_JSON_FILE, -+ file_type_code=file_type_codes.PCS_DR_CONFIG, -+ file_path=settings.pcsd_dr_config_location, -+ line_number=1, -+ column_number=1, -+ position=0, -+ reason="Expecting value", -+ full_msg="Expecting value: line 1 column 1 (char 0)", -+ ), -+ ]) -+ -+ def test_corosync_conf_read_error(self): -+ self._fixture_load_configs() -+ self.config.corosync_conf.load_content( -+ "", exception_msg=REASON, instead="corosync_conf.load" -+ ) -+ self.env_assist.assert_raise_library_error( -+ lambda: dr.destroy(self.env_assist.get_env()), -+ [ -+ fixture.error( -+ report_codes.UNABLE_TO_READ_COROSYNC_CONFIG, -+ path=settings.corosync_conf_file, -+ reason=REASON, -+ ), -+ ], -+ expected_in_processor=False -+ ) -+ -+ def test_corosync_conf_parse_error(self): -+ self._fixture_load_configs() -+ self.config.corosync_conf.load_content( -+ "wrong {\n corosync", instead="corosync_conf.load" -+ ) -+ self.env_assist.assert_raise_library_error( -+ lambda: dr.destroy(self.env_assist.get_env()), -+ [ -+ fixture.error( -+ report_codes -+ .PARSE_ERROR_COROSYNC_CONF_LINE_IS_NOT_SECTION_NOR_KEY_VALUE -+ ), -+ ], -+ expected_in_processor=False -+ ) -+ -+ -+class CommunicationIssue(FixtureMixin, TestCase): -+ def setUp(self): -+ self.env_assist, self.config = get_env_tools(self) -+ self.local_nodes = generate_nodes(5) -+ self.remote_nodes = generate_nodes(3, prefix="remote-") -+ -+ def test_unknown_node(self): -+ self.config.env.set_known_nodes( -+ self.local_nodes[1:] + self.remote_nodes[1:] -+ ) -+ self._fixture_load_configs() -+ self.env_assist.assert_raise_library_error( -+ lambda: dr.destroy(self.env_assist.get_env()) -+ ) -+ self.env_assist.assert_reports([ -+ fixture.error( -+ report_codes.HOST_NOT_FOUND, -+ host_list=self.local_nodes[:1] + self.remote_nodes[:1], -+ force_code=report_codes.SKIP_OFFLINE_NODES, -+ ), -+ ]) -+ -+ def test_unknown_node_force(self): -+ existing_nodes = self.remote_nodes[1:] + self.local_nodes[1:] -+ self.config.env.set_known_nodes(existing_nodes) -+ self._fixture_load_configs() -+ self.config.http.files.remove_files( -+ node_labels=existing_nodes, -+ pcs_disaster_recovery_conf=True, -+ ) -+ dr.destroy( -+ self.env_assist.get_env(), -+ force_flags=[report_codes.SKIP_OFFLINE_NODES], -+ ) -+ self.env_assist.assert_reports([ -+ fixture.warn( -+ report_codes.HOST_NOT_FOUND, -+ host_list=self.local_nodes[:1] + self.remote_nodes[:1], -+ ), -+ ] + [ -+ fixture.info( -+ report_codes.FILES_REMOVE_FROM_NODES_STARTED, -+ file_list=[DR_CONF], -+ node_list=existing_nodes, -+ ) -+ ] + [ -+ fixture.info( -+ report_codes.FILE_REMOVE_FROM_NODE_SUCCESS, -+ file_description=DR_CONF, -+ node=node, -+ ) for node in existing_nodes -+ ]) -+ -+ def test_node_issues(self): -+ self.config.env.set_known_nodes(self.local_nodes + self.remote_nodes) -+ self._fixture_load_configs() -+ self.config.http.files.remove_files( -+ pcs_disaster_recovery_conf=True, -+ communication_list=[ -+ dict(label=node) for node in self.remote_nodes -+ ] + [ -+ dict( -+ label=self.local_nodes[0], -+ was_connected=False, -+ error_msg=REASON, -+ ), -+ dict( -+ label=self.local_nodes[1], -+ output="invalid data", -+ ), -+ dict( -+ label=self.local_nodes[2], -+ 
output=json.dumps(dict(files={ -+ DR_CONF: dict( -+ code="unexpected", -+ message=REASON, -+ ), -+ })), -+ ), -+ ] + [ -+ dict(label=node) for node in self.local_nodes[3:] -+ ] -+ ) -+ -+ self.env_assist.assert_raise_library_error( -+ lambda: dr.destroy(self.env_assist.get_env()) -+ ) -+ self.env_assist.assert_reports([ -+ fixture.info( -+ report_codes.FILES_REMOVE_FROM_NODES_STARTED, -+ file_list=[DR_CONF], -+ node_list=self.remote_nodes + self.local_nodes, -+ ), -+ fixture.error( -+ report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT, -+ command="remote/remove_file", -+ node=self.local_nodes[0], -+ reason=REASON, -+ ), -+ fixture.error( -+ report_codes.INVALID_RESPONSE_FORMAT, -+ node=self.local_nodes[1], -+ ), -+ fixture.error( -+ report_codes.FILE_REMOVE_FROM_NODE_ERROR, -+ file_description=DR_CONF, -+ reason=REASON, -+ node=self.local_nodes[2], -+ ), -+ ] + [ -+ fixture.info( -+ report_codes.FILE_REMOVE_FROM_NODE_SUCCESS, -+ file_description=DR_CONF, -+ node=node, -+ ) for node in self.local_nodes[3:] + self.remote_nodes -+ ]) -diff --git a/pcs_test/tier0/lib/commands/dr/test_get_config.py b/pcs_test/tier0/lib/commands/dr/test_get_config.py -new file mode 100644 -index 00000000..b2297c8a ---- /dev/null -+++ b/pcs_test/tier0/lib/commands/dr/test_get_config.py -@@ -0,0 +1,134 @@ -+from unittest import TestCase -+ -+from pcs import settings -+from pcs.common import ( -+ file_type_codes, -+ report_codes, -+) -+from pcs.common.file import RawFileError -+from pcs.lib.commands import dr -+ -+from pcs_test.tools.command_env import get_env_tools -+from pcs_test.tools import fixture -+ -+REASON = "error msg" -+ -+class Config(TestCase): -+ def setUp(self): -+ self.env_assist, self.config = get_env_tools(self) -+ -+ def test_success(self): -+ (self.config -+ .raw_file.exists( -+ file_type_codes.PCS_DR_CONFIG, -+ settings.pcsd_dr_config_location, -+ ) -+ .raw_file.read( -+ file_type_codes.PCS_DR_CONFIG, -+ settings.pcsd_dr_config_location, -+ content=""" -+ { -+ "local": { -+ "role": "PRIMARY" -+ }, -+ "remote_sites": [ -+ { -+ "nodes": [ -+ { -+ "name": "recovery-node" -+ } -+ ], -+ "role": "RECOVERY" -+ } -+ ] -+ } -+ """, -+ ) -+ ) -+ self.assertEqual( -+ dr.get_config(self.env_assist.get_env()), -+ { -+ "local_site": { -+ "node_list": [], -+ "site_role": "PRIMARY", -+ }, -+ "remote_site_list": [ -+ { -+ "node_list": [ -+ {"name": "recovery-node"}, -+ ], -+ "site_role": "RECOVERY", -+ }, -+ ], -+ } -+ ) -+ -+ def test_config_missing(self): -+ (self.config -+ .raw_file.exists( -+ file_type_codes.PCS_DR_CONFIG, -+ settings.pcsd_dr_config_location, -+ exists=False, -+ ) -+ ) -+ self.env_assist.assert_raise_library_error( -+ lambda: dr.get_config(self.env_assist.get_env()), -+ ) -+ self.env_assist.assert_reports([ -+ fixture.error( -+ report_codes.DR_CONFIG_DOES_NOT_EXIST, -+ ), -+ ]) -+ -+ def test_config_read_error(self): -+ (self.config -+ .raw_file.exists( -+ file_type_codes.PCS_DR_CONFIG, -+ settings.pcsd_dr_config_location, -+ ) -+ .raw_file.read( -+ file_type_codes.PCS_DR_CONFIG, -+ settings.pcsd_dr_config_location, -+ exception_msg=REASON, -+ ) -+ ) -+ self.env_assist.assert_raise_library_error( -+ lambda: dr.get_config(self.env_assist.get_env()), -+ ) -+ self.env_assist.assert_reports([ -+ fixture.error( -+ report_codes.FILE_IO_ERROR, -+ file_type_code=file_type_codes.PCS_DR_CONFIG, -+ file_path=settings.pcsd_dr_config_location, -+ operation=RawFileError.ACTION_READ, -+ reason=REASON, -+ ), -+ ]) -+ -+ def test_config_parse_error(self): -+ (self.config -+ .raw_file.exists( -+ 
file_type_codes.PCS_DR_CONFIG, -+ settings.pcsd_dr_config_location, -+ ) -+ .raw_file.read( -+ file_type_codes.PCS_DR_CONFIG, -+ settings.pcsd_dr_config_location, -+ content="bad content", -+ ) -+ ) -+ self.env_assist.assert_raise_library_error( -+ lambda: dr.get_config(self.env_assist.get_env()), -+ ) -+ self.env_assist.assert_reports([ -+ fixture.error( -+ report_codes.PARSE_ERROR_JSON_FILE, -+ file_type_code=file_type_codes.PCS_DR_CONFIG, -+ file_path=settings.pcsd_dr_config_location, -+ line_number=1, -+ column_number=1, -+ position=0, -+ reason="Expecting value", -+ full_msg="Expecting value: line 1 column 1 (char 0)", -+ ), -+ ]) -diff --git a/pcs_test/tier0/lib/commands/dr/test_set_recovery_site.py b/pcs_test/tier0/lib/commands/dr/test_set_recovery_site.py -new file mode 100644 -index 00000000..06d80df1 ---- /dev/null -+++ b/pcs_test/tier0/lib/commands/dr/test_set_recovery_site.py -@@ -0,0 +1,702 @@ -+import json -+from unittest import TestCase -+ -+from pcs_test.tools import fixture -+from pcs_test.tools.command_env import get_env_tools -+ -+from pcs import settings -+from pcs.common import ( -+ file_type_codes, -+ report_codes, -+) -+from pcs.lib.dr.config.facade import DrRole -+from pcs.lib.commands import dr -+ -+DR_CFG_DESC = "disaster-recovery config" -+ -+COROSYNC_CONF_TEMPLATE = """\ -+totem {{ -+ version: 2 -+ cluster_name: cluster_name -+}} -+ -+nodelist {{ -+{node_list}}} -+""" -+ -+NODE_TEMPLATE_NO_NAME = """\ -+ node {{ -+ ring0_addr: {node} -+ nodeid: {id} -+ }} -+""" -+ -+NODE_TEMPLATE = """\ -+ node {{ -+ ring0_addr: {node} -+ name: {node} -+ nodeid: {id} -+ }} -+""" -+ -+ -+def export_cfg(cfg_struct): -+ return json.dumps(cfg_struct, indent=4, sort_keys=True).encode("utf-8") -+ -+def dr_cfg_fixture(local_role, remote_role, nodes): -+ return export_cfg(dict( -+ local=dict( -+ role=local_role.value, -+ ), -+ remote_sites=[ -+ dict( -+ role=remote_role.value, -+ nodes=[dict(name=node) for node in nodes], -+ ), -+ ] -+ )) -+ -+def corosync_conf_fixture(node_list): -+ return COROSYNC_CONF_TEMPLATE.format( -+ node_list="\n".join(node_list_fixture(node_list)), -+ ) -+ -+def node_list_fixture(node_list): -+ return [ -+ NODE_TEMPLATE.format(node=node, id=i) -+ for i, node in enumerate(node_list, start=1) -+ ] -+ -+ -+def generate_nodes(nodes_num, prefix=""): -+ return [f"{prefix}node{i}" for i in range(1, nodes_num + 1)] -+ -+ -+class CheckLive(TestCase): -+ def setUp(self): -+ self.env_assist, self.config = get_env_tools(self) -+ -+ def assert_live_required(self, forbidden_options): -+ self.env_assist.assert_raise_library_error( -+ lambda: dr.set_recovery_site(self.env_assist.get_env(), "node"), -+ [ -+ fixture.error( -+ report_codes.LIVE_ENVIRONMENT_REQUIRED, -+ forbidden_options=forbidden_options -+ ) -+ ], -+ expected_in_processor=False -+ ) -+ -+ def test_mock_corosync(self): -+ self.config.env.set_corosync_conf_data( -+ corosync_conf_fixture(generate_nodes(3)) -+ ) -+ self.assert_live_required([file_type_codes.COROSYNC_CONF]) -+ -+ def test_mock_cib(self): -+ self.config.env.set_cib_data("") -+ self.assert_live_required([file_type_codes.CIB]) -+ -+ def test_mock(self): -+ self.config.env.set_corosync_conf_data( -+ corosync_conf_fixture(generate_nodes(3)) -+ ) -+ self.config.env.set_cib_data("") -+ self.assert_live_required([ -+ file_type_codes.CIB, -+ file_type_codes.COROSYNC_CONF, -+ ]) -+ -+ -+class SetRecoverySiteSuccess(TestCase): -+ def setUp(self): -+ self.env_assist, self.config = get_env_tools(self) -+ -+ def _test_minimal(self, local_cluster_size, 
recovery_cluster_size): -+ local_nodes = generate_nodes(local_cluster_size) -+ remote_nodes = generate_nodes(recovery_cluster_size, prefix="recovery-") -+ orig_node = remote_nodes[-1] -+ cfg = self.config -+ cfg.env.set_known_nodes(local_nodes + remote_nodes) -+ cfg.raw_file.exists( -+ file_type_codes.PCS_DR_CONFIG, -+ settings.pcsd_dr_config_location, -+ exists=False, -+ ) -+ cfg.corosync_conf.load_content(corosync_conf_fixture(local_nodes)) -+ cfg.http.corosync.get_corosync_conf( -+ corosync_conf_fixture(remote_nodes), node_labels=[orig_node] -+ ) -+ cfg.http.files.put_files( -+ node_labels=remote_nodes, -+ pcs_disaster_recovery_conf=dr_cfg_fixture( -+ DrRole.RECOVERY, DrRole.PRIMARY, local_nodes -+ ), -+ name="distribute_remote", -+ ) -+ cfg.http.files.put_files( -+ node_labels=local_nodes, -+ pcs_disaster_recovery_conf=dr_cfg_fixture( -+ DrRole.PRIMARY, DrRole.RECOVERY, remote_nodes -+ ), -+ name="distribute_local", -+ ) -+ dr.set_recovery_site(self.env_assist.get_env(), orig_node) -+ self.env_assist.assert_reports( -+ [ -+ fixture.info( -+ report_codes.FILES_DISTRIBUTION_STARTED, -+ file_list=[DR_CFG_DESC], -+ node_list=remote_nodes, -+ ) -+ ] + [ -+ fixture.info( -+ report_codes.FILE_DISTRIBUTION_SUCCESS, -+ file_description=DR_CFG_DESC, -+ node=node, -+ ) for node in remote_nodes -+ ] + [ -+ fixture.info( -+ report_codes.FILES_DISTRIBUTION_STARTED, -+ file_list=[DR_CFG_DESC], -+ node_list=local_nodes, -+ ) -+ ] + [ -+ fixture.info( -+ report_codes.FILE_DISTRIBUTION_SUCCESS, -+ file_description=DR_CFG_DESC, -+ node=node, -+ ) for node in local_nodes -+ ] -+ ) -+ -+ def test_minimal_local_1_remote_1(self): -+ self._test_minimal(1, 1) -+ -+ def test_minimal_local_1_remote_2(self): -+ self._test_minimal(1, 2) -+ -+ def test_minimal_local_1_remote_3(self): -+ self._test_minimal(1, 3) -+ -+ def test_minimal_local_2_remote_1(self): -+ self._test_minimal(2, 1) -+ -+ def test_minimal_local_2_remote_2(self): -+ self._test_minimal(2, 2) -+ -+ def test_minimal_local_2_remote_3(self): -+ self._test_minimal(2, 3) -+ -+ def test_minimal_local_3_remote_1(self): -+ self._test_minimal(3, 1) -+ -+ def test_minimal_local_3_remote_2(self): -+ self._test_minimal(3, 2) -+ -+ def test_minimal_local_3_remote_3(self): -+ self._test_minimal(3, 3) -+ -+ -+class FailureValidations(TestCase): -+ def setUp(self): -+ self.env_assist, self.config = get_env_tools(self) -+ self.local_nodes = generate_nodes(4) -+ -+ def test_dr_cfg_exist(self): -+ orig_node = "node" -+ cfg = self.config -+ cfg.env.set_known_nodes(self.local_nodes + [orig_node]) -+ cfg.raw_file.exists( -+ file_type_codes.PCS_DR_CONFIG, -+ settings.pcsd_dr_config_location, -+ exists=True, -+ ) -+ cfg.corosync_conf.load_content(corosync_conf_fixture(self.local_nodes)) -+ self.env_assist.assert_raise_library_error( -+ lambda: dr.set_recovery_site(self.env_assist.get_env(), orig_node), -+ ) -+ self.env_assist.assert_reports([ -+ fixture.error( -+ report_codes.DR_CONFIG_ALREADY_EXIST, -+ ) -+ ]) -+ -+ def test_local_nodes_name_missing(self): -+ orig_node = "node" -+ cfg = self.config -+ cfg.env.set_known_nodes(self.local_nodes + [orig_node]) -+ cfg.raw_file.exists( -+ file_type_codes.PCS_DR_CONFIG, -+ settings.pcsd_dr_config_location, -+ exists=False, -+ ) -+ cfg.corosync_conf.load_content( -+ COROSYNC_CONF_TEMPLATE.format( -+ node_list="\n".join( -+ [ -+ NODE_TEMPLATE_NO_NAME.format( -+ node=self.local_nodes[0], id=len(self.local_nodes) -+ ) -+ ] + node_list_fixture(self.local_nodes[1:]) -+ ) -+ ) -+ ) -+ self.env_assist.assert_raise_library_error( -+ 
lambda: dr.set_recovery_site(self.env_assist.get_env(), orig_node), -+ ) -+ self.env_assist.assert_reports([ -+ fixture.error( -+ report_codes.COROSYNC_CONFIG_MISSING_NAMES_OF_NODES, -+ fatal=True, -+ ) -+ ]) -+ -+ def test_node_part_of_local_cluster(self): -+ orig_node = self.local_nodes[-1] -+ cfg = self.config -+ cfg.env.set_known_nodes(self.local_nodes + [orig_node]) -+ cfg.raw_file.exists( -+ file_type_codes.PCS_DR_CONFIG, -+ settings.pcsd_dr_config_location, -+ exists=False, -+ ) -+ cfg.corosync_conf.load_content(corosync_conf_fixture(self.local_nodes)) -+ self.env_assist.assert_raise_library_error( -+ lambda: dr.set_recovery_site(self.env_assist.get_env(), orig_node), -+ ) -+ self.env_assist.assert_reports([ -+ fixture.error( -+ report_codes.NODE_IN_LOCAL_CLUSTER, -+ node=orig_node, -+ ) -+ ]) -+ -+ def test_tokens_missing_for_local_nodes(self): -+ orig_node = "node" -+ cfg = self.config -+ cfg.env.set_known_nodes(self.local_nodes[:-1] + [orig_node]) -+ cfg.raw_file.exists( -+ file_type_codes.PCS_DR_CONFIG, -+ settings.pcsd_dr_config_location, -+ exists=False, -+ ) -+ cfg.corosync_conf.load_content(corosync_conf_fixture(self.local_nodes)) -+ self.env_assist.assert_raise_library_error( -+ lambda: dr.set_recovery_site(self.env_assist.get_env(), orig_node), -+ ) -+ self.env_assist.assert_reports([ -+ fixture.error( -+ report_codes.HOST_NOT_FOUND, -+ host_list=self.local_nodes[-1:], -+ ) -+ ]) -+ -+ def test_token_missing_for_node(self): -+ orig_node = "node" -+ cfg = self.config -+ cfg.env.set_known_nodes(self.local_nodes) -+ cfg.raw_file.exists( -+ file_type_codes.PCS_DR_CONFIG, -+ settings.pcsd_dr_config_location, -+ exists=False, -+ ) -+ cfg.corosync_conf.load_content(corosync_conf_fixture(self.local_nodes)) -+ self.env_assist.assert_raise_library_error( -+ lambda: dr.set_recovery_site(self.env_assist.get_env(), orig_node), -+ ) -+ self.env_assist.assert_reports([ -+ fixture.error( -+ report_codes.HOST_NOT_FOUND, -+ host_list=[orig_node], -+ ) -+ ]) -+ -+ def test_tokens_missing_for_remote_cluster(self): -+ remote_nodes = generate_nodes(3, prefix="recovery-") -+ orig_node = remote_nodes[0] -+ cfg = self.config -+ cfg.env.set_known_nodes(self.local_nodes + remote_nodes[:-1]) -+ cfg.raw_file.exists( -+ file_type_codes.PCS_DR_CONFIG, -+ settings.pcsd_dr_config_location, -+ exists=False, -+ ) -+ cfg.corosync_conf.load_content(corosync_conf_fixture(self.local_nodes)) -+ cfg.http.corosync.get_corosync_conf( -+ corosync_conf_fixture(remote_nodes), node_labels=[orig_node] -+ ) -+ self.env_assist.assert_raise_library_error( -+ lambda: dr.set_recovery_site(self.env_assist.get_env(), orig_node), -+ ) -+ self.env_assist.assert_reports([ -+ fixture.error( -+ report_codes.HOST_NOT_FOUND, -+ host_list=remote_nodes[-1:], -+ ) -+ ]) -+ -+ -+REASON = "error msg" -+ -+ -+class FailureRemoteCorocyncConf(TestCase): -+ def setUp(self): -+ self.env_assist, self.config = get_env_tools(self) -+ self.local_nodes = generate_nodes(4) -+ self.remote_nodes = generate_nodes(3, prefix="recovery-") -+ self.node = self.remote_nodes[0] -+ -+ self.config.env.set_known_nodes(self.local_nodes + self.remote_nodes) -+ self.config.raw_file.exists( -+ file_type_codes.PCS_DR_CONFIG, -+ settings.pcsd_dr_config_location, -+ exists=False, -+ ) -+ self.config.corosync_conf.load_content( -+ corosync_conf_fixture(self.local_nodes) -+ ) -+ -+ def test_network_issue(self): -+ self.config.http.corosync.get_corosync_conf( -+ communication_list=[ -+ dict( -+ label=self.node, -+ was_connected=False, -+ error_msg=REASON, -+ ) -+ ] -+ ) 
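-+        # The recovery site's corosync.conf is fetched from a single node;
-+        # when that connection fails there is no other node to fall back to,
-+        # so the command is expected to fail with
-+        # UNABLE_TO_PERFORM_OPERATION_ON_ANY_NODE.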
-+ self.env_assist.assert_raise_library_error( -+ lambda: dr.set_recovery_site(self.env_assist.get_env(), self.node), -+ ) -+ self.env_assist.assert_reports([ -+ fixture.warn( -+ report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT, -+ node=self.node, -+ command="remote/get_corosync_conf", -+ reason=REASON, -+ -+ ), -+ fixture.error(report_codes.UNABLE_TO_PERFORM_OPERATION_ON_ANY_NODE) -+ ]) -+ -+ def test_file_does_not_exist(self): -+ self.config.http.corosync.get_corosync_conf( -+ communication_list=[ -+ dict( -+ label=self.node, -+ response_code=400, -+ output=REASON, -+ ) -+ ] -+ ) -+ self.env_assist.assert_raise_library_error( -+ lambda: dr.set_recovery_site(self.env_assist.get_env(), self.node), -+ ) -+ self.env_assist.assert_reports([ -+ fixture.warn( -+ report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL, -+ node=self.node, -+ command="remote/get_corosync_conf", -+ reason=REASON, -+ -+ ), -+ fixture.error(report_codes.UNABLE_TO_PERFORM_OPERATION_ON_ANY_NODE) -+ ]) -+ -+ def test_node_names_missing(self): -+ self.config.http.corosync.get_corosync_conf( -+ COROSYNC_CONF_TEMPLATE.format( -+ node_list="\n".join( -+ [ -+ NODE_TEMPLATE_NO_NAME.format( -+ node=self.remote_nodes[-1], -+ id=len(self.remote_nodes), -+ ) -+ ] + node_list_fixture(self.remote_nodes[:-1]) -+ ) -+ ), -+ node_labels=[self.node], -+ ) -+ self.env_assist.assert_raise_library_error( -+ lambda: dr.set_recovery_site(self.env_assist.get_env(), self.node), -+ ) -+ self.env_assist.assert_reports([ -+ fixture.error( -+ report_codes.COROSYNC_CONFIG_MISSING_NAMES_OF_NODES, -+ fatal=True, -+ ) -+ ]) -+ -+ -+class FailureRemoteDrCfgDistribution(TestCase): -+ # pylint: disable=too-many-instance-attributes -+ def setUp(self): -+ self.env_assist, self.config = get_env_tools(self) -+ self.local_nodes = generate_nodes(4) -+ self.remote_nodes = generate_nodes(3, prefix="recovery-") -+ self.node = self.remote_nodes[0] -+ self.failed_nodes = self.remote_nodes[-1:] -+ successful_nodes = self.remote_nodes[:-1] -+ -+ self.config.env.set_known_nodes(self.local_nodes + self.remote_nodes) -+ self.config.raw_file.exists( -+ file_type_codes.PCS_DR_CONFIG, -+ settings.pcsd_dr_config_location, -+ exists=False, -+ ) -+ self.config.corosync_conf.load_content( -+ corosync_conf_fixture(self.local_nodes) -+ ) -+ self.config.http.corosync.get_corosync_conf( -+ corosync_conf_fixture(self.remote_nodes), node_labels=[self.node] -+ ) -+ -+ self.success_communication = [ -+ dict(label=node) for node in successful_nodes -+ ] -+ self.expected_reports = [ -+ fixture.info( -+ report_codes.FILES_DISTRIBUTION_STARTED, -+ file_list=[DR_CFG_DESC], -+ node_list=self.remote_nodes, -+ ) -+ ] + [ -+ fixture.info( -+ report_codes.FILE_DISTRIBUTION_SUCCESS, -+ file_description=DR_CFG_DESC, -+ node=node, -+ ) for node in successful_nodes -+ ] -+ -+ def test_write_failure(self): -+ self.config.http.files.put_files( -+ communication_list=self.success_communication + [ -+ dict( -+ label=node, -+ output=json.dumps(dict(files={ -+ DR_CFG_DESC: dict( -+ code="unexpected", -+ message=REASON -+ ), -+ })) -+ ) for node in self.failed_nodes -+ ], -+ pcs_disaster_recovery_conf=dr_cfg_fixture( -+ DrRole.RECOVERY, DrRole.PRIMARY, self.local_nodes -+ ), -+ ) -+ self.env_assist.assert_raise_library_error( -+ lambda: dr.set_recovery_site(self.env_assist.get_env(), self.node), -+ ) -+ self.env_assist.assert_reports( -+ self.expected_reports + [ -+ fixture.error( -+ report_codes.FILE_DISTRIBUTION_ERROR, -+ file_description=DR_CFG_DESC, -+ reason=REASON, -+ node=node, -+ ) for node in 
self.failed_nodes -+ ] -+ ) -+ -+ def test_network_failure(self): -+ self.config.http.files.put_files( -+ communication_list=self.success_communication + [ -+ dict( -+ label=node, -+ was_connected=False, -+ error_msg=REASON, -+ ) for node in self.failed_nodes -+ ], -+ pcs_disaster_recovery_conf=dr_cfg_fixture( -+ DrRole.RECOVERY, DrRole.PRIMARY, self.local_nodes -+ ), -+ ) -+ self.env_assist.assert_raise_library_error( -+ lambda: dr.set_recovery_site(self.env_assist.get_env(), self.node), -+ ) -+ self.env_assist.assert_reports( -+ self.expected_reports + [ -+ fixture.error( -+ report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT, -+ command="remote/put_file", -+ reason=REASON, -+ node=node, -+ ) for node in self.failed_nodes -+ ] -+ ) -+ -+ def test_communication_error(self): -+ self.config.http.files.put_files( -+ communication_list=self.success_communication + [ -+ dict( -+ label=node, -+ response_code=400, -+ output=REASON, -+ ) for node in self.failed_nodes -+ ], -+ pcs_disaster_recovery_conf=dr_cfg_fixture( -+ DrRole.RECOVERY, DrRole.PRIMARY, self.local_nodes -+ ), -+ ) -+ self.env_assist.assert_raise_library_error( -+ lambda: dr.set_recovery_site(self.env_assist.get_env(), self.node), -+ ) -+ self.env_assist.assert_reports( -+ self.expected_reports + [ -+ fixture.error( -+ report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL, -+ command="remote/put_file", -+ reason=REASON, -+ node=node, -+ ) for node in self.failed_nodes -+ ] -+ ) -+ -+ -+class FailureLocalDrCfgDistribution(TestCase): -+ # pylint: disable=too-many-instance-attributes -+ def setUp(self): -+ self.env_assist, self.config = get_env_tools(self) -+ local_nodes = generate_nodes(4) -+ self.remote_nodes = generate_nodes(3, prefix="recovery-") -+ self.node = self.remote_nodes[0] -+ self.failed_nodes = local_nodes[-1:] -+ successful_nodes = local_nodes[:-1] -+ -+ self.config.env.set_known_nodes(local_nodes + self.remote_nodes) -+ self.config.raw_file.exists( -+ file_type_codes.PCS_DR_CONFIG, -+ settings.pcsd_dr_config_location, -+ exists=False, -+ ) -+ self.config.corosync_conf.load_content( -+ corosync_conf_fixture(local_nodes) -+ ) -+ self.config.http.corosync.get_corosync_conf( -+ corosync_conf_fixture(self.remote_nodes), node_labels=[self.node] -+ ) -+ self.config.http.files.put_files( -+ node_labels=self.remote_nodes, -+ pcs_disaster_recovery_conf=dr_cfg_fixture( -+ DrRole.RECOVERY, DrRole.PRIMARY, local_nodes -+ ), -+ name="distribute_remote", -+ ) -+ -+ self.success_communication = [ -+ dict(label=node) for node in successful_nodes -+ ] -+ self.expected_reports = [ -+ fixture.info( -+ report_codes.FILES_DISTRIBUTION_STARTED, -+ file_list=[DR_CFG_DESC], -+ node_list=self.remote_nodes, -+ ) -+ ] + [ -+ fixture.info( -+ report_codes.FILE_DISTRIBUTION_SUCCESS, -+ file_description=DR_CFG_DESC, -+ node=node, -+ ) for node in self.remote_nodes -+ ] + [ -+ fixture.info( -+ report_codes.FILES_DISTRIBUTION_STARTED, -+ file_list=[DR_CFG_DESC], -+ node_list=local_nodes, -+ ) -+ ] + [ -+ fixture.info( -+ report_codes.FILE_DISTRIBUTION_SUCCESS, -+ file_description=DR_CFG_DESC, -+ node=node, -+ ) for node in successful_nodes -+ ] -+ -+ def test_write_failure(self): -+ self.config.http.files.put_files( -+ communication_list=self.success_communication + [ -+ dict( -+ label=node, -+ output=json.dumps(dict(files={ -+ DR_CFG_DESC: dict( -+ code="unexpected", -+ message=REASON -+ ), -+ })) -+ ) for node in self.failed_nodes -+ ], -+ pcs_disaster_recovery_conf=dr_cfg_fixture( -+ DrRole.PRIMARY, DrRole.RECOVERY, self.remote_nodes -+ ), -+ ) 
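-+        # A write error reported by the failed local node is expected to
-+        # surface as FILE_DISTRIBUTION_ERROR for that node, in addition to
-+        # the success reports already collected for the remote site and the
-+        # remaining local nodes.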
-+ self.env_assist.assert_raise_library_error( -+ lambda: dr.set_recovery_site(self.env_assist.get_env(), self.node), -+ ) -+ self.env_assist.assert_reports( -+ self.expected_reports + [ -+ fixture.error( -+ report_codes.FILE_DISTRIBUTION_ERROR, -+ file_description=DR_CFG_DESC, -+ reason=REASON, -+ node=node, -+ ) for node in self.failed_nodes -+ ] -+ ) -+ -+ def test_network_failure(self): -+ self.config.http.files.put_files( -+ communication_list=self.success_communication + [ -+ dict( -+ label=node, -+ was_connected=False, -+ error_msg=REASON, -+ ) for node in self.failed_nodes -+ ], -+ pcs_disaster_recovery_conf=dr_cfg_fixture( -+ DrRole.PRIMARY, DrRole.RECOVERY, self.remote_nodes -+ ), -+ ) -+ self.env_assist.assert_raise_library_error( -+ lambda: dr.set_recovery_site(self.env_assist.get_env(), self.node), -+ ) -+ self.env_assist.assert_reports( -+ self.expected_reports + [ -+ fixture.error( -+ report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT, -+ command="remote/put_file", -+ reason=REASON, -+ node=node, -+ ) for node in self.failed_nodes -+ ] -+ ) -+ -+ def test_communication_error(self): -+ self.config.http.files.put_files( -+ communication_list=self.success_communication + [ -+ dict( -+ label=node, -+ response_code=400, -+ output=REASON, -+ ) for node in self.failed_nodes -+ ], -+ pcs_disaster_recovery_conf=dr_cfg_fixture( -+ DrRole.PRIMARY, DrRole.RECOVERY, self.remote_nodes -+ ), -+ ) -+ self.env_assist.assert_raise_library_error( -+ lambda: dr.set_recovery_site(self.env_assist.get_env(), self.node), -+ ) -+ self.env_assist.assert_reports( -+ self.expected_reports + [ -+ fixture.error( -+ report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL, -+ command="remote/put_file", -+ reason=REASON, -+ node=node, -+ ) for node in self.failed_nodes -+ ] -+ ) -diff --git a/pcs_test/tier0/lib/commands/dr/test_status.py b/pcs_test/tier0/lib/commands/dr/test_status.py -new file mode 100644 -index 00000000..b46eb757 ---- /dev/null -+++ b/pcs_test/tier0/lib/commands/dr/test_status.py -@@ -0,0 +1,756 @@ -+import json -+import re -+from unittest import TestCase -+ -+from pcs import settings -+from pcs.common import ( -+ file_type_codes, -+ report_codes, -+) -+from pcs.common.dr import DrRole -+from pcs.common.file import RawFileError -+from pcs.lib.commands import dr -+ -+from pcs_test.tools.command_env import get_env_tools -+from pcs_test.tools import fixture -+ -+ -+REASON = "error msg" -+ -+class CheckLive(TestCase): -+ def setUp(self): -+ self.env_assist, self.config = get_env_tools(self) -+ -+ def assert_live_required(self, forbidden_options): -+ self.env_assist.assert_raise_library_error( -+ lambda: dr.status_all_sites_plaintext(self.env_assist.get_env()), -+ [ -+ fixture.error( -+ report_codes.LIVE_ENVIRONMENT_REQUIRED, -+ forbidden_options=forbidden_options -+ ) -+ ], -+ expected_in_processor=False -+ ) -+ -+ def test_mock_corosync(self): -+ self.config.env.set_corosync_conf_data("corosync conf") -+ self.assert_live_required([file_type_codes.COROSYNC_CONF]) -+ -+ def test_mock_cib(self): -+ self.config.env.set_cib_data("") -+ self.assert_live_required([file_type_codes.CIB]) -+ -+ def test_mock(self): -+ self.config.env.set_corosync_conf_data("corosync conf") -+ self.config.env.set_cib_data("") -+ self.assert_live_required([ -+ file_type_codes.CIB, -+ file_type_codes.COROSYNC_CONF, -+ ]) -+ -+class FixtureMixin(): -+ def _set_up(self, local_node_count=2): -+ self.local_node_name_list = [ -+ f"node{i}" for i in range(1, local_node_count + 1) -+ ] -+ self.remote_node_name_list = 
["recovery-node"] -+ self.config.env.set_known_nodes( -+ self.local_node_name_list + self.remote_node_name_list -+ ) -+ self.local_status = "local cluster\nstatus\n" -+ self.remote_status = "remote cluster\nstatus\n" -+ -+ def _fixture_load_configs(self): -+ (self.config -+ .raw_file.exists( -+ file_type_codes.PCS_DR_CONFIG, -+ settings.pcsd_dr_config_location, -+ ) -+ .raw_file.read( -+ file_type_codes.PCS_DR_CONFIG, -+ settings.pcsd_dr_config_location, -+ content=""" -+ { -+ "local": { -+ "role": "PRIMARY" -+ }, -+ "remote_sites": [ -+ { -+ "nodes": [ -+ { -+ "name": "recovery-node" -+ } -+ ], -+ "role": "RECOVERY" -+ } -+ ] -+ } -+ """, -+ ) -+ .corosync_conf.load(node_name_list=self.local_node_name_list) -+ ) -+ -+ def _fixture_result(self, local_success=True, remote_success=True): -+ return [ -+ { -+ "local_site": True, -+ "site_role": DrRole.PRIMARY, -+ "status_plaintext": self.local_status if local_success else "", -+ "status_successfully_obtained": local_success, -+ }, -+ { -+ "local_site": False, -+ "site_role": DrRole.RECOVERY, -+ "status_plaintext": ( -+ self.remote_status if remote_success else "" -+ ), -+ "status_successfully_obtained": remote_success, -+ } -+ ] -+ -+class Success(FixtureMixin, TestCase): -+ def setUp(self): -+ self.env_assist, self.config = get_env_tools(self) -+ self._set_up() -+ -+ def _assert_success(self, hide_inactive_resources, verbose): -+ self._fixture_load_configs() -+ (self.config -+ .http.status.get_full_cluster_status_plaintext( -+ name="http.status.get_full_cluster_status_plaintext.local", -+ node_labels=self.local_node_name_list[:1], -+ hide_inactive_resources=hide_inactive_resources, -+ verbose=verbose, -+ cluster_status_plaintext=self.local_status, -+ ) -+ .http.status.get_full_cluster_status_plaintext( -+ name="http.status.get_full_cluster_status_plaintext.remote", -+ node_labels=self.remote_node_name_list[:1], -+ hide_inactive_resources=hide_inactive_resources, -+ verbose=verbose, -+ cluster_status_plaintext=self.remote_status, -+ ) -+ ) -+ result = dr.status_all_sites_plaintext( -+ self.env_assist.get_env(), -+ hide_inactive_resources=hide_inactive_resources, -+ verbose=verbose, -+ ) -+ self.assertEqual(result, self._fixture_result()) -+ -+ def test_success_minimal(self): -+ self._assert_success(False, False) -+ -+ def test_success_full(self): -+ self._assert_success(False, True) -+ -+ def test_success_hide_inactive(self): -+ self._assert_success(True, False) -+ -+ def test_success_all_flags(self): -+ self._assert_success(True, True) -+ -+ def test_local_not_running_first_node(self): -+ self._fixture_load_configs() -+ (self.config -+ .http.status.get_full_cluster_status_plaintext( -+ name="http.status.get_full_cluster_status_plaintext.local", -+ cluster_status_plaintext=self.local_status, -+ communication_list=[ -+ [dict( -+ label=self.local_node_name_list[0], -+ output=json.dumps(dict( -+ status="error", -+ status_msg="", -+ data=None, -+ report_list=[ -+ { -+ "severity": "ERROR", -+ "code": "CRM_MON_ERROR", -+ "info": { -+ "reason": REASON, -+ }, -+ "forceable": None, -+ "report_text": "translated report", -+ } -+ ] -+ )), -+ )], -+ [dict( -+ label=self.local_node_name_list[1], -+ )], -+ ] -+ ) -+ .http.status.get_full_cluster_status_plaintext( -+ name="http.status.get_full_cluster_status_plaintext.remote", -+ node_labels=self.remote_node_name_list[:1], -+ cluster_status_plaintext=self.remote_status, -+ ) -+ ) -+ result = dr.status_all_sites_plaintext(self.env_assist.get_env()) -+ self.assertEqual(result, self._fixture_result()) -+ 
self.env_assist.assert_reports([ -+ fixture.error( -+ report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL, -+ node=self.local_node_name_list[0], -+ command="remote/cluster_status_plaintext", -+ reason="translated report", -+ ), -+ ]) -+ -+ def test_local_not_running(self): -+ self._fixture_load_configs() -+ (self.config -+ .http.status.get_full_cluster_status_plaintext( -+ name="http.status.get_full_cluster_status_plaintext.local", -+ cmd_status="error", -+ cmd_status_msg="", -+ cluster_status_plaintext="", -+ report_list=[ -+ { -+ "severity": "ERROR", -+ "code": "CRM_MON_ERROR", -+ "info": { -+ "reason": REASON, -+ }, -+ "forceable": None, -+ "report_text": "translated report", -+ } -+ ], -+ communication_list=[ -+ [dict( -+ label=self.local_node_name_list[0], -+ )], -+ [dict( -+ label=self.local_node_name_list[1], -+ )], -+ ] -+ ) -+ .http.status.get_full_cluster_status_plaintext( -+ name="http.status.get_full_cluster_status_plaintext.remote", -+ node_labels=self.remote_node_name_list[:1], -+ cluster_status_plaintext=self.remote_status, -+ ) -+ ) -+ result = dr.status_all_sites_plaintext(self.env_assist.get_env()) -+ self.assertEqual(result, self._fixture_result(local_success=False)) -+ self.env_assist.assert_reports( -+ [ -+ fixture.error( -+ report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL, -+ node=node, -+ command="remote/cluster_status_plaintext", -+ reason="translated report", -+ ) -+ for node in self.local_node_name_list -+ ] -+ ) -+ -+ def test_remote_not_running(self): -+ self._fixture_load_configs() -+ (self.config -+ .http.status.get_full_cluster_status_plaintext( -+ name="http.status.get_full_cluster_status_plaintext.local", -+ node_labels=self.local_node_name_list[:1], -+ cluster_status_plaintext=self.local_status, -+ ) -+ .http.status.get_full_cluster_status_plaintext( -+ name="http.status.get_full_cluster_status_plaintext.remote", -+ node_labels=self.remote_node_name_list[:1], -+ cmd_status="error", -+ cmd_status_msg="", -+ cluster_status_plaintext="", -+ report_list=[ -+ { -+ "severity": "ERROR", -+ "code": "CRM_MON_ERROR", -+ "info": { -+ "reason": REASON, -+ }, -+ "forceable": None, -+ "report_text": "translated report", -+ } -+ ], -+ ) -+ ) -+ result = dr.status_all_sites_plaintext(self.env_assist.get_env()) -+ self.assertEqual(result, self._fixture_result(remote_success=False)) -+ self.env_assist.assert_reports( -+ [ -+ fixture.error( -+ report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL, -+ node=node, -+ command="remote/cluster_status_plaintext", -+ reason="translated report", -+ ) -+ for node in self.remote_node_name_list -+ ] -+ ) -+ -+ def test_both_not_running(self): -+ self._fixture_load_configs() -+ (self.config -+ .http.status.get_full_cluster_status_plaintext( -+ name="http.status.get_full_cluster_status_plaintext.local", -+ cmd_status="error", -+ cmd_status_msg="", -+ cluster_status_plaintext="", -+ report_list=[ -+ { -+ "severity": "ERROR", -+ "code": "CRM_MON_ERROR", -+ "info": { -+ "reason": REASON, -+ }, -+ "forceable": None, -+ "report_text": "translated report", -+ } -+ ], -+ communication_list=[ -+ [dict( -+ label=self.local_node_name_list[0], -+ )], -+ [dict( -+ label=self.local_node_name_list[1], -+ )], -+ ] -+ ) -+ .http.status.get_full_cluster_status_plaintext( -+ name="http.status.get_full_cluster_status_plaintext.remote", -+ node_labels=self.remote_node_name_list[:1], -+ cmd_status="error", -+ cmd_status_msg="", -+ cluster_status_plaintext="", -+ report_list=[ -+ { -+ "severity": "ERROR", -+ "code": "CRM_MON_ERROR", -+ "info": { -+ 
"reason": REASON, -+ }, -+ "forceable": None, -+ "report_text": "translated report", -+ } -+ ], -+ ) -+ ) -+ result = dr.status_all_sites_plaintext(self.env_assist.get_env()) -+ self.assertEqual(result, self._fixture_result( -+ local_success=False, remote_success=False -+ )) -+ self.env_assist.assert_reports( -+ [ -+ fixture.error( -+ report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL, -+ node=node, -+ command="remote/cluster_status_plaintext", -+ reason="translated report", -+ ) -+ for node in ( -+ self.local_node_name_list + self.remote_node_name_list -+ ) -+ ] -+ ) -+ -+ -+class CommunicationIssue(FixtureMixin, TestCase): -+ def setUp(self): -+ self.env_assist, self.config = get_env_tools(self) -+ self._set_up() -+ -+ def test_unknown_node(self): -+ self.config.env.set_known_nodes( -+ self.local_node_name_list[1:] + self.remote_node_name_list -+ ) -+ self._fixture_load_configs() -+ (self.config -+ .http.status.get_full_cluster_status_plaintext( -+ name="http.status.get_full_cluster_status_plaintext.local", -+ node_labels=self.local_node_name_list[1:], -+ cluster_status_plaintext=self.local_status, -+ ) -+ .http.status.get_full_cluster_status_plaintext( -+ name="http.status.get_full_cluster_status_plaintext.remote", -+ node_labels=self.remote_node_name_list[:1], -+ cluster_status_plaintext=self.remote_status, -+ ) -+ ) -+ result = dr.status_all_sites_plaintext(self.env_assist.get_env()) -+ self.assertEqual(result, self._fixture_result()) -+ self.env_assist.assert_reports([ -+ fixture.warn( -+ report_codes.HOST_NOT_FOUND, -+ host_list=["node1"], -+ ), -+ ]) -+ -+ def test_unknown_all_nodes_in_site(self): -+ self.config.env.set_known_nodes( -+ self.local_node_name_list -+ ) -+ self._fixture_load_configs() -+ self.env_assist.assert_raise_library_error( -+ lambda: dr.status_all_sites_plaintext(self.env_assist.get_env()), -+ ) -+ self.env_assist.assert_reports([ -+ fixture.warn( -+ report_codes.HOST_NOT_FOUND, -+ host_list=self.remote_node_name_list, -+ ), -+ fixture.error( -+ report_codes.NONE_HOST_FOUND, -+ ), -+ ]) -+ -+ def test_missing_node_names(self): -+ self._fixture_load_configs() -+ coro_call = self.config.calls.get("corosync_conf.load") -+ (self.config -+ .http.status.get_full_cluster_status_plaintext( -+ name="http.status.get_full_cluster_status_plaintext.local", -+ node_labels=[], -+ ) -+ .http.status.get_full_cluster_status_plaintext( -+ name="http.status.get_full_cluster_status_plaintext.remote", -+ node_labels=self.remote_node_name_list[:1], -+ cluster_status_plaintext=self.remote_status, -+ ) -+ ) -+ coro_call.content = re.sub(r"name: node\d", "", coro_call.content) -+ result = dr.status_all_sites_plaintext(self.env_assist.get_env()) -+ self.assertEqual(result, self._fixture_result(local_success=False)) -+ self.env_assist.assert_reports([ -+ fixture.warn( -+ report_codes.COROSYNC_CONFIG_MISSING_NAMES_OF_NODES, -+ fatal=False, -+ ), -+ ]) -+ -+ def test_node_issues(self): -+ self._set_up(local_node_count=7) -+ self._fixture_load_configs() -+ (self.config -+ .http.status.get_full_cluster_status_plaintext( -+ name="http.status.get_full_cluster_status_plaintext.local", -+ cluster_status_plaintext=self.local_status, -+ communication_list=[ -+ [dict( -+ label=self.local_node_name_list[0], -+ was_connected=False, -+ )], -+ [dict( -+ label=self.local_node_name_list[1], -+ response_code=401, -+ )], -+ [dict( -+ label=self.local_node_name_list[2], -+ response_code=500, -+ )], -+ [dict( -+ label=self.local_node_name_list[3], -+ response_code=404, -+ )], -+ [dict( -+ 
label=self.local_node_name_list[4], -+ output="invalid data", -+ )], -+ [dict( -+ label=self.local_node_name_list[5], -+ output=json.dumps(dict(status="success")) -+ )], -+ [dict( -+ label=self.local_node_name_list[6], -+ )], -+ ] -+ ) -+ .http.status.get_full_cluster_status_plaintext( -+ name="http.status.get_full_cluster_status_plaintext.remote", -+ node_labels=self.remote_node_name_list[:1], -+ cluster_status_plaintext=self.remote_status, -+ ) -+ ) -+ result = dr.status_all_sites_plaintext(self.env_assist.get_env()) -+ self.assertEqual(result, self._fixture_result()) -+ self.env_assist.assert_reports([ -+ fixture.warn( -+ report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT, -+ command="remote/cluster_status_plaintext", -+ node="node1", -+ reason=None, -+ ), -+ fixture.warn( -+ report_codes.NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED, -+ command="remote/cluster_status_plaintext", -+ node="node2", -+ reason="HTTP error: 401", -+ ), -+ fixture.warn( -+ report_codes.NODE_COMMUNICATION_ERROR, -+ command="remote/cluster_status_plaintext", -+ node="node3", -+ reason="HTTP error: 500", -+ ), -+ fixture.warn( -+ report_codes.NODE_COMMUNICATION_ERROR_UNSUPPORTED_COMMAND, -+ command="remote/cluster_status_plaintext", -+ node="node4", -+ reason="HTTP error: 404", -+ ), -+ fixture.warn( -+ report_codes.INVALID_RESPONSE_FORMAT, -+ node="node5", -+ ), -+ fixture.warn( -+ report_codes.INVALID_RESPONSE_FORMAT, -+ node="node6", -+ ), -+ ]) -+ -+ def test_local_site_down(self): -+ self._fixture_load_configs() -+ (self.config -+ .http.status.get_full_cluster_status_plaintext( -+ name="http.status.get_full_cluster_status_plaintext.local", -+ cluster_status_plaintext=self.local_status, -+ communication_list=[ -+ [dict( -+ label=self.local_node_name_list[0], -+ was_connected=False, -+ )], -+ [dict( -+ label=self.local_node_name_list[1], -+ was_connected=False, -+ )], -+ ] -+ ) -+ .http.status.get_full_cluster_status_plaintext( -+ name="http.status.get_full_cluster_status_plaintext.remote", -+ node_labels=self.remote_node_name_list[:1], -+ cluster_status_plaintext=self.remote_status, -+ ) -+ ) -+ result = dr.status_all_sites_plaintext(self.env_assist.get_env()) -+ self.assertEqual(result, self._fixture_result(local_success=False)) -+ self.env_assist.assert_reports([ -+ fixture.warn( -+ report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT, -+ command="remote/cluster_status_plaintext", -+ node="node1", -+ reason=None, -+ ), -+ fixture.warn( -+ report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT, -+ command="remote/cluster_status_plaintext", -+ node="node2", -+ reason=None, -+ ), -+ ]) -+ -+ def test_remote_site_down(self): -+ self._fixture_load_configs() -+ (self.config -+ .http.status.get_full_cluster_status_plaintext( -+ name="http.status.get_full_cluster_status_plaintext.local", -+ node_labels=self.local_node_name_list[:1], -+ cluster_status_plaintext=self.local_status, -+ ) -+ .http.status.get_full_cluster_status_plaintext( -+ name="http.status.get_full_cluster_status_plaintext.remote", -+ cluster_status_plaintext=self.remote_status, -+ communication_list=[ -+ [dict( -+ label=self.remote_node_name_list[0], -+ was_connected=False, -+ )], -+ ] -+ ) -+ ) -+ result = dr.status_all_sites_plaintext(self.env_assist.get_env()) -+ self.assertEqual(result, self._fixture_result(remote_success=False)) -+ self.env_assist.assert_reports([ -+ fixture.warn( -+ report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT, -+ command="remote/cluster_status_plaintext", -+ node="recovery-node", -+ reason=None, -+ ), -+ ]) -+ 
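-+ Note: the CommunicationIssue tests encode a per-node fallback: each inner list in
-+ communication_list is one attempt, connection failures and malformed replies are
-+ downgraded to warnings, and the next node of the site is tried until one answers.
-+ A minimal sketch of that pattern, assuming hypothetical send_request and warn
-+ callables (none of these names are the pcs API):
-+
-+     # Hypothetical illustration of the fallback behaviour these tests verify.
-+     def get_status_from_any_node(nodes, send_request, warn):
-+         for node in nodes:
-+             try:
-+                 response = send_request(node)  # dict parsed from the node's JSON
-+             except ConnectionError:
-+                 warn(f"{node}: unable to connect")  # cf. was_connected=False
-+                 continue
-+             if response.get("status") == "success" and "data" in response:
-+                 return response["data"]  # first usable answer wins
-+             warn(f"{node}: invalid response format")  # cf. node5 and node6
-+         return None  # no node in the site could provide the status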
-+ def test_both_sites_down(self): -+ self._fixture_load_configs() -+ (self.config -+ .http.status.get_full_cluster_status_plaintext( -+ name="http.status.get_full_cluster_status_plaintext.local", -+ cluster_status_plaintext=self.local_status, -+ communication_list=[ -+ [dict( -+ label=self.local_node_name_list[0], -+ was_connected=False, -+ )], -+ [dict( -+ label=self.local_node_name_list[1], -+ was_connected=False, -+ )], -+ ] -+ ) -+ .http.status.get_full_cluster_status_plaintext( -+ name="http.status.get_full_cluster_status_plaintext.remote", -+ cluster_status_plaintext=self.remote_status, -+ communication_list=[ -+ [dict( -+ label=self.remote_node_name_list[0], -+ was_connected=False, -+ )], -+ ] -+ ) -+ ) -+ result = dr.status_all_sites_plaintext(self.env_assist.get_env()) -+ self.assertEqual( -+ result, -+ self._fixture_result(local_success=False, remote_success=False) -+ ) -+ self.env_assist.assert_reports([ -+ fixture.warn( -+ report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT, -+ command="remote/cluster_status_plaintext", -+ node="node1", -+ reason=None, -+ ), -+ fixture.warn( -+ report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT, -+ command="remote/cluster_status_plaintext", -+ node="node2", -+ reason=None, -+ ), -+ fixture.warn( -+ report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT, -+ command="remote/cluster_status_plaintext", -+ node="recovery-node", -+ reason=None, -+ ), -+ ]) -+ -+ -+class FatalConfigIssue(TestCase): -+ def setUp(self): -+ self.env_assist, self.config = get_env_tools(self) -+ -+ def test_config_missing(self): -+ (self.config -+ .raw_file.exists( -+ file_type_codes.PCS_DR_CONFIG, -+ settings.pcsd_dr_config_location, -+ exists=False, -+ ) -+ ) -+ self.env_assist.assert_raise_library_error( -+ lambda: dr.status_all_sites_plaintext(self.env_assist.get_env()), -+ ) -+ self.env_assist.assert_reports([ -+ fixture.error( -+ report_codes.DR_CONFIG_DOES_NOT_EXIST, -+ ), -+ ]) -+ -+ def test_config_read_error(self): -+ (self.config -+ .raw_file.exists( -+ file_type_codes.PCS_DR_CONFIG, -+ settings.pcsd_dr_config_location, -+ ) -+ .raw_file.read( -+ file_type_codes.PCS_DR_CONFIG, -+ settings.pcsd_dr_config_location, -+ exception_msg=REASON, -+ ) -+ ) -+ self.env_assist.assert_raise_library_error( -+ lambda: dr.status_all_sites_plaintext(self.env_assist.get_env()), -+ ) -+ self.env_assist.assert_reports([ -+ fixture.error( -+ report_codes.FILE_IO_ERROR, -+ file_type_code=file_type_codes.PCS_DR_CONFIG, -+ file_path=settings.pcsd_dr_config_location, -+ operation=RawFileError.ACTION_READ, -+ reason=REASON, -+ ), -+ ]) -+ -+ def test_config_parse_error(self): -+ (self.config -+ .raw_file.exists( -+ file_type_codes.PCS_DR_CONFIG, -+ settings.pcsd_dr_config_location, -+ ) -+ .raw_file.read( -+ file_type_codes.PCS_DR_CONFIG, -+ settings.pcsd_dr_config_location, -+ content="bad content", -+ ) -+ ) -+ self.env_assist.assert_raise_library_error( -+ lambda: dr.status_all_sites_plaintext(self.env_assist.get_env()), -+ ) -+ self.env_assist.assert_reports([ -+ fixture.error( -+ report_codes.PARSE_ERROR_JSON_FILE, -+ file_type_code=file_type_codes.PCS_DR_CONFIG, -+ file_path=settings.pcsd_dr_config_location, -+ line_number=1, -+ column_number=1, -+ position=0, -+ reason="Expecting value", -+ full_msg="Expecting value: line 1 column 1 (char 0)", -+ ), -+ ]) -+ -+ def test_corosync_conf_read_error(self): -+ (self.config -+ .raw_file.exists( -+ file_type_codes.PCS_DR_CONFIG, -+ settings.pcsd_dr_config_location, -+ ) -+ .raw_file.read( -+ file_type_codes.PCS_DR_CONFIG, -+ 
settings.pcsd_dr_config_location, -+ content="{}", -+ ) -+ .corosync_conf.load_content("", exception_msg=REASON) -+ ) -+ self.env_assist.assert_raise_library_error( -+ lambda: dr.status_all_sites_plaintext(self.env_assist.get_env()), -+ [ -+ fixture.error( -+ report_codes.UNABLE_TO_READ_COROSYNC_CONFIG, -+ path=settings.corosync_conf_file, -+ reason=REASON, -+ ), -+ ], -+ expected_in_processor=False -+ ) -+ -+ def test_corosync_conf_parse_error(self): -+ (self.config -+ .raw_file.exists( -+ file_type_codes.PCS_DR_CONFIG, -+ settings.pcsd_dr_config_location, -+ ) -+ .raw_file.read( -+ file_type_codes.PCS_DR_CONFIG, -+ settings.pcsd_dr_config_location, -+ content="{}", -+ ) -+ .corosync_conf.load_content("wrong {\n corosync") -+ ) -+ self.env_assist.assert_raise_library_error( -+ lambda: dr.status_all_sites_plaintext(self.env_assist.get_env()), -+ [ -+ fixture.error( -+ report_codes -+ .PARSE_ERROR_COROSYNC_CONF_LINE_IS_NOT_SECTION_NOR_KEY_VALUE -+ ), -+ ], -+ expected_in_processor=False -+ ) -diff --git a/pcs_test/tier0/lib/communication/test_status.py b/pcs_test/tier0/lib/communication/test_status.py -new file mode 100644 -index 00000000..b8db7a73 ---- /dev/null -+++ b/pcs_test/tier0/lib/communication/test_status.py -@@ -0,0 +1,7 @@ -+from unittest import TestCase -+ -+class GetFullClusterStatusPlaintext(TestCase): -+ """ -+ tested in: -+ pcs_test.tier0.lib.commands.dr.test_status -+ """ -diff --git a/pcs_test/tier0/lib/dr/__init__.py b/pcs_test/tier0/lib/dr/__init__.py -new file mode 100644 -index 00000000..e69de29b -diff --git a/pcs_test/tier0/lib/dr/test_facade.py b/pcs_test/tier0/lib/dr/test_facade.py -new file mode 100644 -index 00000000..baa17b1e ---- /dev/null -+++ b/pcs_test/tier0/lib/dr/test_facade.py -@@ -0,0 +1,138 @@ -+from unittest import TestCase -+ -+from pcs.common.dr import DrRole -+from pcs.lib.dr.config import facade -+ -+ -+class Facade(TestCase): -+ def test_create(self): -+ for role in DrRole: -+ with self.subTest(local_role=role.value): -+ self.assertEqual( -+ dict( -+ local=dict( -+ role=role.value, -+ ), -+ remote_sites=[], -+ ), -+ facade.Facade.create(role).config, -+ ) -+ -+ def test_local_role(self): -+ for role in DrRole: -+ with self.subTest(local_role=role.value): -+ cfg = facade.Facade({ -+ "local": { -+ "role": role.value, -+ }, -+ "remote_sites": [ -+ ], -+ }) -+ self.assertEqual(cfg.local_role, role) -+ -+ def test_add_site(self): -+ node_list = [f"node{i}" for i in range(4)] -+ cfg = facade.Facade.create(DrRole.PRIMARY) -+ cfg.add_site(DrRole.RECOVERY, node_list) -+ self.assertEqual( -+ dict( -+ local=dict( -+ role=DrRole.PRIMARY.value, -+ ), -+ remote_sites=[ -+ dict( -+ role=DrRole.RECOVERY.value, -+ nodes=[dict(name=node) for node in node_list], -+ ), -+ ] -+ ), -+ cfg.config -+ ) -+ -+class GetRemoteSiteList(TestCase): -+ def test_no_sites(self): -+ cfg = facade.Facade({ -+ "local": { -+ "role": DrRole.PRIMARY.value, -+ }, -+ "remote_sites": [ -+ ], -+ }) -+ self.assertEqual( -+ cfg.get_remote_site_list(), -+ [] -+ ) -+ -+ def test_one_site(self): -+ cfg = facade.Facade({ -+ "local": { -+ "role": DrRole.PRIMARY.value, -+ }, -+ "remote_sites": [ -+ { -+ "role": DrRole.RECOVERY.value, -+ "nodes": [ -+ {"name": "node1"}, -+ ], -+ }, -+ ], -+ }) -+ self.assertEqual( -+ cfg.get_remote_site_list(), -+ [ -+ facade.DrSite(role=DrRole.RECOVERY, node_name_list=["node1"]), -+ ] -+ ) -+ -+ def test_more_sites(self): -+ cfg = facade.Facade({ -+ "local": { -+ "role": DrRole.RECOVERY.value, -+ }, -+ "remote_sites": [ -+ { -+ "role": DrRole.PRIMARY.value, -+ 
"nodes": [ -+ {"name": "nodeA1"}, -+ {"name": "nodeA2"}, -+ ], -+ }, -+ { -+ "role": DrRole.RECOVERY.value, -+ "nodes": [ -+ {"name": "nodeB1"}, -+ {"name": "nodeB2"}, -+ ], -+ }, -+ ], -+ }) -+ self.assertEqual( -+ cfg.get_remote_site_list(), -+ [ -+ facade.DrSite( -+ role=DrRole.PRIMARY, node_name_list=["nodeA1", "nodeA2"] -+ ), -+ facade.DrSite( -+ role=DrRole.RECOVERY, node_name_list=["nodeB1", "nodeB2"] -+ ), -+ ] -+ ) -+ -+ def test_no_nodes(self): -+ cfg = facade.Facade({ -+ "local": { -+ "role": DrRole.PRIMARY.value, -+ }, -+ "remote_sites": [ -+ { -+ "role": DrRole.RECOVERY.value, -+ "nodes": [], -+ }, -+ ], -+ }) -+ self.assertEqual( -+ cfg.get_remote_site_list(), -+ [ -+ facade.DrSite(role=DrRole.RECOVERY, node_name_list=[]), -+ ] -+ ) -diff --git a/pcs_test/tier0/lib/test_env.py b/pcs_test/tier0/lib/test_env.py -index edab9dc6..5c1c6a39 100644 ---- a/pcs_test/tier0/lib/test_env.py -+++ b/pcs_test/tier0/lib/test_env.py -@@ -9,7 +9,7 @@ from pcs_test.tools.misc import ( - get_test_resource as rc, - ) - --from pcs.common import report_codes -+from pcs.common import file_type_codes, report_codes - from pcs.lib.env import LibraryEnvironment - from pcs.lib.errors import ReportItemSeverity as severity - -@@ -57,6 +57,46 @@ class LibraryEnvironmentTest(TestCase): - env = LibraryEnvironment(self.mock_logger, self.mock_reporter) - self.assertEqual([], env.user_groups) - -+class GhostFileCodes(TestCase): -+ def setUp(self): -+ self.mock_logger = mock.MagicMock(logging.Logger) -+ self.mock_reporter = MockLibraryReportProcessor() -+ -+ def _fixture_get_env(self, cib_data=None, corosync_conf_data=None): -+ return LibraryEnvironment( -+ self.mock_logger, -+ self.mock_reporter, -+ cib_data=cib_data, -+ corosync_conf_data=corosync_conf_data -+ ) -+ -+ def test_nothing(self): -+ self.assertEqual( -+ self._fixture_get_env().ghost_file_codes, -+ set() -+ ) -+ -+ def test_corosync(self): -+ self.assertEqual( -+ self._fixture_get_env(corosync_conf_data="x").ghost_file_codes, -+ set([file_type_codes.COROSYNC_CONF]) -+ ) -+ -+ def test_cib(self): -+ self.assertEqual( -+ self._fixture_get_env(cib_data="x").ghost_file_codes, -+ set([file_type_codes.CIB]) -+ ) -+ -+ def test_all(self): -+ self.assertEqual( -+ self._fixture_get_env( -+ cib_data="x", -+ corosync_conf_data="x", -+ ).ghost_file_codes, -+ set([file_type_codes.COROSYNC_CONF, file_type_codes.CIB]) -+ ) -+ - @patch_env("CommandRunner") - class CmdRunner(TestCase): - def setUp(self): -diff --git a/pcs_test/tools/command_env/config_corosync_conf.py b/pcs_test/tools/command_env/config_corosync_conf.py -index 3db57cee..a0bd9f33 100644 ---- a/pcs_test/tools/command_env/config_corosync_conf.py -+++ b/pcs_test/tools/command_env/config_corosync_conf.py -@@ -9,9 +9,14 @@ class CorosyncConf: - self.__calls = call_collection - - def load_content( -- self, content, name="corosync_conf.load_content", instead=None -+ self, content, name="corosync_conf.load_content", instead=None, -+ exception_msg=None - ): -- self.__calls.place(name, Call(content), instead=instead) -+ self.__calls.place( -+ name, -+ Call(content, exception_msg=exception_msg), -+ instead=instead -+ ) - - def load( - self, node_name_list=None, name="corosync_conf.load", -diff --git a/pcs_test/tools/command_env/config_http.py b/pcs_test/tools/command_env/config_http.py -index 6827c2b1..911a82df 100644 ---- a/pcs_test/tools/command_env/config_http.py -+++ b/pcs_test/tools/command_env/config_http.py -@@ -7,6 +7,7 @@ from pcs_test.tools.command_env.config_http_files import FilesShortcuts - from 
pcs_test.tools.command_env.config_http_host import HostShortcuts - from pcs_test.tools.command_env.config_http_pcmk import PcmkShortcuts - from pcs_test.tools.command_env.config_http_sbd import SbdShortcuts -+from pcs_test.tools.command_env.config_http_status import StatusShortcuts - from pcs_test.tools.command_env.mock_node_communicator import( - place_communication, - place_requests, -@@ -34,6 +35,7 @@ def _mutual_exclusive(param_names, **kwargs): - - - class HttpConfig: -+ # pylint: disable=too-many-instance-attributes - def __init__(self, call_collection, wrap_helper): - self.__calls = call_collection - -@@ -43,6 +45,7 @@ class HttpConfig: - self.host = wrap_helper(HostShortcuts(self.__calls)) - self.pcmk = wrap_helper(PcmkShortcuts(self.__calls)) - self.sbd = wrap_helper(SbdShortcuts(self.__calls)) -+ self.status = wrap_helper(StatusShortcuts(self.__calls)) - - def add_communication(self, name, communication_list, **kwargs): - """ -diff --git a/pcs_test/tools/command_env/config_http_corosync.py b/pcs_test/tools/command_env/config_http_corosync.py -index f7df73c1..3d89e649 100644 ---- a/pcs_test/tools/command_env/config_http_corosync.py -+++ b/pcs_test/tools/command_env/config_http_corosync.py -@@ -29,6 +29,30 @@ class CorosyncShortcuts: - output='{"corosync":false}' - ) - -+ def get_corosync_conf( -+ self, -+ corosync_conf="", -+ node_labels=None, -+ communication_list=None, -+ name="http.corosync.get_corosync_conf", -+ ): -+ """ -+ Create a call for loading corosync.conf text from remote nodes -+ -+ string corosync_conf -- corosync.conf text to be loaded -+ list node_labels -- create success responses from these nodes -+ list communication_list -- create custom responses -+ string name -- the key of this call -+ """ -+ place_multinode_call( -+ self.__calls, -+ name, -+ node_labels, -+ communication_list, -+ action="remote/get_corosync_conf", -+ output=corosync_conf, -+ ) -+ - def set_corosync_conf( - self, corosync_conf, node_labels=None, communication_list=None, - name="http.corosync.set_corosync_conf" -diff --git a/pcs_test/tools/command_env/config_http_files.py b/pcs_test/tools/command_env/config_http_files.py -index 8cc9b878..b4e93d64 100644 ---- a/pcs_test/tools/command_env/config_http_files.py -+++ b/pcs_test/tools/command_env/config_http_files.py -@@ -11,9 +11,11 @@ class FilesShortcuts: - - def put_files( - self, node_labels=None, pcmk_authkey=None, corosync_authkey=None, -- corosync_conf=None, pcs_settings_conf=None, communication_list=None, -+ corosync_conf=None, pcs_disaster_recovery_conf=None, -+ pcs_settings_conf=None, communication_list=None, - name="http.files.put_files", - ): -+ # pylint: disable=too-many-arguments - """ - Create a call for the files distribution to the nodes. 
- -@@ -21,6 +23,7 @@ class FilesShortcuts: - pcmk_authkey bytes -- content of pacemaker authkey file - corosync_authkey bytes -- content of corosync authkey file - corosync_conf string -- content of corosync.conf -+ pcs_disaster_recovery_conf string -- content of pcs DR config - pcs_settings_conf string -- content of pcs_settings.conf - communication_list list -- create custom responses - name string -- the key of this call -@@ -58,6 +61,17 @@ class FilesShortcuts: - ) - output_data[file_id] = written_output_dict - -+ if pcs_disaster_recovery_conf: -+ file_id = "disaster-recovery config" -+ input_data[file_id] = dict( -+ data=base64.b64encode( -+ pcs_disaster_recovery_conf -+ ).decode("utf-8"), -+ type="pcs_disaster_recovery_conf", -+ rewrite_existing=True, -+ ) -+ output_data[file_id] = written_output_dict -+ - if pcs_settings_conf: - file_id = "pcs_settings.conf" - input_data[file_id] = dict( -@@ -78,7 +92,8 @@ class FilesShortcuts: - ) - - def remove_files( -- self, node_labels=None, pcsd_settings=False, communication_list=None, -+ self, node_labels=None, pcsd_settings=False, -+ pcs_disaster_recovery_conf=False, communication_list=None, - name="http.files.remove_files" - ): - """ -@@ -86,6 +101,7 @@ class FilesShortcuts: - - node_labels list -- create success responses from these nodes - pcsd_settings bool -- if True, remove file pcsd_settings -+ pcs_disaster_recovery_conf bool -- if True, remove pcs DR config - communication_list list -- create custom responses - name string -- the key of this call - """ -@@ -100,6 +116,14 @@ class FilesShortcuts: - message="", - ) - -+ if pcs_disaster_recovery_conf: -+ file_id = "pcs disaster-recovery config" -+ input_data[file_id] = dict(type="pcs_disaster_recovery_conf") -+ output_data[file_id] = dict( -+ code="deleted", -+ message="", -+ ) -+ - place_multinode_call( - self.__calls, - name, -diff --git a/pcs_test/tools/command_env/config_http_status.py b/pcs_test/tools/command_env/config_http_status.py -new file mode 100644 -index 00000000..888b27bb ---- /dev/null -+++ b/pcs_test/tools/command_env/config_http_status.py -@@ -0,0 +1,52 @@ -+import json -+ -+from pcs_test.tools.command_env.mock_node_communicator import ( -+ place_multinode_call, -+) -+ -+class StatusShortcuts: -+ def __init__(self, calls): -+ self.__calls = calls -+ -+ def get_full_cluster_status_plaintext( -+ self, node_labels=None, communication_list=None, -+ name="http.status.get_full_cluster_status_plaintext", -+ hide_inactive_resources=False, verbose=False, -+ cmd_status="success", cmd_status_msg="", report_list=None, -+ cluster_status_plaintext="", -+ ): -+ # pylint: disable=too-many-arguments -+ """ -+ Create a call for getting cluster status in plaintext -+ -+ node_labels list -- create success responses from these nodes -+ communication_list list -- create custom responses -+ name string -- the key of this call -+ bool hide_inactive_resources -- input flag -+ bool verbose -- input flag -+ string cmd_status -- did the command succeed? 
-+        string cmd_status_msg -- details for cmd_status
-+        iterable report_list -- reports from a remote node
-+        string cluster_status_plaintext -- resulting cluster status
-+        """
-+        report_list = report_list or []
-+        place_multinode_call(
-+            self.__calls,
-+            name,
-+            node_labels,
-+            communication_list,
-+            action="remote/cluster_status_plaintext",
-+            param_list=[(
-+                "data_json",
-+                json.dumps(dict(
-+                    hide_inactive_resources=hide_inactive_resources,
-+                    verbose=verbose,
-+                ))
-+            )],
-+            output=json.dumps(dict(
-+                status=cmd_status,
-+                status_msg=cmd_status_msg,
-+                data=cluster_status_plaintext,
-+                report_list=report_list,
-+            )),
-+        )
-diff --git a/pcs_test/tools/command_env/mock_get_local_corosync_conf.py b/pcs_test/tools/command_env/mock_get_local_corosync_conf.py
-index 854cb8f0..01eca5f1 100644
---- a/pcs_test/tools/command_env/mock_get_local_corosync_conf.py
-+++ b/pcs_test/tools/command_env/mock_get_local_corosync_conf.py
-@@ -1,10 +1,15 @@
-+from pcs import settings
-+from pcs.lib import reports
-+from pcs.lib.errors import LibraryError
-+
- CALL_TYPE_GET_LOCAL_COROSYNC_CONF = "CALL_TYPE_GET_LOCAL_COROSYNC_CONF"
- 
- class Call:
-     type = CALL_TYPE_GET_LOCAL_COROSYNC_CONF
- 
--    def __init__(self, content):
-+    def __init__(self, content, exception_msg=None):
-         self.content = content
-+        self.exception_msg = exception_msg
- 
-     def __repr__(self):
-         return str("")
-@@ -13,5 +18,10 @@ class Call:
- def get_get_local_corosync_conf(call_queue):
-     def get_local_corosync_conf():
-         _, expected_call = call_queue.take(CALL_TYPE_GET_LOCAL_COROSYNC_CONF)
-+        if expected_call.exception_msg:
-+            raise LibraryError(reports.corosync_config_read_error(
-+                settings.corosync_conf_file,
-+                expected_call.exception_msg,
-+            ))
-         return expected_call.content
-     return get_local_corosync_conf
-diff --git a/pcsd/capabilities.xml b/pcsd/capabilities.xml
-index f9a76a22..1adb57ce 100644
---- a/pcsd/capabilities.xml
-+++ b/pcsd/capabilities.xml
-@@ -1696,6 +1696,18 @@
- 
- 
- 
-+ 
-+ 
-+      Configure disaster-recovery with the local cluster as the primary site
-+      and one recovery site. Display local disaster-recovery config. Display
-+      status of all sites. Remove disaster-recovery config.
-+ 
-+      pcs commands: dr config, dr destroy, dr set-recovery-site, dr status
-+ 
-+ 
-+ 
-+ 
-+ 
- 
- 
-     Describe a resource agent - present its metadata.
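For reference, the config_http_status.py shortcut above wraps every remote answer in
the same JSON envelope: status, status_msg, data and report_list. A minimal sketch of
how a caller might unpack such an envelope, assuming only what the shortcut itself
shows (parse_status_response is an illustrative name, not part of pcs):

    import json

    # Hypothetical unpacking of the {status, status_msg, data, report_list}
    # envelope fabricated by get_full_cluster_status_plaintext above.
    def parse_status_response(raw):
        response = json.loads(raw)
        if response["status"] != "success":
            # status_msg carries whatever detail the remote side attached
            raise RuntimeError(
                response.get("status_msg") or "remote command failed"
            )
        # report_list items look like the report dicts used in the tests
        return response["data"], response.get("report_list", [])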
-diff --git a/pcsd/pcsd_file.rb b/pcsd/pcsd_file.rb -index 486b764d..d82b55d2 100644 ---- a/pcsd/pcsd_file.rb -+++ b/pcsd/pcsd_file.rb -@@ -198,6 +198,20 @@ module PcsdFile - end - end - -+ class PutPcsDrConf < PutFile -+ def full_file_name -+ @full_file_name ||= PCSD_DR_CONFIG_LOCATION -+ end -+ -+ def binary?() -+ return true -+ end -+ -+ def permissions() -+ return 0600 -+ end -+ end -+ - TYPES = { - "booth_authfile" => PutFileBoothAuthfile, - "booth_config" => PutFileBoothConfig, -@@ -205,6 +219,7 @@ module PcsdFile - "corosync_authkey" => PutFileCorosyncAuthkey, - "corosync_conf" => PutFileCorosyncConf, - "pcs_settings_conf" => PutPcsSettingsConf, -+ "pcs_disaster_recovery_conf" => PutPcsDrConf, - } - end - -diff --git a/pcsd/pcsd_remove_file.rb b/pcsd/pcsd_remove_file.rb -index 1038402d..ffaed8e3 100644 ---- a/pcsd/pcsd_remove_file.rb -+++ b/pcsd/pcsd_remove_file.rb -@@ -41,8 +41,15 @@ module PcsdRemoveFile - end - end - -+ class RemovePcsDrConf < RemoveFile -+ def full_file_name -+ @full_file_name ||= PCSD_DR_CONFIG_LOCATION -+ end -+ end -+ - TYPES = { - "pcmk_remote_authkey" => RemovePcmkRemoteAuthkey, - "pcsd_settings" => RemovePcsdSettings, -+ "pcs_disaster_recovery_conf" => RemovePcsDrConf, - } - end -diff --git a/pcsd/remote.rb b/pcsd/remote.rb -index 6f454681..28b91382 100644 ---- a/pcsd/remote.rb -+++ b/pcsd/remote.rb -@@ -27,6 +27,7 @@ def remote(params, request, auth_user) - :status => method(:node_status), - :status_all => method(:status_all), - :cluster_status => method(:cluster_status_remote), -+ :cluster_status_plaintext => method(:cluster_status_plaintext), - :auth => method(:auth), - :check_auth => method(:check_auth), - :cluster_setup => method(:cluster_setup), -@@ -219,6 +220,18 @@ def cluster_status_remote(params, request, auth_user) - return JSON.generate(status) - end - -+# get cluster status in plaintext (over-the-network version of 'pcs status') -+def cluster_status_plaintext(params, request, auth_user) -+ if not allowed_for_local_cluster(auth_user, Permissions::READ) -+ return 403, 'Permission denied' -+ end -+ return pcs_internal_proxy( -+ auth_user, -+ params.fetch(:data_json, ""), -+ "status.full_cluster_status_plaintext" -+ ) -+end -+ - def cluster_start(params, request, auth_user) - if params[:name] - code, response = send_request_with_token( -@@ -444,7 +457,11 @@ def get_corosync_conf_remote(params, request, auth_user) - if not allowed_for_local_cluster(auth_user, Permissions::READ) - return 403, 'Permission denied' - end -- return get_corosync_conf() -+ begin -+ return get_corosync_conf() -+ rescue -+ return 400, 'Unable to read corosync.conf' -+ end - end - - # deprecated, use /remote/put_file (note that put_file doesn't support backup -diff --git a/pcsd/settings.rb b/pcsd/settings.rb -index a6fd0a26..e8dc0c96 100644 ---- a/pcsd/settings.rb -+++ b/pcsd/settings.rb -@@ -9,6 +9,7 @@ KEY_FILE = PCSD_VAR_LOCATION + 'pcsd.key' - KNOWN_HOSTS_FILE_NAME = 'known-hosts' - PCSD_SETTINGS_CONF_LOCATION = PCSD_VAR_LOCATION + "pcs_settings.conf" - PCSD_USERS_CONF_LOCATION = PCSD_VAR_LOCATION + "pcs_users.conf" -+PCSD_DR_CONFIG_LOCATION = PCSD_VAR_LOCATION + "disaster-recovery" - - CRM_MON = "/usr/sbin/crm_mon" - CRM_NODE = "/usr/sbin/crm_node" -diff --git a/pcsd/settings.rb.debian b/pcsd/settings.rb.debian -index 5d830af9..daaae37b 100644 ---- a/pcsd/settings.rb.debian -+++ b/pcsd/settings.rb.debian -@@ -9,6 +9,7 @@ KEY_FILE = PCSD_VAR_LOCATION + 'pcsd.key' - KNOWN_HOSTS_FILE_NAME = 'known-hosts' - PCSD_SETTINGS_CONF_LOCATION = PCSD_VAR_LOCATION + 
"pcs_settings.conf" - PCSD_USERS_CONF_LOCATION = PCSD_VAR_LOCATION + "pcs_users.conf" -+PCSD_DR_CONFIG_LOCATION = PCSD_VAR_LOCATION + "disaster-recovery" - - CRM_MON = "/usr/sbin/crm_mon" - CRM_NODE = "/usr/sbin/crm_node" -diff --git a/pylintrc b/pylintrc -index 5fc4c200..9255a804 100644 ---- a/pylintrc -+++ b/pylintrc -@@ -19,7 +19,7 @@ max-parents=10 - min-public-methods=0 - - [BASIC] --good-names=e, i, op, ip, el, maxDiff, cm, ok, T -+good-names=e, i, op, ip, el, maxDiff, cm, ok, T, dr - - [VARIABLES] - # A regular expression matching the name of dummy variables (i.e. expectedly --- -2.21.0 - diff --git a/SOURCES/bz1743731-01-fix-error-msg-when-cluster-is-not-set-up.patch b/SOURCES/bz1743731-01-fix-error-msg-when-cluster-is-not-set-up.patch deleted file mode 100644 index 06f551e..0000000 --- a/SOURCES/bz1743731-01-fix-error-msg-when-cluster-is-not-set-up.patch +++ /dev/null @@ -1,130 +0,0 @@ -From 8058591d0d79942bf6c61f105a180592bac7cf69 Mon Sep 17 00:00:00 2001 -From: Ondrej Mular -Date: Thu, 28 Nov 2019 16:57:24 +0100 -Subject: [PATCH 2/3] fix error msg when cluster is not set up - ---- - CHANGELOG.md | 4 +++ - pcs/cluster.py | 3 +++ - pcs/lib/commands/qdevice.py | 2 ++ - pcs_test/tier0/lib/commands/test_qdevice.py | 27 +++++++++++++++++++-- - 4 files changed, 34 insertions(+), 2 deletions(-) - -diff --git a/CHANGELOG.md b/CHANGELOG.md -index 889436c3..5a7ec377 100644 ---- a/CHANGELOG.md -+++ b/CHANGELOG.md -@@ -6,7 +6,11 @@ - - It is possible to configure a disaster-recovery site and display its status - ([rhbz#1676431]) - -+### Fixed -+- Error messages in cases when cluster is not set up ([rhbz#1743731]) -+ - [rhbz#1676431]: https://bugzilla.redhat.com/show_bug.cgi?id=1676431 -+[rhbz#1743731]: https://bugzilla.redhat.com/show_bug.cgi?id=1743731 - - - ## [0.10.4] - 2019-11-28 -diff --git a/pcs/cluster.py b/pcs/cluster.py -index 9473675f..0e9b3365 100644 ---- a/pcs/cluster.py -+++ b/pcs/cluster.py -@@ -190,6 +190,9 @@ def start_cluster(argv): - wait_for_nodes_started(nodes, wait_timeout) - return - -+ if not utils.hasCorosyncConf(): -+ utils.err("cluster is not currently configured on this node") -+ - print("Starting Cluster...") - service_list = ["corosync"] - if utils.need_to_handle_qdevice_service(): -diff --git a/pcs/lib/commands/qdevice.py b/pcs/lib/commands/qdevice.py -index 3d7af234..41f7c296 100644 ---- a/pcs/lib/commands/qdevice.py -+++ b/pcs/lib/commands/qdevice.py -@@ -81,6 +81,8 @@ def qdevice_start(lib_env, model): - start qdevice now on local host - """ - _check_model(model) -+ if not qdevice_net.qdevice_initialized(): -+ raise LibraryError(reports.qdevice_not_initialized(model)) - _service_start(lib_env, qdevice_net.qdevice_start) - - def qdevice_stop(lib_env, model, proceed_if_used=False): -diff --git a/pcs_test/tier0/lib/commands/test_qdevice.py b/pcs_test/tier0/lib/commands/test_qdevice.py -index b2c83ca4..af23db61 100644 ---- a/pcs_test/tier0/lib/commands/test_qdevice.py -+++ b/pcs_test/tier0/lib/commands/test_qdevice.py -@@ -689,6 +689,7 @@ class QdeviceNetDisableTest(QdeviceTestCase): - ) - - -+@mock.patch("pcs.lib.corosync.qdevice_net.qdevice_initialized") - @mock.patch("pcs.lib.external.start_service") - @mock.patch.object( - LibraryEnvironment, -@@ -696,9 +697,11 @@ class QdeviceNetDisableTest(QdeviceTestCase): - lambda self: "mock_runner" - ) - class QdeviceNetStartTest(QdeviceTestCase): -- def test_success(self, mock_net_start): -+ def test_success(self, mock_net_start, mock_qdevice_initialized): -+ mock_qdevice_initialized.return_value = True - 
lib.qdevice_start(self.lib_env, "net") - mock_net_start.assert_called_once_with("mock_runner", "corosync-qnetd") -+ mock_qdevice_initialized.assert_called_once_with() - assert_report_item_list_equal( - self.mock_reporter.report_item_list, - [ -@@ -719,11 +722,12 @@ class QdeviceNetStartTest(QdeviceTestCase): - ] - ) - -- def test_failed(self, mock_net_start): -+ def test_failed(self, mock_net_start, mock_qdevice_initialized): - mock_net_start.side_effect = StartServiceError( - "test service", - "test error" - ) -+ mock_qdevice_initialized.return_value = True - - assert_raise_library_error( - lambda: lib.qdevice_start(self.lib_env, "net"), -@@ -737,6 +741,7 @@ class QdeviceNetStartTest(QdeviceTestCase): - ) - ) - mock_net_start.assert_called_once_with("mock_runner", "corosync-qnetd") -+ mock_qdevice_initialized.assert_called_once_with() - assert_report_item_list_equal( - self.mock_reporter.report_item_list, - [ -@@ -750,6 +755,24 @@ class QdeviceNetStartTest(QdeviceTestCase): - ] - ) - -+ def test_qdevice_not_initialized( -+ self, mock_net_start, mock_qdevice_initialized -+ ): -+ mock_qdevice_initialized.return_value = False -+ -+ assert_raise_library_error( -+ lambda: lib.qdevice_start(self.lib_env, "net"), -+ ( -+ severity.ERROR, -+ report_codes.QDEVICE_NOT_INITIALIZED, -+ { -+ "model": "net", -+ } -+ ) -+ ) -+ mock_net_start.assert_not_called() -+ mock_qdevice_initialized.assert_called_once_with() -+ - - @mock.patch("pcs.lib.corosync.qdevice_net.qdevice_status_cluster_text") - @mock.patch("pcs.lib.external.stop_service") --- -2.21.0 - diff --git a/SOURCES/bz1750427-01-link-to-sbd-man-page-from-sbd-enable-doc.patch b/SOURCES/bz1750427-01-link-to-sbd-man-page-from-sbd-enable-doc.patch deleted file mode 100644 index bfae069..0000000 --- a/SOURCES/bz1750427-01-link-to-sbd-man-page-from-sbd-enable-doc.patch +++ /dev/null @@ -1,40 +0,0 @@ -From e4ab588efe0f4cc6b5fcf0853293c93bd4f31604 Mon Sep 17 00:00:00 2001 -From: Ondrej Mular -Date: Wed, 29 Jan 2020 13:13:45 +0100 -Subject: [PATCH 4/7] link to sbd man page from `sbd enable` doc - ---- - pcs/pcs.8 | 2 +- - pcs/usage.py | 3 ++- - 2 files changed, 3 insertions(+), 2 deletions(-) - -diff --git a/pcs/pcs.8 b/pcs/pcs.8 -index 651fda83..ff2ba0b0 100644 ---- a/pcs/pcs.8 -+++ b/pcs/pcs.8 -@@ -531,7 +531,7 @@ history update - Update fence history from all nodes. - .TP - sbd enable [watchdog=[@]]... [device=[@]]... [=]... [\fB\-\-no\-watchdog\-validation\fR] --Enable SBD in cluster. Default path for watchdog device is /dev/watchdog. Allowed SBD options: SBD_WATCHDOG_TIMEOUT (default: 5), SBD_DELAY_START (default: no), SBD_STARTMODE (default: always) and SBD_TIMEOUT_ACTION. It is possible to specify up to 3 devices per node. If \fB\-\-no\-watchdog\-validation\fR is specified, validation of watchdogs will be skipped. -+Enable SBD in cluster. Default path for watchdog device is /dev/watchdog. Allowed SBD options: SBD_WATCHDOG_TIMEOUT (default: 5), SBD_DELAY_START (default: no), SBD_STARTMODE (default: always) and SBD_TIMEOUT_ACTION. SBD options are documented in sbd(8) man page. It is possible to specify up to 3 devices per node. If \fB\-\-no\-watchdog\-validation\fR is specified, validation of watchdogs will be skipped. - - .B WARNING: Cluster has to be restarted in order to apply these changes. - -diff --git a/pcs/usage.py b/pcs/usage.py -index e4f5af32..30c63964 100644 ---- a/pcs/usage.py -+++ b/pcs/usage.py -@@ -1147,7 +1147,8 @@ Commands: - Enable SBD in cluster. Default path for watchdog device is - /dev/watchdog. 
Allowed SBD options: SBD_WATCHDOG_TIMEOUT (default: 5), - SBD_DELAY_START (default: no), SBD_STARTMODE (default: always) and -- SBD_TIMEOUT_ACTION. It is possible to specify up to 3 devices per node. -+ SBD_TIMEOUT_ACTION. SBD options are documented in sbd(8) man page. It -+ is possible to specify up to 3 devices per node. - If --no-watchdog-validation is specified, validation of watchdogs will - be skipped. - --- -2.21.1 - diff --git a/SOURCES/bz1781303-01-fix-safe-disabling-clones-groups-bundles.patch b/SOURCES/bz1781303-01-fix-safe-disabling-clones-groups-bundles.patch deleted file mode 100644 index 02a5533..0000000 --- a/SOURCES/bz1781303-01-fix-safe-disabling-clones-groups-bundles.patch +++ /dev/null @@ -1,636 +0,0 @@ -From e56f42bf31ae0a52618fe8754fd0b2ae623e6a7a Mon Sep 17 00:00:00 2001 -From: Tomas Jelinek -Date: Thu, 12 Dec 2019 14:46:44 +0100 -Subject: [PATCH 1/7] squash bz1781303 fix safe-disabling clones, groups, - bundles - -fix simulate_cib_error report - -Putting only one CIB in the report is not enough info. Both original and -changed CIB as well as crm_simulate output would be needed. All that -info can be seen in debug messages. So there is no need to put it in the -report. ---- - pcs/cli/common/console_report.py | 7 +- - pcs/lib/cib/resource/common.py | 21 +- - pcs/lib/commands/resource.py | 27 +- - pcs/lib/pacemaker/live.py | 8 +- - pcs/lib/reports.py | 4 +- - .../tier0/cli/common/test_console_report.py | 10 +- - .../tier0/lib/cib/test_resource_common.py | 60 ++++- - .../resource/test_resource_enable_disable.py | 242 +++++++++++++++++- - pcs_test/tier0/lib/pacemaker/test_live.py | 7 - - 9 files changed, 350 insertions(+), 36 deletions(-) - -diff --git a/pcs/cli/common/console_report.py b/pcs/cli/common/console_report.py -index d349c823..60dbb2a0 100644 ---- a/pcs/cli/common/console_report.py -+++ b/pcs/cli/common/console_report.py -@@ -1269,8 +1269,11 @@ CODE_TO_MESSAGE_BUILDER_MAP = { - , - - codes.CIB_SIMULATE_ERROR: lambda info: -- "Unable to simulate changes in CIB: {reason}\n{cib}" -- .format(**info) -+ "Unable to simulate changes in CIB{_reason}" -+ .format( -+ _reason=format_optional(info["reason"], ": {0}"), -+ **info -+ ) - , - - codes.CIB_PUSH_FORCED_FULL_DUE_TO_CRM_FEATURE_SET: lambda info: -diff --git a/pcs/lib/cib/resource/common.py b/pcs/lib/cib/resource/common.py -index f1891003..e30c5e69 100644 ---- a/pcs/lib/cib/resource/common.py -+++ b/pcs/lib/cib/resource/common.py -@@ -1,8 +1,9 @@ - from collections import namedtuple - from typing import ( - cast, -+ List, - Optional, -- Sequence, -+ Set, - ) - from xml.etree.ElementTree import Element - -@@ -114,7 +115,23 @@ def find_primitives(resource_el): - return [resource_el] - return [] - --def get_inner_resources(resource_el: Element) -> Sequence[Element]: -+def get_all_inner_resources(resource_el: Element) -> Set[Element]: -+ """ -+ Return all inner resources (both direct and indirect) of a resource -+ Example: for a clone containing a group, this function will return both -+ the group and the resources inside the group -+ -+ resource_el -- resource element to get its inner resources -+ """ -+ all_inner: Set[Element] = set() -+ to_process = set([resource_el]) -+ while to_process: -+ new_inner = get_inner_resources(to_process.pop()) -+ to_process.update(set(new_inner) - all_inner) -+ all_inner.update(new_inner) -+ return all_inner -+ -+def get_inner_resources(resource_el: Element) -> List[Element]: - """ - Return list of inner resources (direct descendants) of a resource - specified as resource_el. 
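As its docstring says, get_all_inner_resources collects direct and indirect inner
resources, so a clone wrapping a group yields the group plus the group's members. A
small illustration of the kind of element it would receive (the ids here are made up;
the function itself is defined above):

    from xml.etree.ElementTree import fromstring

    # Illustrative input only: a clone wrapping a group, as in the docstring.
    clone = fromstring(
        '<clone id="G-clone">'
        '<group id="G"><primitive id="G1"/><primitive id="G2"/></group>'
        '</clone>'
    )
    # get_all_inner_resources(clone) would return the group element and both
    # primitives, i.e. ids {"G", "G1", "G2"}, matching the GetAllInnerResources
    # tests later in this patch ("E-clone" -> {"E", "E1", "E2"}).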
-diff --git a/pcs/lib/commands/resource.py b/pcs/lib/commands/resource.py -index 1b652ea4..4f975c7f 100644 ---- a/pcs/lib/commands/resource.py -+++ b/pcs/lib/commands/resource.py -@@ -802,7 +802,28 @@ def disable_safe(env, resource_ids, strict, wait): - with resource_environment( - env, wait, resource_ids, _ensure_disabled_after_wait(True) - ) as resources_section: -- _disable_validate_and_edit_cib(env, resources_section, resource_ids) -+ id_provider = IdProvider(resources_section) -+ resource_el_list = _find_resources_or_raise( -+ resources_section, -+ resource_ids -+ ) -+ env.report_processor.process_list( -+ _resource_list_enable_disable( -+ resource_el_list, -+ resource.common.disable, -+ id_provider, -+ env.get_cluster_state() -+ ) -+ ) -+ -+ inner_resources_names_set = set() -+ for resource_el in resource_el_list: -+ inner_resources_names_set.update({ -+ inner_resource_el.get("id") -+ for inner_resource_el -+ in resource.common.get_all_inner_resources(resource_el) -+ }) -+ - plaintext_status, transitions, dummy_cib = simulate_cib( - env.cmd_runner(), - get_root(resources_section) -@@ -830,6 +851,10 @@ def disable_safe(env, resource_ids, strict, wait): - exclude=resource_ids - ) - ) -+ -+ # Stopping a clone stops all its inner resources. That should not block -+ # stopping the clone. -+ other_affected = other_affected - inner_resources_names_set - if other_affected: - raise LibraryError( - reports.resource_disable_affects_other_resources( -diff --git a/pcs/lib/pacemaker/live.py b/pcs/lib/pacemaker/live.py -index 83274af0..233f2e2d 100644 ---- a/pcs/lib/pacemaker/live.py -+++ b/pcs/lib/pacemaker/live.py -@@ -271,7 +271,7 @@ def simulate_cib_xml(runner, cib_xml): - transitions_file = write_tmpfile(None) - except OSError as e: - raise LibraryError( -- reports.cib_simulate_error(format_os_error(e), cib_xml) -+ reports.cib_simulate_error(format_os_error(e)) - ) - - cmd = [ -@@ -284,7 +284,7 @@ def simulate_cib_xml(runner, cib_xml): - stdout, stderr, retval = runner.run(cmd, stdin_string=cib_xml) - if retval != 0: - raise LibraryError( -- reports.cib_simulate_error(stderr.strip(), cib_xml) -+ reports.cib_simulate_error(stderr.strip()) - ) - - try: -@@ -297,7 +297,7 @@ def simulate_cib_xml(runner, cib_xml): - return stdout, transitions_xml, new_cib_xml - except OSError as e: - raise LibraryError( -- reports.cib_simulate_error(format_os_error(e), cib_xml) -+ reports.cib_simulate_error(format_os_error(e)) - ) - - def simulate_cib(runner, cib): -@@ -319,7 +319,7 @@ def simulate_cib(runner, cib): - ) - except (etree.XMLSyntaxError, etree.DocumentInvalid) as e: - raise LibraryError( -- reports.cib_simulate_error(str(e), cib_xml) -+ reports.cib_simulate_error(str(e)) - ) - - ### wait for idle -diff --git a/pcs/lib/reports.py b/pcs/lib/reports.py -index 1f081007..c9b4a25d 100644 ---- a/pcs/lib/reports.py -+++ b/pcs/lib/reports.py -@@ -1935,18 +1935,16 @@ def cib_diff_error(reason, cib_old, cib_new): - } - ) - --def cib_simulate_error(reason, cib): -+def cib_simulate_error(reason): - """ - cannot simulate effects a CIB would have on a live cluster - - string reason -- error description -- string cib -- the CIB whose effects were to be simulated - """ - return ReportItem.error( - report_codes.CIB_SIMULATE_ERROR, - info={ - "reason": reason, -- "cib": cib, - } - ) - -diff --git a/pcs_test/tier0/cli/common/test_console_report.py b/pcs_test/tier0/cli/common/test_console_report.py -index 0d0c2457..29e9614d 100644 ---- a/pcs_test/tier0/cli/common/test_console_report.py -+++ 
b/pcs_test/tier0/cli/common/test_console_report.py -@@ -2238,8 +2238,14 @@ class CibDiffError(NameBuildTest): - class CibSimulateError(NameBuildTest): - def test_success(self): - self.assert_message_from_report( -- "Unable to simulate changes in CIB: error message\n", -- reports.cib_simulate_error("error message", "") -+ "Unable to simulate changes in CIB: error message", -+ reports.cib_simulate_error("error message") -+ ) -+ -+ def test_empty_reason(self): -+ self.assert_message_from_report( -+ "Unable to simulate changes in CIB", -+ reports.cib_simulate_error("") - ) - - -diff --git a/pcs_test/tier0/lib/cib/test_resource_common.py b/pcs_test/tier0/lib/cib/test_resource_common.py -index ebba09da..cd716ba2 100644 ---- a/pcs_test/tier0/lib/cib/test_resource_common.py -+++ b/pcs_test/tier0/lib/cib/test_resource_common.py -@@ -200,10 +200,12 @@ class FindOneOrMoreResources(TestCase): - - - class FindResourcesMixin: -+ _iterable_type = list -+ - def assert_find_resources(self, input_resource_id, output_resource_ids): - self.assertEqual( -- output_resource_ids, -- [ -+ self._iterable_type(output_resource_ids), -+ self._iterable_type([ - element.get("id", "") - for element in - self._tested_fn( -@@ -211,7 +213,7 @@ class FindResourcesMixin: - './/*[@id="{0}"]'.format(input_resource_id) - ) - ) -- ] -+ ]) - ) - - def test_group(self): -@@ -235,6 +237,27 @@ class FindResourcesMixin: - def test_bundle_with_primitive(self): - self.assert_find_resources("H-bundle", ["H"]) - -+ def test_primitive(self): -+ raise NotImplementedError() -+ -+ def test_primitive_in_clone(self): -+ raise NotImplementedError() -+ -+ def test_primitive_in_master(self): -+ raise NotImplementedError() -+ -+ def test_primitive_in_group(self): -+ raise NotImplementedError() -+ -+ def test_primitive_in_bundle(self): -+ raise NotImplementedError() -+ -+ def test_cloned_group(self): -+ raise NotImplementedError() -+ -+ def test_mastered_group(self): -+ raise NotImplementedError() -+ - - class FindPrimitives(TestCase, FindResourcesMixin): - _tested_fn = staticmethod(common.find_primitives) -@@ -266,6 +289,37 @@ class FindPrimitives(TestCase, FindResourcesMixin): - self.assert_find_resources("F-master", ["F1", "F2"]) - - -+class GetAllInnerResources(TestCase, FindResourcesMixin): -+ _iterable_type = set -+ _tested_fn = staticmethod(common.get_all_inner_resources) -+ -+ def test_primitive(self): -+ self.assert_find_resources("A", set()) -+ -+ def test_primitive_in_clone(self): -+ self.assert_find_resources("B", set()) -+ -+ def test_primitive_in_master(self): -+ self.assert_find_resources("C", set()) -+ -+ def test_primitive_in_group(self): -+ self.assert_find_resources("D1", set()) -+ self.assert_find_resources("D2", set()) -+ self.assert_find_resources("E1", set()) -+ self.assert_find_resources("E2", set()) -+ self.assert_find_resources("F1", set()) -+ self.assert_find_resources("F2", set()) -+ -+ def test_primitive_in_bundle(self): -+ self.assert_find_resources("H", set()) -+ -+ def test_cloned_group(self): -+ self.assert_find_resources("E-clone", {"E", "E1", "E2"}) -+ -+ def test_mastered_group(self): -+ self.assert_find_resources("F-master", {"F", "F1", "F2"}) -+ -+ - class GetInnerResources(TestCase, FindResourcesMixin): - _tested_fn = staticmethod(common.get_inner_resources) - -diff --git a/pcs_test/tier0/lib/commands/resource/test_resource_enable_disable.py b/pcs_test/tier0/lib/commands/resource/test_resource_enable_disable.py -index 634f0f33..62899940 100644 ---- 
a/pcs_test/tier0/lib/commands/resource/test_resource_enable_disable.py -+++ b/pcs_test/tier0/lib/commands/resource/test_resource_enable_disable.py -@@ -1729,12 +1729,6 @@ class DisableSimulate(TestCase): - fixture.error( - report_codes.CIB_SIMULATE_ERROR, - reason="some stderr", -- # curently, there is no way to normalize xml with our lxml -- # version 4.2.3, so this never passes equality tests -- # cib=self.config.calls.get( -- # "runner.pcmk.simulate_cib" -- # ).check_stdin.expected_stdin -- # , - ), - ], - expected_in_processor=False -@@ -1988,12 +1982,6 @@ class DisableSafeMixin(): - fixture.error( - report_codes.CIB_SIMULATE_ERROR, - reason="some stderr", -- # curently, there is no way to normalize xml with our lxml -- # version 4.2.3, so this never passes equality tests -- # cib=self.config.calls.get( -- # "runner.pcmk.simulate_cib" -- # ).check_stdin.expected_stdin -- # , - ), - ], - expected_in_processor=False -@@ -2118,6 +2106,236 @@ class DisableSafeMixin(): - fixture.report_resource_not_running("B"), - ]) - -+ def test_inner_resources(self, mock_write_tmpfile): -+ cib_xml = """ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ """ -+ status_xml = """ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ """ -+ synapses = [] -+ index = 0 -+ for res_name, is_clone in [ -+ ("A", False), -+ ("B", True), -+ ("C", True), -+ ("D1", False), -+ ("D2", False), -+ ("E1", True), -+ ("E2", True), -+ ("F1", True), -+ ("F2", True), -+ ("H", False), -+ ]: -+ if is_clone: -+ synapses.append(f""" -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ """) -+ index += 2 -+ else: -+ synapses.append(f""" -+ -+ -+ -+ -+ -+ -+ -+ """) -+ index += 1 -+ transitions_xml = ( -+ "" + "\n".join(synapses) + "" -+ ) -+ -+ self.tmpfile_transitions.read.return_value = transitions_xml -+ mock_write_tmpfile.side_effect = [ -+ self.tmpfile_new_cib, self.tmpfile_transitions, -+ AssertionError("No other write_tmpfile call expected") -+ ] -+ (self.config -+ .runner.cib.load(resources=cib_xml) -+ .runner.pcmk.load_state(resources=status_xml) -+ ) -+ self.config.runner.pcmk.simulate_cib( -+ self.tmpfile_new_cib.name, -+ self.tmpfile_transitions.name, -+ stdout="simulate output", -+ resources=""" -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ """ -+ ) -+ self.env_assist.assert_raise_library_error( -+ lambda: resource.disable_safe( -+ self.env_assist.get_env(), -+ ["B-clone", "C-master", "D", "E-clone", "F-master", "H-bundle"], -+ self.strict, -+ False, -+ ), -+ [ -+ fixture.error( -+ report_codes.RESOURCE_DISABLE_AFFECTS_OTHER_RESOURCES, -+ disabled_resource_list=[ -+ "B-clone", "C-master", "D", "E-clone", "F-master", -+ "H-bundle" -+ ], -+ affected_resource_list=["A"], -+ crm_simulate_plaintext_output="simulate output", -+ ), -+ ], -+ expected_in_processor=False -+ ) -+ - @mock.patch("pcs.lib.pacemaker.live.write_tmpfile") - class DisableSafe(DisableSafeMixin, TestCase): - strict = False -diff --git a/pcs_test/tier0/lib/pacemaker/test_live.py b/pcs_test/tier0/lib/pacemaker/test_live.py -index dfebcb17..1ea5454e 100644 ---- a/pcs_test/tier0/lib/pacemaker/test_live.py -+++ b/pcs_test/tier0/lib/pacemaker/test_live.py -@@ -686,7 +686,6 @@ class SimulateCibXml(LibraryPacemakerTest): - fixture.error( - report_codes.CIB_SIMULATE_ERROR, - reason="some error", -- cib="", - ), - ) - mock_runner.run.assert_not_called() -@@ 
-703,7 +702,6 @@ class SimulateCibXml(LibraryPacemakerTest): - fixture.error( - report_codes.CIB_SIMULATE_ERROR, - reason="some error", -- cib="", - ), - ) - mock_runner.run.assert_not_called() -@@ -729,7 +727,6 @@ class SimulateCibXml(LibraryPacemakerTest): - fixture.error( - report_codes.CIB_SIMULATE_ERROR, - reason="some error", -- cib="", - ), - ) - -@@ -755,7 +752,6 @@ class SimulateCibXml(LibraryPacemakerTest): - fixture.error( - report_codes.CIB_SIMULATE_ERROR, - reason="some error", -- cib="", - ), - ) - -@@ -782,7 +778,6 @@ class SimulateCibXml(LibraryPacemakerTest): - fixture.error( - report_codes.CIB_SIMULATE_ERROR, - reason="some error", -- cib="", - ), - ) - -@@ -819,7 +814,6 @@ class SimulateCib(TestCase): - "Start tag expected, '<' not found, line 1, column 1 " - "(, line 1)" - ), -- cib=self.cib_xml, - ), - ) - -@@ -835,7 +829,6 @@ class SimulateCib(TestCase): - "Start tag expected, '<' not found, line 1, column 1 " - "(, line 1)" - ), -- cib=self.cib_xml, - ), - ) - --- -2.21.1 - diff --git a/SOURCES/bz1783106-01-fix-sinatra-wrapper-performance-issue.patch b/SOURCES/bz1783106-01-fix-sinatra-wrapper-performance-issue.patch deleted file mode 100644 index d79e1a2..0000000 --- a/SOURCES/bz1783106-01-fix-sinatra-wrapper-performance-issue.patch +++ /dev/null @@ -1,1295 +0,0 @@ -From d54c102cee7a61dd3eccd62d60af218aa97a85fc Mon Sep 17 00:00:00 2001 -From: Ivan Devat -Date: Thu, 9 Jan 2020 15:53:37 +0100 -Subject: [PATCH 6/7] squash bz1783106 fix-sinatra-wrapper-performance-issue - -create prototype of tornado - thin communication - -put socket path to settings - -don't mix logs from threads in ruby daemon - -run ruby daemon via systemd units - -support trailing slash by gui urls e.g. /manage/ - -decode body from ruby response for log - -configure ruby wrapper by socket path - -remove env values not used for ruby calls any more - -deal with ruby daemon communication issues - -fix tests - -cleanup ruby server code - -deal with errors from ruby daemon in python daemon - -remove unused cmdline wrapper - -add ruby daemon infrastructure to spec etc. 
- -stop logging to stderr from ruby daemon - -fix spec file - -* add missing cp for new rubygems -* make sure to start the new ruby daemon on package upgrade -* tests: give the new daemon enough time to start ---- - .gitlab-ci.yml | 7 +- - Makefile | 6 + - pcs.spec.in | 30 ++++- - pcs/daemon/app/sinatra_ui.py | 2 +- - pcs/daemon/env.py | 36 ------ - pcs/daemon/ruby_pcsd.py | 136 +++++++++++----------- - pcs/daemon/run.py | 8 +- - pcs/settings_default.py | 1 + - pcs_test/tier0/daemon/app/fixtures_app.py | 3 +- - pcs_test/tier0/daemon/test_env.py | 66 +---------- - pcs_test/tier0/daemon/test_ruby_pcsd.py | 13 +-- - pcsd/Gemfile | 1 + - pcsd/Gemfile.lock | 7 ++ - pcsd/Makefile | 3 + - pcsd/bootstrap.rb | 20 +++- - pcsd/cfgsync.rb | 6 +- - pcsd/pcs.rb | 9 +- - pcsd/pcsd-cli.rb | 3 +- - pcsd/pcsd-ruby.service | 20 ++++ - pcsd/pcsd.conf | 4 + - pcsd/pcsd.rb | 31 ++--- - pcsd/pcsd.service | 2 + - pcsd/pcsd.service-runner | 24 ++++ - pcsd/remote.rb | 6 +- - pcsd/rserver.rb | 98 ++++++++++++++++ - pcsd/settings.rb | 1 + - pcsd/settings.rb.debian | 1 + - pcsd/sinatra_cmdline_wrapper.rb | 63 ---------- - 28 files changed, 330 insertions(+), 277 deletions(-) - create mode 100644 pcsd/pcsd-ruby.service - create mode 100644 pcsd/pcsd.service-runner - create mode 100644 pcsd/rserver.rb - delete mode 100644 pcsd/sinatra_cmdline_wrapper.rb - -diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml -index 23ab56a9..92b32033 100644 ---- a/.gitlab-ci.yml -+++ b/.gitlab-ci.yml -@@ -116,8 +116,11 @@ python_smoke_tests: - procps-ng - rpms/pcs-ci-*.rpm - " -- - /usr/sbin/pcsd & # start pcsd -- - sleep 10 # wait for pcsd to start up properly -+ - export GEM_HOME=/usr/lib/pcsd/vendor/bundle/ruby -+ - /usr/lib/pcsd/pcsd & # start pcsd (ruby - thin) -+ - sleep 10 # wait for pcsd (ruby - thin) to start up properly -+ - /usr/sbin/pcsd & # start pcsd (python - tornado) -+ - sleep 10 # wait for pcsd (python - tornado) to start up properly - - pcs_test/smoke.sh - artifacts: - paths: -diff --git a/Makefile b/Makefile -index f2b0d9b9..b9f64acd 100644 ---- a/Makefile -+++ b/Makefile -@@ -267,7 +267,12 @@ ifeq ($(IS_DEBIAN)$(IS_SYSTEMCTL),truefalse) - else - install -d ${DEST_SYSTEMD_SYSTEM} - install -m 644 ${SYSTEMD_SERVICE_FILE} ${DEST_SYSTEMD_SYSTEM}/pcsd.service -+ install -m 644 pcsd/pcsd-ruby.service ${DEST_SYSTEMD_SYSTEM}/pcsd-ruby.service - endif -+ # ${DEST_LIB}/pcsd/pcsd holds the selinux context -+ install -m 755 pcsd/pcsd.service-runner ${DEST_LIB}/pcsd/pcsd -+ rm ${DEST_LIB}/pcsd/pcsd.service-runner -+ - install -m 700 -d ${DESTDIR}/var/lib/pcsd - install -m 644 -D pcsd/pcsd.logrotate ${DESTDIR}/etc/logrotate.d/pcsd - install -m644 -D pcsd/pcsd.8 ${DEST_MAN}/pcsd.8 -@@ -293,6 +298,7 @@ ifeq ($(IS_DEBIAN)$(IS_SYSTEMCTL),truefalse) - rm -f ${DEST_INIT}/pcsd - else - rm -f ${DEST_SYSTEMD_SYSTEM}/pcsd.service -+ rm -f ${DEST_SYSTEMD_SYSTEM}/pcsd-ruby.service - rm -f ${DEST_SYSTEMD_SYSTEM}/pcs_snmp_agent.service - endif - rm -f ${DESTDIR}/etc/pam.d/pcsd -diff --git a/pcs.spec.in b/pcs.spec.in -index 5195dc51..32fbf614 100644 ---- a/pcs.spec.in -+++ b/pcs.spec.in -@@ -28,7 +28,9 @@ Summary: Pacemaker Configuration System - %global pyagentx_version 0.4.pcs.2 - %global tornado_version 6.0.3 - %global version_rubygem_backports 3.11.4 -+%global version_rubygem_daemons 1.3.1 - %global version_rubygem_ethon 0.11.0 -+%global version_rubygem_eventmachine 1.2.7 - %global version_rubygem_ffi 1.9.25 - %global version_rubygem_json 2.1.0 - %global version_rubygem_mustermann 1.0.3 -@@ -37,6 +39,7 @@ Summary: Pacemaker Configuration System - 
%global version_rubygem_rack_protection 2.0.4 - %global version_rubygem_rack_test 1.0.0 - %global version_rubygem_sinatra 2.0.4 -+%global version_rubygem_thin 1.7.2 - %global version_rubygem_tilt 2.0.9 - - # We do not use _libdir macro because upstream is not prepared for it. -@@ -83,6 +86,9 @@ Source89: https://rubygems.org/downloads/rack-protection-%{version_rubygem_rack_ - Source90: https://rubygems.org/downloads/rack-test-%{version_rubygem_rack_test}.gem - Source91: https://rubygems.org/downloads/sinatra-%{version_rubygem_sinatra}.gem - Source92: https://rubygems.org/downloads/tilt-%{version_rubygem_tilt}.gem -+Source93: https://rubygems.org/downloads/eventmachine-%{version_rubygem_eventmachine}.gem -+Source94: https://rubygems.org/downloads/daemons-%{version_rubygem_daemons}.gem -+Source95: https://rubygems.org/downloads/thin-%{version_rubygem_thin}.gem - - Source100: https://github.com/idevat/pcs-web-ui/archive/%{ui_commit}/%{ui_src_name}.tar.gz - Source101: https://github.com/idevat/pcs-web-ui/releases/download/%{ui_commit}/pcs-web-ui-node-modules-%{ui_commit}.tar.xz -@@ -164,7 +170,9 @@ Recommends: overpass-fonts - - Provides: bundled(tornado) = %{tornado_version} - Provides: bundled(backports) = %{version_rubygem_backports} -+Provides: bundled(daemons) = %{version_rubygem_daemons} - Provides: bundled(ethon) = %{version_rubygem_ethon} -+Provides: bundled(eventmachine) = %{version_rubygem_eventmachine} - Provides: bundled(ffi) = %{version_rubygem_ffi} - Provides: bundled(json) = %{version_rubygem_json} - Provides: bundled(mustermann) = %{version_rubygem_mustermann} -@@ -173,6 +181,7 @@ Provides: bundled(rack) = %{version_rubygem_rack} - Provides: bundled(rack) = %{version_rubygem_rack_protection} - Provides: bundled(rack) = %{version_rubygem_rack_test} - Provides: bundled(sinatra) = %{version_rubygem_sinatra} -+Provides: bundled(thin) = %{version_rubygem_thin} - Provides: bundled(tilt) = %{version_rubygem_tilt} - - %description -@@ -228,6 +237,9 @@ cp -f %SOURCE89 pcsd/vendor/cache - cp -f %SOURCE90 pcsd/vendor/cache - cp -f %SOURCE91 pcsd/vendor/cache - cp -f %SOURCE92 pcsd/vendor/cache -+cp -f %SOURCE93 pcsd/vendor/cache -+cp -f %SOURCE94 pcsd/vendor/cache -+cp -f %SOURCE95 pcsd/vendor/cache - - - # 3) dir for python bundles -@@ -262,15 +274,18 @@ gem install \ - --force --verbose -l --no-user-install %{gem_install_params} \ - -i %{rubygem_bundle_dir} \ - %{rubygem_cache_dir}/backports-%{version_rubygem_backports}.gem \ -+ %{rubygem_cache_dir}/daemons-%{version_rubygem_daemons}.gem \ - %{rubygem_cache_dir}/ethon-%{version_rubygem_ethon}.gem \ -+ %{rubygem_cache_dir}/eventmachine-%{version_rubygem_eventmachine}.gem \ - %{rubygem_cache_dir}/ffi-%{version_rubygem_ffi}.gem \ - %{rubygem_cache_dir}/json-%{version_rubygem_json}.gem \ - %{rubygem_cache_dir}/mustermann-%{version_rubygem_mustermann}.gem \ - %{rubygem_cache_dir}/open4-%{version_rubygem_open4}.gem \ -- %{rubygem_cache_dir}/rack-%{version_rubygem_rack}.gem \ - %{rubygem_cache_dir}/rack-protection-%{version_rubygem_rack_protection}.gem \ - %{rubygem_cache_dir}/rack-test-%{version_rubygem_rack_test}.gem \ -+ %{rubygem_cache_dir}/rack-%{version_rubygem_rack}.gem \ - %{rubygem_cache_dir}/sinatra-%{version_rubygem_sinatra}.gem \ -+ %{rubygem_cache_dir}/thin-%{version_rubygem_thin}.gem \ - %{rubygem_cache_dir}/tilt-%{version_rubygem_tilt}.gem \ - -- '--with-ldflags=-Wl,-z,relro -Wl,-z,ibt -Wl,-z,now -Wl,--gc-sections' \ - '--with-cflags=-O2 -ffunction-sections' -@@ -324,20 +339,31 @@ rm -r -v ${pcsd_dir}/test - # remove javascript 
testing files - rm -r -v ${pcsd_dir}/public/js/dev - -+%posttrans -+# Make sure the new version of the daemon is running. -+# Also, make sure to start pcsd-ruby if it hasn't been started or even -+# installed before. This is done by restarting pcsd.service. -+%{_bindir}/systemctl daemon-reload -+%{_bindir}/systemctl try-restart pcsd.service -+ -+ - %post - %systemd_post pcsd.service -+%systemd_post pcsd-ruby.service - - %post -n %{pcs_snmp_pkg_name} - %systemd_post pcs_snmp_agent.service - - %preun - %systemd_preun pcsd.service -+%systemd_preun pcsd-ruby.service - - %preun -n %{pcs_snmp_pkg_name} - %systemd_preun pcs_snmp_agent.service - - %postun - %systemd_postun_with_restart pcsd.service -+%systemd_postun_with_restart pcsd-ruby.service - - %postun -n %{pcs_snmp_pkg_name} - %systemd_postun_with_restart pcs_snmp_agent.service -@@ -357,6 +383,7 @@ rm -r -v ${pcsd_dir}/public/js/dev - %{pcs_libdir}/pcsd/.bundle/config - %{pcs_libdir}/pcs/bundled/packages/tornado* - %{_unitdir}/pcsd.service -+%{_unitdir}/pcsd-ruby.service - %{_datadir}/bash-completion/completions/pcs - %{_sharedstatedir}/pcsd - %{_sysconfdir}/pam.d/pcsd -@@ -374,6 +401,7 @@ rm -r -v ${pcsd_dir}/public/js/dev - %{_mandir}/man8/pcsd.* - %exclude %{pcs_libdir}/pcsd/*.debian - %exclude %{pcs_libdir}/pcsd/pcsd.service -+%exclude %{pcs_libdir}/pcsd/pcsd-ruby.service - %exclude %{pcs_libdir}/pcsd/pcsd.conf - %exclude %{pcs_libdir}/pcsd/pcsd.8 - %exclude %{pcs_libdir}/pcsd/public/js/dev/* -diff --git a/pcs/daemon/app/sinatra_ui.py b/pcs/daemon/app/sinatra_ui.py -index 1348134d..5315a48f 100644 ---- a/pcs/daemon/app/sinatra_ui.py -+++ b/pcs/daemon/app/sinatra_ui.py -@@ -153,7 +153,7 @@ def get_routes( - # The protection by session was moved from ruby code to python code - # (tornado). - ( -- r"/($|manage$|permissions$|managec/.+/main)", -+ r"/($|manage/?$|permissions/?$|managec/.+/main)", - SinatraGuiProtected, - {**sessions, **ruby_wrapper} - ), -diff --git a/pcs/daemon/env.py b/pcs/daemon/env.py -index 54a9819f..26cdcf9b 100644 ---- a/pcs/daemon/env.py -+++ b/pcs/daemon/env.py -@@ -15,7 +15,6 @@ from pcs.lib.validate import is_port_number - # Relative location instead of system location is used for development purposes.
- PCSD_LOCAL_DIR = realpath(dirname(abspath(__file__)) + "/../../pcsd") - --PCSD_CMDLINE_ENTRY_RB_SCRIPT = "sinatra_cmdline_wrapper.rb" - PCSD_STATIC_FILES_DIR_NAME = "public" - - PCSD_PORT = "PCSD_PORT" -@@ -26,12 +25,8 @@ NOTIFY_SOCKET = "NOTIFY_SOCKET" - PCSD_DEBUG = "PCSD_DEBUG" - PCSD_DISABLE_GUI = "PCSD_DISABLE_GUI" - PCSD_SESSION_LIFETIME = "PCSD_SESSION_LIFETIME" --GEM_HOME = "GEM_HOME" - PCSD_DEV = "PCSD_DEV" --PCSD_CMDLINE_ENTRY = "PCSD_CMDLINE_ENTRY" - PCSD_STATIC_FILES_DIR = "PCSD_STATIC_FILES_DIR" --HTTPS_PROXY = "HTTPS_PROXY" --NO_PROXY = "NO_PROXY" - - Env = namedtuple("Env", [ - PCSD_PORT, -@@ -42,11 +37,7 @@ Env = namedtuple("Env", [ - PCSD_DEBUG, - PCSD_DISABLE_GUI, - PCSD_SESSION_LIFETIME, -- GEM_HOME, -- PCSD_CMDLINE_ENTRY, - PCSD_STATIC_FILES_DIR, -- HTTPS_PROXY, -- NO_PROXY, - PCSD_DEV, - "has_errors", - ]) -@@ -62,11 +53,7 @@ def prepare_env(environ, logger=None): - loader.pcsd_debug(), - loader.pcsd_disable_gui(), - loader.session_lifetime(), -- loader.gem_home(), -- loader.pcsd_cmdline_entry(), - loader.pcsd_static_files_dir(), -- loader.https_proxy(), -- loader.no_proxy(), - loader.pcsd_dev(), - loader.has_errors(), - ) -@@ -173,20 +160,6 @@ class EnvLoader: - def pcsd_debug(self): - return self.__has_true_in_environ(PCSD_DEBUG) - -- def gem_home(self): -- if settings.pcsd_gem_path is None: -- return None -- return self.__in_pcsd_path( -- settings.pcsd_gem_path, -- "Ruby gem location" -- ) -- -- def pcsd_cmdline_entry(self): -- return self.__in_pcsd_path( -- PCSD_CMDLINE_ENTRY_RB_SCRIPT, -- "Ruby handlers entrypoint" -- ) -- - def pcsd_static_files_dir(self): - return self.__in_pcsd_path( - PCSD_STATIC_FILES_DIR_NAME, -@@ -194,15 +167,6 @@ class EnvLoader: - existence_required=not self.pcsd_disable_gui() - ) - -- def https_proxy(self): -- for key in ["https_proxy", HTTPS_PROXY, "all_proxy", "ALL_PROXY"]: -- if key in self.environ: -- return self.environ[key] -- return None -- -- def no_proxy(self): -- return self.environ.get("no_proxy", self.environ.get(NO_PROXY, None)) -- - @lru_cache() - def pcsd_dev(self): - return self.__has_true_in_environ(PCSD_DEV) -diff --git a/pcs/daemon/ruby_pcsd.py b/pcs/daemon/ruby_pcsd.py -index 5bdaffeb..e612f8da 100644 ---- a/pcs/daemon/ruby_pcsd.py -+++ b/pcs/daemon/ruby_pcsd.py -@@ -1,14 +1,16 @@ - import json - import logging --import os.path --from base64 import b64decode -+from base64 import b64decode, b64encode, binascii - from collections import namedtuple - from time import time as now - --from tornado.gen import multi, convert_yielded -+import pycurl -+from tornado.gen import convert_yielded - from tornado.web import HTTPError - from tornado.httputil import split_host_and_port, HTTPServerRequest --from tornado.process import Subprocess -+from tornado.httpclient import AsyncHTTPClient -+from tornado.curl_httpclient import CurlError -+ - - from pcs.daemon import log - -@@ -33,7 +35,7 @@ class SinatraResult(namedtuple("SinatraResult", "headers, status, body")): - return cls( - response["headers"], - response["status"], -- b64decode(response["body"]) -+ response["body"] - ) - - def log_group_id_generator(): -@@ -58,24 +60,12 @@ def process_response_logs(rb_log_list): - group_id=group_id - ) - --def log_communication(request_json, stdout, stderr): -- log.pcsd.debug("Request for ruby pcsd wrapper: '%s'", request_json) -- log.pcsd.debug("Response stdout from ruby pcsd wrapper: '%s'", stdout) -- log.pcsd.debug("Response stderr from ruby pcsd wrapper: '%s'", stderr) -- - class Wrapper: -- # pylint: 
disable=too-many-instance-attributes -- def __init__( -- self, pcsd_cmdline_entry, gem_home=None, debug=False, -- ruby_executable="ruby", https_proxy=None, no_proxy=None -- ): -- self.__gem_home = gem_home -- self.__pcsd_cmdline_entry = pcsd_cmdline_entry -- self.__pcsd_dir = os.path.dirname(pcsd_cmdline_entry) -- self.__ruby_executable = ruby_executable -+ def __init__(self, pcsd_ruby_socket, debug=False): - self.__debug = debug -- self.__https_proxy = https_proxy -- self.__no_proxy = no_proxy -+ AsyncHTTPClient.configure('tornado.curl_httpclient.CurlAsyncHTTPClient') -+ self.__client = AsyncHTTPClient() -+ self.__pcsd_ruby_socket = pcsd_ruby_socket - - @staticmethod - def get_sinatra_request(request: HTTPServerRequest): -@@ -102,55 +92,76 @@ class Wrapper: - "rack.input": request.body.decode("utf8"), - }} - -+ def prepare_curl_callback(self, curl): -+ curl.setopt(pycurl.UNIX_SOCKET_PATH, self.__pcsd_ruby_socket) -+ curl.setopt(pycurl.TIMEOUT, 70) -+ - async def send_to_ruby(self, request_json): -- env = { -- "PCSD_DEBUG": "true" if self.__debug else "false" -- } -- if self.__gem_home is not None: -- env["GEM_HOME"] = self.__gem_home -- -- if self.__no_proxy is not None: -- env["NO_PROXY"] = self.__no_proxy -- if self.__https_proxy is not None: -- env["HTTPS_PROXY"] = self.__https_proxy -- -- pcsd_ruby = Subprocess( -- [ -- self.__ruby_executable, "-I", -- self.__pcsd_dir, -- self.__pcsd_cmdline_entry -- ], -- stdin=Subprocess.STREAM, -- stdout=Subprocess.STREAM, -- stderr=Subprocess.STREAM, -- env=env -- ) -- await pcsd_ruby.stdin.write(str.encode(request_json)) -- pcsd_ruby.stdin.close() -- return await multi([ -- pcsd_ruby.stdout.read_until_close(), -- pcsd_ruby.stderr.read_until_close(), -- pcsd_ruby.wait_for_exit(raise_error=False), -- ]) -+ # We do not need location for communication with ruby itself since we -+ # communicate via unix socket. But it is required by AsyncHTTPClient so -+ # "localhost" is used. -+ tornado_request = b64encode(request_json.encode()).decode() -+ return (await self.__client.fetch( -+ "localhost", -+ method="POST", -+ body=f"TORNADO_REQUEST={tornado_request}", -+ prepare_curl_callback=self.prepare_curl_callback, -+ )).body - - async def run_ruby(self, request_type, request=None): -+ """ -+ request_type: SINATRA_GUI|SINATRA_REMOTE|SYNC_CONFIGS -+ request: result of get_sinatra_request|None -+ i.e. it has structure returned by get_sinatra_request if the request -+ is not None - so we can get SERVER_NAME and SERVER_PORT -+ """ - request = request or {} - request.update({"type": request_type}) - request_json = json.dumps(request) -- stdout, stderr, dummy_status = await self.send_to_ruby(request_json) -+ -+ if self.__debug: -+ log.pcsd.debug("Ruby daemon request: '%s'", request_json) - try: -- response = json.loads(stdout) -- except json.JSONDecodeError as e: -- self.__log_bad_response( -- f"Cannot decode json from ruby pcsd wrapper: '{e}'", -- request_json, stdout, stderr -+ ruby_response = await self.send_to_ruby(request_json) -+ except CurlError as e: -+ log.pcsd.error( -+ "Cannot connect to ruby daemon (message: '%s'). 
Is it running?", -+ e - ) - raise HTTPError(500) -- else: -- if self.__debug: -- log_communication(request_json, stdout, stderr) -- process_response_logs(response["logs"]) -+ -+ try: -+ response = json.loads(ruby_response) -+ if "error" in response: -+ log.pcsd.error( -+ "Ruby daemon response contains an error: '%s'", -+ json.dumps(response) -+ ) -+ raise HTTPError(500) -+ -+ logs = response.pop("logs", []) -+ if "body" in response: -+ body = b64decode(response.pop("body")) -+ if self.__debug: -+ log.pcsd.debug( -+ "Ruby daemon response (without logs and body): '%s'", -+ json.dumps(response) -+ ) -+ log.pcsd.debug("Ruby daemon response body: '%s'", body) -+ response["body"] = body -+ -+ elif self.__debug: -+ log.pcsd.debug( -+ "Ruby daemon response (without logs): '%s'", -+ json.dumps(response) -+ ) -+ process_response_logs(logs) - return response -+ except (json.JSONDecodeError, binascii.Error) as e: -+ if self.__debug: -+ log.pcsd.debug("Ruby daemon response: '%s'", ruby_response) -+ log.pcsd.error("Cannot decode json from ruby pcsd wrapper: '%s'", e) -+ raise HTTPError(500) - - async def request_gui( - self, request: HTTPServerRequest, user, groups, is_authenticated -@@ -186,8 +197,3 @@ class Wrapper: - except HTTPError: - log.pcsd.error("Config synchronization failed") - return int(now()) + DEFAULT_SYNC_CONFIG_DELAY -- -- def __log_bad_response(self, error_message, request_json, stdout, stderr): -- log.pcsd.error(error_message) -- if self.__debug: -- log_communication(request_json, stdout, stderr) -diff --git a/pcs/daemon/run.py b/pcs/daemon/run.py -index bafd9f3c..874ee2f1 100644 ---- a/pcs/daemon/run.py -+++ b/pcs/daemon/run.py -@@ -65,6 +65,8 @@ def configure_app( - # old web ui by default - [(r"/", RedirectHandler, dict(url="/manage"))] - + -+ [(r"/ui", RedirectHandler, dict(url="/ui/"))] -+ + - ui.get_routes( - url_prefix="/ui/", - app_dir=os.path.join(public_dir, "ui"), -@@ -101,12 +103,8 @@ def main(): - - sync_config_lock = Lock() - ruby_pcsd_wrapper = ruby_pcsd.Wrapper( -- pcsd_cmdline_entry=env.PCSD_CMDLINE_ENTRY, -- gem_home=env.GEM_HOME, -+ settings.pcsd_ruby_socket, - debug=env.PCSD_DEBUG, -- ruby_executable=settings.ruby_executable, -- https_proxy=env.HTTPS_PROXY, -- no_proxy=env.NO_PROXY, - ) - make_app = configure_app( - session.Storage(env.PCSD_SESSION_LIFETIME), -diff --git a/pcs/settings_default.py b/pcs/settings_default.py -index 6d8f33ac..f761ce43 100644 ---- a/pcs/settings_default.py -+++ b/pcs/settings_default.py -@@ -43,6 +43,7 @@ cibadmin = os.path.join(pacemaker_binaries, "cibadmin") - crm_mon_schema = '/usr/share/pacemaker/crm_mon.rng' - agent_metadata_schema = "/usr/share/resource-agents/ra-api-1.dtd" - pcsd_var_location = "/var/lib/pcsd/" -+pcsd_ruby_socket = "/run/pcsd-ruby.socket" - pcsd_cert_location = os.path.join(pcsd_var_location, "pcsd.crt") - pcsd_key_location = os.path.join(pcsd_var_location, "pcsd.key") - pcsd_known_hosts_location = os.path.join(pcsd_var_location, "known-hosts") -diff --git a/pcs_test/tier0/daemon/app/fixtures_app.py b/pcs_test/tier0/daemon/app/fixtures_app.py -index 2e4feba4..8d5b8f4c 100644 ---- a/pcs_test/tier0/daemon/app/fixtures_app.py -+++ b/pcs_test/tier0/daemon/app/fixtures_app.py -@@ -1,4 +1,3 @@ --from base64 import b64encode - from pprint import pformat - from urllib.parse import urlencode - -@@ -30,7 +29,7 @@ class RubyPcsdWrapper(ruby_pcsd.Wrapper): - return { - "headers": self.headers, - "status": self.status_code, -- "body": b64encode(self.body), -+ "body": self.body, - } - - class AppTest(AsyncHTTPTestCase): -diff 
--git a/pcs_test/tier0/daemon/test_env.py b/pcs_test/tier0/daemon/test_env.py -index 9e78eafd..e2f7f5b1 100644 ---- a/pcs_test/tier0/daemon/test_env.py -+++ b/pcs_test/tier0/daemon/test_env.py -@@ -41,11 +41,7 @@ class Prepare(TestCase, create_setup_patch_mixin(env)): - env.PCSD_DEBUG: False, - env.PCSD_DISABLE_GUI: False, - env.PCSD_SESSION_LIFETIME: settings.gui_session_lifetime_seconds, -- env.GEM_HOME: pcsd_dir(settings.pcsd_gem_path), -- env.PCSD_CMDLINE_ENTRY: pcsd_dir(env.PCSD_CMDLINE_ENTRY_RB_SCRIPT), - env.PCSD_STATIC_FILES_DIR: pcsd_dir(env.PCSD_STATIC_FILES_DIR_NAME), -- env.HTTPS_PROXY: None, -- env.NO_PROXY: None, - env.PCSD_DEV: False, - "has_errors": False, - } -@@ -77,8 +73,6 @@ class Prepare(TestCase, create_setup_patch_mixin(env)): - env.PCSD_DISABLE_GUI: "true", - env.PCSD_SESSION_LIFETIME: str(session_lifetime), - env.PCSD_DEV: "true", -- env.HTTPS_PROXY: "proxy1", -- env.NO_PROXY: "host", - env.PCSD_DEV: "true", - } - self.assert_environ_produces_modified_pcsd_env( -@@ -92,15 +86,9 @@ class Prepare(TestCase, create_setup_patch_mixin(env)): - env.PCSD_DEBUG: True, - env.PCSD_DISABLE_GUI: True, - env.PCSD_SESSION_LIFETIME: session_lifetime, -- env.GEM_HOME: pcsd_dir(settings.pcsd_gem_path), -- env.PCSD_CMDLINE_ENTRY: pcsd_dir( -- env.PCSD_CMDLINE_ENTRY_RB_SCRIPT -- ), - env.PCSD_STATIC_FILES_DIR: pcsd_dir( - env.PCSD_STATIC_FILES_DIR_NAME - ), -- env.HTTPS_PROXY: environ[env.HTTPS_PROXY], -- env.NO_PROXY: environ[env.NO_PROXY], - env.PCSD_DEV: True, - }, - ) -@@ -167,13 +155,6 @@ class Prepare(TestCase, create_setup_patch_mixin(env)): - self.assert_environ_produces_modified_pcsd_env( - specific_env_values={"has_errors": True}, - errors=[ -- f"Ruby gem location '{pcsd_dir(settings.pcsd_gem_path)}'" -- " does not exist" -- , -- "Ruby handlers entrypoint" -- f" '{pcsd_dir(env.PCSD_CMDLINE_ENTRY_RB_SCRIPT)}'" -- " does not exist" -- , - "Directory with web UI assets" - f" '{pcsd_dir(env.PCSD_STATIC_FILES_DIR_NAME)}'" - " does not exist" -@@ -181,54 +162,13 @@ class Prepare(TestCase, create_setup_patch_mixin(env)): - ] - ) - -- def test_errors_on_missing_paths_disabled_gui(self): -+ def test_no_errors_on_missing_paths_disabled_gui(self): - self.path_exists.return_value = False -- pcsd_dir = partial(join_path, settings.pcsd_exec_location) - self.assert_environ_produces_modified_pcsd_env( - environ={env.PCSD_DISABLE_GUI: "true"}, - specific_env_values={ - env.PCSD_DISABLE_GUI: True, -- "has_errors": True, -+ "has_errors": False, - }, -- errors=[ -- f"Ruby gem location '{pcsd_dir(settings.pcsd_gem_path)}'" -- " does not exist" -- , -- "Ruby handlers entrypoint" -- f" '{pcsd_dir(env.PCSD_CMDLINE_ENTRY_RB_SCRIPT)}'" -- " does not exist" -- , -- ] -+ errors=[] - ) -- -- def test_lower_case_no_proxy_has_precedence(self): -- def it_selects(proxy_value): -- self.assert_environ_produces_modified_pcsd_env( -- environ=environ, -- specific_env_values={env.NO_PROXY: proxy_value} -- ) -- -- environ = {"NO_PROXY": "no_proxy_1"} -- it_selects("no_proxy_1") -- -- environ["no_proxy"] = "no_proxy_2" -- it_selects("no_proxy_2") -- -- def test_http_proxy_is_setup_by_precedence(self): -- def it_selects(proxy_value): -- self.assert_environ_produces_modified_pcsd_env( -- environ=environ, -- specific_env_values={env.HTTPS_PROXY: proxy_value} -- ) -- -- environ = {"ALL_PROXY": "all_proxy_1"} -- it_selects("all_proxy_1") -- -- environ["all_proxy"] = "all_proxy_2" -- it_selects("all_proxy_2") -- -- environ["HTTPS_PROXY"] = "https_proxy_1" -- it_selects("https_proxy_1") -- -- environ["https_proxy"] = 
"https_proxy_2" -- it_selects("https_proxy_2") -diff --git a/pcs_test/tier0/daemon/test_ruby_pcsd.py b/pcs_test/tier0/daemon/test_ruby_pcsd.py -index d7fd71a0..28f14c87 100644 ---- a/pcs_test/tier0/daemon/test_ruby_pcsd.py -+++ b/pcs_test/tier0/daemon/test_ruby_pcsd.py -@@ -16,10 +16,7 @@ from pcs.daemon import ruby_pcsd - logging.getLogger("pcs.daemon").setLevel(logging.CRITICAL) - - def create_wrapper(): -- return ruby_pcsd.Wrapper( -- rc("/path/to/gem_home"), -- rc("/path/to/pcsd/cmdline/entry"), -- ) -+ return ruby_pcsd.Wrapper(rc("/path/to/ruby_socket")) - - def create_http_request(): - return HTTPServerRequest( -@@ -63,9 +60,7 @@ patch_ruby_pcsd = create_patcher(ruby_pcsd) - - class RunRuby(AsyncTestCase): - def setUp(self): -- self.stdout = "" -- self.stderr = "" -- self.exit_status = 0 -+ self.ruby_response = "" - self.request = self.create_request() - self.wrapper = create_wrapper() - patcher = mock.patch.object( -@@ -79,14 +74,14 @@ class RunRuby(AsyncTestCase): - - async def send_to_ruby(self, request_json): - self.assertEqual(json.loads(request_json), self.request) -- return self.stdout, self.stderr, self.exit_status -+ return self.ruby_response - - @staticmethod - def create_request(_type=ruby_pcsd.SYNC_CONFIGS): - return {"type": _type} - - def set_run_result(self, run_result): -- self.stdout = json.dumps({**run_result, "logs": []}) -+ self.ruby_response = json.dumps({**run_result, "logs": []}) - - def assert_sinatra_result(self, result, headers, status, body): - self.assertEqual(result.headers, headers) -diff --git a/pcsd/Gemfile b/pcsd/Gemfile -index 27898f71..716991a6 100644 ---- a/pcsd/Gemfile -+++ b/pcsd/Gemfile -@@ -10,3 +10,4 @@ gem 'json' - gem 'open4' - gem 'ffi' - gem 'ethon' -+gem 'thin' -diff --git a/pcsd/Gemfile.lock b/pcsd/Gemfile.lock -index 6f833888..c8b02a94 100644 ---- a/pcsd/Gemfile.lock -+++ b/pcsd/Gemfile.lock -@@ -2,8 +2,10 @@ GEM - remote: https://rubygems.org/ - specs: - backports (3.11.4) -+ daemons (1.3.1) - ethon (0.11.0) - ffi (>= 1.3.0) -+ eventmachine (1.2.7) - ffi (1.9.25) - json (2.1.0) - mustermann (1.0.3) -@@ -18,6 +20,10 @@ GEM - rack (~> 2.0) - rack-protection (= 2.0.4) - tilt (~> 2.0) -+ thin (1.7.2) -+ daemons (~> 1.0, >= 1.0.9) -+ eventmachine (~> 1.0, >= 1.0.4) -+ rack (>= 1, < 3) - tilt (2.0.9) - - PLATFORMS -@@ -33,6 +39,7 @@ DEPENDENCIES - rack-protection - rack-test - sinatra -+ thin - tilt - - BUNDLED WITH -diff --git a/pcsd/Makefile b/pcsd/Makefile -index 5fe3f3f3..5dde50e3 100644 ---- a/pcsd/Makefile -+++ b/pcsd/Makefile -@@ -26,6 +26,9 @@ build_gems_without_bundler: - vendor/cache/rack-test-1.1.0.gem \ - vendor/cache/sinatra-2.0.4.gem \ - vendor/cache/tilt-2.0.9.gem \ -+ vendor/cache/eventmachine-1.2.7.gem \ -+ vendor/cache/daemons-1.3.1.gem \ -+ vendor/cache/thin-1.7.2.gem \ - -- '--with-ldflags="-Wl,-z,now -Wl,-z,relro"' - - get_gems: -diff --git a/pcsd/bootstrap.rb b/pcsd/bootstrap.rb -index ec6b535c..fc9d9b8c 100644 ---- a/pcsd/bootstrap.rb -+++ b/pcsd/bootstrap.rb -@@ -51,8 +51,23 @@ if not defined? 
$cur_node_name - $cur_node_name = `/bin/hostname`.chomp - end - --def configure_logger(log_device) -- logger = Logger.new(log_device) -+def configure_logger() -+ logger = Logger.new(StringIO.new()) -+ logger.formatter = proc {|severity, datetime, progname, msg| -+ if Thread.current.key?(:pcsd_logger_container) -+ Thread.current[:pcsd_logger_container] << { -+ :level => severity, -+ :timestamp_usec => (datetime.to_f * 1000000).to_i, -+ :message => msg, -+ } -+ else -+ STDERR.puts("#{datetime} #{progname} #{severity} #{msg}") -+ end -+ } -+ return logger -+end -+ -+def early_log(logger) - if ENV['PCSD_DEBUG'] and ENV['PCSD_DEBUG'].downcase == "true" then - logger.level = Logger::DEBUG - logger.info "PCSD Debugging enabled" -@@ -65,7 +80,6 @@ def configure_logger(log_device) - else - logger.debug "Detected systemd is not in use" - end -- return logger - end - - def get_capabilities(logger) -diff --git a/pcsd/cfgsync.rb b/pcsd/cfgsync.rb -index 16bcfbdc..1cab512e 100644 ---- a/pcsd/cfgsync.rb -+++ b/pcsd/cfgsync.rb -@@ -468,7 +468,8 @@ module Cfgsync - node_response = {} - threads = [] - @nodes.each { |node| -- threads << Thread.new { -+ threads << Thread.new(Thread.current[:pcsd_logger_container]) { |logger| -+ Thread.current[:pcsd_logger_container] = logger - code, out = send_request_with_token( - @auth_user, node, 'set_configs', true, data, true, nil, 30, - @additional_known_hosts -@@ -616,7 +617,8 @@ module Cfgsync - node_configs = {} - connected_to = {} - nodes.each { |node| -- threads << Thread.new { -+ threads << Thread.new(Thread.current[:pcsd_logger_container]) { |logger| -+ Thread.current[:pcsd_logger_container] = logger - code, out = send_request_with_token( - @auth_user, node, 'get_configs', false, data, true, nil, nil, - @additional_known_hosts -diff --git a/pcsd/pcs.rb b/pcsd/pcs.rb -index 7b991ac0..9a0efb46 100644 ---- a/pcsd/pcs.rb -+++ b/pcsd/pcs.rb -@@ -923,7 +923,8 @@ def is_auth_against_nodes(auth_user, node_names, timeout=10) - offline_nodes = [] - - node_names.uniq.each { |node_name| -- threads << Thread.new { -+ threads << Thread.new(Thread.current[:pcsd_logger_container]) { |logger| -+ Thread.current[:pcsd_logger_container] = logger - code, response = send_request_with_token( - auth_user, node_name, 'check_auth', false, {}, true, nil, timeout - ) -@@ -963,7 +964,8 @@ def pcs_auth(auth_user, nodes) - auth_responses = {} - threads = [] - nodes.each { |node_name, node_data| -- threads << Thread.new { -+ threads << Thread.new(Thread.current[:pcsd_logger_container]) { |logger| -+ Thread.current[:pcsd_logger_container] = logger - begin - addr = node_data.fetch('dest_list').fetch(0).fetch('addr') - port = node_data.fetch('dest_list').fetch(0).fetch('port') -@@ -1199,7 +1201,8 @@ def cluster_status_from_nodes(auth_user, cluster_nodes, cluster_name) - - threads = [] - cluster_nodes.uniq.each { |node| -- threads << Thread.new { -+ threads << Thread.new(Thread.current[:pcsd_logger_container]) { |logger| -+ Thread.current[:pcsd_logger_container] = logger - code, response = send_request_with_token( - auth_user, - node, -diff --git a/pcsd/pcsd-cli.rb b/pcsd/pcsd-cli.rb -index 942bae84..4daa93ba 100755 ---- a/pcsd/pcsd-cli.rb -+++ b/pcsd/pcsd-cli.rb -@@ -29,7 +29,8 @@ end - auth_user = {} - PCS = get_pcs_path() - $logger_device = StringIO.new --$logger = configure_logger($logger_device) -+$logger = Logger.new($logger_device) -+early_log($logger) - - capabilities, capabilities_pcsd = get_capabilities($logger) - CAPABILITIES = capabilities.freeze -diff --git a/pcsd/pcsd-ruby.service 
b/pcsd/pcsd-ruby.service -new file mode 100644 -index 00000000..deefdf4f ---- /dev/null -+++ b/pcsd/pcsd-ruby.service -@@ -0,0 +1,20 @@ -+[Unit] -+Description=PCS GUI and remote configuration interface (Ruby) -+Documentation=man:pcsd(8) -+Documentation=man:pcs(8) -+Requires=network-online.target -+After=network-online.target -+# Stop the service automatically if nothing that depends on it is running -+StopWhenUnneeded=true -+# When stopping or restarting pcsd, stop or restart pcsd-ruby as well -+PartOf=pcsd.service -+ -+[Service] -+EnvironmentFile=/etc/sysconfig/pcsd -+Environment=GEM_HOME=/usr/lib/pcsd/vendor/bundle/ruby -+# This file holds the selinux context -+ExecStart=/usr/lib/pcsd/pcsd -+Type=notify -+ -+[Install] -+WantedBy=multi-user.target -diff --git a/pcsd/pcsd.conf b/pcsd/pcsd.conf -index 4761c73f..a968f459 100644 ---- a/pcsd/pcsd.conf -+++ b/pcsd/pcsd.conf -@@ -38,3 +38,7 @@ PCSD_SESSION_LIFETIME=3600 - #HTTPS_PROXY= - # Do not use proxy for specified hostnames - #NO_PROXY= -+ -+ -+# Do not change -+RACK_ENV=production -diff --git a/pcsd/pcsd.rb b/pcsd/pcsd.rb -index eff5c9a8..4cb98799 100644 ---- a/pcsd/pcsd.rb -+++ b/pcsd/pcsd.rb -@@ -22,6 +22,7 @@ require 'permissions.rb' - use Rack::CommonLogger - - set :app_file, __FILE__ -+set :logging, false - - def __msg_cluster_name_already_used(cluster_name) - return "The cluster name '#{cluster_name}' has already been added. You may not add two clusters with the same name." -@@ -44,17 +45,17 @@ end - - def getAuthUser() - return { -- :username => $tornado_username, -- :usergroups => $tornado_groups, -+ :username => Thread.current[:tornado_username], -+ :usergroups => Thread.current[:tornado_groups], - } - end - - before do - # nobody is logged in yet - @auth_user = nil -- @tornado_session_username = $tornado_username -- @tornado_session_groups = $tornado_groups -- @tornado_is_authenticated = $tornado_is_authenticated -+ @tornado_session_username = Thread.current[:tornado_username] -+ @tornado_session_groups = Thread.current[:tornado_groups] -+ @tornado_is_authenticated = Thread.current[:tornado_is_authenticated] - - if(request.path.start_with?('/remote/') and request.path != "/remote/auth") or request.path == '/run_pcs' - # Sets @auth_user to a hash containing info about logged in user or halts -@@ -71,18 +72,8 @@ end - configure do - PCS = get_pcs_path() - PCS_INTERNAL = get_pcs_internal_path() -- $logger = configure_logger(StringIO.new()) -- $logger.formatter = proc {|severity, datetime, progname, msg| -- # rushing a raw logging info into the global -- $tornado_logs << { -- :level => severity, -- :timestamp_usec => (datetime.to_f * 1000000).to_i, -- :message => msg, -- } -- # don't need any log to the stream -- "" -- } -- -+ $logger = configure_logger() -+ early_log($logger) - capabilities, capabilities_pcsd = get_capabilities($logger) - CAPABILITIES = capabilities.freeze - CAPABILITIES_PCSD = capabilities_pcsd.freeze -@@ -599,7 +590,8 @@ get '/manage/get_nodes_sw_versions' do - nodes = params[:node_list] - end - nodes.each {|node| -- threads << Thread.new { -+ threads << Thread.new(Thread.current[:pcsd_logger_container]) { |logger| -+ Thread.current[:pcsd_logger_container] = logger - code, response = send_request_with_token( - auth_user, node, 'get_sw_versions' - ) -@@ -625,7 +617,8 @@ post '/manage/auth_gui_against_nodes' do - - data = JSON.parse(params.fetch('data_json')) - data.fetch('nodes').each { |node_name, node_data| -- threads << Thread.new { -+ threads << Thread.new(Thread.current[:pcsd_logger_container]) { |logger| -+ 
Thread.current[:pcsd_logger_container] = logger - dest_list = node_data.fetch('dest_list') - addr = dest_list.fetch(0).fetch('addr') - port = dest_list.fetch(0).fetch('port') -diff --git a/pcsd/pcsd.service b/pcsd/pcsd.service -index 88d237af..0cab20ef 100644 ---- a/pcsd/pcsd.service -+++ b/pcsd/pcsd.service -@@ -4,6 +4,8 @@ Documentation=man:pcsd(8) - Documentation=man:pcs(8) - Requires=network-online.target - After=network-online.target -+Requires=pcsd-ruby.service -+After=pcsd-ruby.service - - [Service] - EnvironmentFile=/etc/sysconfig/pcsd -diff --git a/pcsd/pcsd.service-runner b/pcsd/pcsd.service-runner -new file mode 100644 -index 00000000..40c401fa ---- /dev/null -+++ b/pcsd/pcsd.service-runner -@@ -0,0 +1,24 @@ -+#!/usr/bin/ruby -+# This file is a runner for ruby part of pcsd callable from a systemd unit. -+# It also serves as a holder of a selinux context. -+ -+begin -+ # add pcsd to the load path (ruby -I) -+ libdir = File.dirname(__FILE__) -+ $LOAD_PATH.unshift(libdir) unless $LOAD_PATH.include?(libdir) -+ -+ # change current directory (ruby -C) -+ Dir.chdir('/var/lib/pcsd') -+ -+ # import and run ruby daemon -+ require 'rserver.rb' -+rescue SignalException => e -+ if [Signal.list['INT'], Signal.list['TERM']].include?(e.signo) -+ # gracefully exit on SIGINT and SIGTERM -+ # pcsd sets up signal handlers later, this catches exceptions which occur -+ # by receiving signals before the handlers have been set up. -+ exit -+ else -+ raise -+ end -+end -diff --git a/pcsd/remote.rb b/pcsd/remote.rb -index 28b91382..760d3374 100644 ---- a/pcsd/remote.rb -+++ b/pcsd/remote.rb -@@ -938,7 +938,8 @@ def status_all(params, request, auth_user, nodes=[], dont_update_config=false) - threads = [] - forbidden_nodes = {} - nodes.each {|node| -- threads << Thread.new { -+ threads << Thread.new(Thread.current[:pcsd_logger_container]) { |logger| -+ Thread.current[:pcsd_logger_container] = logger - code, response = send_request_with_token(auth_user, node, 'status') - if 403 == code - forbidden_nodes[node] = true -@@ -994,7 +995,8 @@ def clusters_overview(params, request, auth_user) - threads = [] - config = PCSConfig.new(Cfgsync::PcsdSettings.from_file().text()) - config.clusters.each { |cluster| -- threads << Thread.new { -+ threads << Thread.new(Thread.current[:pcsd_logger_container]) { |logger| -+ Thread.current[:pcsd_logger_container] = logger - cluster_map[cluster.name] = { - 'cluster_name' => cluster.name, - 'error_list' => [ -diff --git a/pcsd/rserver.rb b/pcsd/rserver.rb -new file mode 100644 -index 00000000..6002a73c ---- /dev/null -+++ b/pcsd/rserver.rb -@@ -0,0 +1,98 @@ -+require "base64" -+require "date" -+require "json" -+require 'rack' -+require 'sinatra' -+require 'thin' -+ -+require 'settings.rb' -+ -+def pack_response(response) -+ return [200, {}, [response.to_json.to_str]] -+end -+ -+def unpack_request(transport_env) -+ return JSON.parse(Base64.strict_decode64( -+ transport_env["rack.request.form_hash"]["TORNADO_REQUEST"] -+ )) -+end -+ -+class TornadoCommunicationMiddleware -+ def initialize(app) -+ @app = app -+ end -+ -+ def call(transport_env) -+ Thread.current[:pcsd_logger_container] = [] -+ begin -+ request = unpack_request(transport_env) -+ -+ if ["sinatra_gui", "sinatra_remote"].include?(request["type"]) -+ if request["type"] == "sinatra_gui" -+ session = request["session"] -+ Thread.current[:tornado_username] = session["username"] -+ Thread.current[:tornado_groups] = session["groups"] -+ Thread.current[:tornado_is_authenticated] = session["is_authenticated"] -+ end -+ -+ #
Keys rack.input and rack.errors are required. We make sure they are -+ # there. -+ request_env = request["env"] -+ request_env["rack.input"] = StringIO.new(request_env["rack.input"]) -+ request_env["rack.errors"] = StringIO.new() -+ -+ status, headers, body = @app.call(request_env) -+ -+ rack_errors = request_env['rack.errors'].string() -+ if not rack_errors.empty?() -+ $logger.error(rack_errors) -+ end -+ -+ return pack_response({ -+ :status => status, -+ :headers => headers, -+ :body => Base64.encode64(body.join("")), -+ :logs => Thread.current[:pcsd_logger_container], -+ }) -+ end -+ -+ if request["type"] == "sync_configs" -+ return pack_response({ -+ :next => Time.now.to_i + run_cfgsync(), -+ :logs => Thread.current[:pcsd_logger_container], -+ }) -+ end -+ -+ raise "Unexpected value for key 'type': '#{request['type']}'" -+ rescue => e -+ return pack_response({:error => "Processing request error: '#{e}'"}) -+ end -+ end -+end -+ -+ -+use TornadoCommunicationMiddleware -+ -+require 'pcsd' -+ -+::Rack::Handler.get('thin').run(Sinatra::Application, { -+ :Host => PCSD_RUBY_SOCKET, -+}) do |server| -+ puts server.class -+ server.threaded = true -+ # notify systemd we are running -+ if ISSYSTEMCTL -+ if ENV['NOTIFY_SOCKET'] -+ socket_name = ENV['NOTIFY_SOCKET'].dup -+ if socket_name.start_with?('@') -+ # abstract namespace socket -+ socket_name[0] = "\0" -+ end -+ $logger.info("Notifying systemd we are running (socket #{socket_name})") -+ sd_socket = Socket.new(Socket::AF_UNIX, Socket::SOCK_DGRAM) -+ sd_socket.connect(Socket.pack_sockaddr_un(socket_name)) -+ sd_socket.send('READY=1', 0) -+ sd_socket.close() -+ end -+ end -+end -diff --git a/pcsd/settings.rb b/pcsd/settings.rb -index e8dc0c96..4caa5b4c 100644 ---- a/pcsd/settings.rb -+++ b/pcsd/settings.rb -@@ -3,6 +3,7 @@ PCS_INTERNAL_EXEC = '/usr/lib/pcs/pcs_internal' - PCSD_EXEC_LOCATION = '/usr/lib/pcsd/' - PCSD_VAR_LOCATION = '/var/lib/pcsd/' - PCSD_DEFAULT_PORT = 2224 -+PCSD_RUBY_SOCKET = '/run/pcsd-ruby.socket' - - CRT_FILE = PCSD_VAR_LOCATION + 'pcsd.crt' - KEY_FILE = PCSD_VAR_LOCATION + 'pcsd.key' -diff --git a/pcsd/settings.rb.debian b/pcsd/settings.rb.debian -index daaae37b..c547bc51 100644 ---- a/pcsd/settings.rb.debian -+++ b/pcsd/settings.rb.debian -@@ -3,6 +3,7 @@ PCS_INTERNAL_EXEC = '/usr/lib/pcs/pcs_internal' - PCSD_EXEC_LOCATION = '/usr/share/pcsd/' - PCSD_VAR_LOCATION = '/var/lib/pcsd/' - PCSD_DEFAULT_PORT = 2224 -+PCSD_RUBY_SOCKET = '/run/pcsd-ruby.socket' - - CRT_FILE = PCSD_VAR_LOCATION + 'pcsd.crt' - KEY_FILE = PCSD_VAR_LOCATION + 'pcsd.key' -diff --git a/pcsd/sinatra_cmdline_wrapper.rb b/pcsd/sinatra_cmdline_wrapper.rb -deleted file mode 100644 -index f7b22008..00000000 ---- a/pcsd/sinatra_cmdline_wrapper.rb -+++ /dev/null -@@ -1,63 +0,0 @@ --require "base64" --require "date" --require "json" -- --request_json = ARGF.read() -- --begin -- request = JSON.parse(request_json) --rescue => e -- puts e -- exit --end -- --if !request.include?("type") -- result = {:error => "Type not specified"} -- print result.to_json -- exit --end -- --$tornado_logs = [] -- --require 'pcsd' -- --if ["sinatra_gui", "sinatra_remote"].include?(request["type"]) -- if request["type"] == "sinatra_gui" -- $tornado_username = request["session"]["username"] -- $tornado_groups = request["session"]["groups"] -- $tornado_is_authenticated = request["session"]["is_authenticated"] -- end -- -- set :logging, true -- set :run, false -- # Do not turn exceptions into fancy 100kB HTML pages and print them on stdout. 
-- # Instead, rack.errors is logged and therefore returned in result[:log]. -- set :show_exceptions, false -- app = [Sinatra::Application][0] -- -- env = request["env"] -- env["rack.input"] = StringIO.new(env["rack.input"]) -- env["rack.errors"] = StringIO.new() -- -- status, headers, body = app.call(env) -- rack_errors = env['rack.errors'].string() -- if not rack_errors.empty?() -- $logger.error(rack_errors) -- end -- -- result = { -- :status => status, -- :headers => headers, -- :body => Base64.encode64(body.join("")), -- } -- --elsif request["type"] == "sync_configs" -- result = { -- :next => Time.now.to_i + run_cfgsync() -- } --else -- result = {:error => "Unknown type: '#{request["type"]}'"} --end -- --result[:logs] = $tornado_logs --print result.to_json --- -2.21.1 - diff --git a/SOURCES/bz1783106-02-send-request-from-python-to-ruby-more-directly.patch b/SOURCES/bz1783106-02-send-request-from-python-to-ruby-more-directly.patch deleted file mode 100644 index 142cd99..0000000 --- a/SOURCES/bz1783106-02-send-request-from-python-to-ruby-more-directly.patch +++ /dev/null @@ -1,533 +0,0 @@ -From 770252b476bc342ea08da2bc5b83de713463d14a Mon Sep 17 00:00:00 2001 -From: Ivan Devat -Date: Thu, 12 Mar 2020 15:32:31 +0100 -Subject: [PATCH 1/2] send request from python to ruby more directly - -Rack protection middleware is launched before -TornadoCommunicationMiddleware. When request parts are unpacked in -TornadoCommunicationMiddleware they are not checked by rack protection. - -This commit changes communication between python and ruby - request is -sent to ruby more directly (without need to unpack request in sinatra -middleware). ---- - pcs/daemon/ruby_pcsd.py | 217 ++++++++++++++-------- - pcs_test/tier0/daemon/app/fixtures_app.py | 7 +- - pcs_test/tier0/daemon/test_ruby_pcsd.py | 61 ++---- - pcsd/rserver.rb | 39 ++-- - 4 files changed, 175 insertions(+), 149 deletions(-) - -diff --git a/pcs/daemon/ruby_pcsd.py b/pcs/daemon/ruby_pcsd.py -index e612f8da..53c53eaf 100644 ---- a/pcs/daemon/ruby_pcsd.py -+++ b/pcs/daemon/ruby_pcsd.py -@@ -7,8 +7,8 @@ from time import time as now - import pycurl - from tornado.gen import convert_yielded - from tornado.web import HTTPError --from tornado.httputil import split_host_and_port, HTTPServerRequest --from tornado.httpclient import AsyncHTTPClient -+from tornado.httputil import HTTPServerRequest, HTTPHeaders -+from tornado.httpclient import AsyncHTTPClient, HTTPClientError - from tornado.curl_httpclient import CurlError - - -@@ -29,6 +29,11 @@ RUBY_LOG_LEVEL_MAP = { - "DEBUG": logging.DEBUG, - } - -+__id_dict = {"id": 0} -+def get_request_id(): -+ __id_dict["id"] += 1 -+ return __id_dict["id"] -+ - class SinatraResult(namedtuple("SinatraResult", "headers, status, body")): - @classmethod - def from_response(cls, response): -@@ -60,6 +65,59 @@ def process_response_logs(rb_log_list): - group_id=group_id - ) - -+class RubyDaemonRequest(namedtuple( -+ "RubyDaemonRequest", -+ "request_type, path, query, headers, method, body" -+)): -+ def __new__( -+ cls, -+ request_type, -+ http_request: HTTPServerRequest = None, -+ payload=None, -+ ): -+ headers = http_request.headers if http_request else HTTPHeaders() -+ headers.add("X-Pcsd-Type", request_type) -+ if payload: -+ headers.add( -+ "X-Pcsd-Payload", -+ b64encode(json.dumps(payload).encode()).decode() -+ ) -+ return super(RubyDaemonRequest, cls).__new__( -+ cls, -+ request_type, -+ http_request.path if http_request else "", -+ http_request.query if http_request else "", -+ headers, -+ http_request.method if 
http_request else "GET", -+ http_request.body if http_request else None, -+ ) -+ -+ @property -+ def url(self): -+ # We do not need location for communication with ruby itself since we -+ # communicate via unix socket. But it is required by AsyncHTTPClient so -+ # "localhost" is used. -+ query = f"?{self.query}" if self.query else "" -+ return f"localhost/{self.path}{query}" -+ -+ @property -+ def is_get(self): -+ return self.method.upper() == "GET" -+ -+ @property -+ def has_http_request_detail(self): -+ return self.path or self.query or self.method != "GET" or self.body -+ -+def log_ruby_daemon_request(label, request: RubyDaemonRequest): -+ log.pcsd.debug("%s type: '%s'", label, request.request_type) -+ if request.has_http_request_detail: -+ log.pcsd.debug("%s path: '%s'", label, request.path) -+ if request.query: -+ log.pcsd.debug("%s query: '%s'", label, request.query) -+ log.pcsd.debug("%s method: '%s'", label, request.method) -+ if request.body: -+ log.pcsd.debug("%s body: '%s'", label, request.body) -+ - class Wrapper: - def __init__(self, pcsd_ruby_socket, debug=False): - self.__debug = debug -@@ -67,74 +125,87 @@ class Wrapper: - self.__client = AsyncHTTPClient() - self.__pcsd_ruby_socket = pcsd_ruby_socket - -- @staticmethod -- def get_sinatra_request(request: HTTPServerRequest): -- host, port = split_host_and_port(request.host) -- return {"env": { -- "PATH_INFO": request.path, -- "QUERY_STRING": request.query, -- "REMOTE_ADDR": request.remote_ip, -- "REMOTE_HOST": request.host, -- "REQUEST_METHOD": request.method, -- "REQUEST_URI": f"{request.protocol}://{request.host}{request.uri}", -- "SCRIPT_NAME": "", -- "SERVER_NAME": host, -- "SERVER_PORT": port, -- "SERVER_PROTOCOL": request.version, -- "HTTP_HOST": request.host, -- "HTTP_ACCEPT": "*/*", -- "HTTP_COOKIE": ";".join([ -- v.OutputString() for v in request.cookies.values() -- ]), -- "HTTPS": "on" if request.protocol == "https" else "off", -- "HTTP_VERSION": request.version, -- "REQUEST_PATH": request.path, -- "rack.input": request.body.decode("utf8"), -- }} -- - def prepare_curl_callback(self, curl): - curl.setopt(pycurl.UNIX_SOCKET_PATH, self.__pcsd_ruby_socket) - curl.setopt(pycurl.TIMEOUT, 70) - -- async def send_to_ruby(self, request_json): -- # We do not need location for communication with ruby itself since we -- # communicate via unix socket. But it is required by AsyncHTTPClient so -- # "localhost" is used. -- tornado_request = b64encode(request_json.encode()).decode() -- return (await self.__client.fetch( -- "localhost", -- method="POST", -- body=f"TORNADO_REQUEST={tornado_request}", -- prepare_curl_callback=self.prepare_curl_callback, -- )).body -- -- async def run_ruby(self, request_type, request=None): -- """ -- request_type: SINATRA_GUI|SINATRA_REMOTE|SYNC_CONFIGS -- request: result of get_sinatra_request|None -- i.e. 
it has structure returned by get_sinatra_request if the request -- is not None - so we can get SERVER_NAME and SERVER_PORT -- """ -- request = request or {} -- request.update({"type": request_type}) -- request_json = json.dumps(request) -- -- if self.__debug: -- log.pcsd.debug("Ruby daemon request: '%s'", request_json) -+ async def send_to_ruby(self, request: RubyDaemonRequest): - try: -- ruby_response = await self.send_to_ruby(request_json) -+ return (await self.__client.fetch( -+ request.url, -+ headers=request.headers, -+ method=request.method, -+ # Tornado enforces body=None for GET method: -+ # Even with `allow_nonstandard_methods` we disallow GET with a -+ # body (because libcurl doesn't allow it unless we use -+ # CUSTOMREQUEST). While the spec doesn't forbid clients from -+ # sending a body, it arguably disallows the server from doing -+ # anything with them. -+ body=(request.body if not request.is_get else None), -+ prepare_curl_callback=self.prepare_curl_callback, -+ )).body - except CurlError as e: -+ # This error we can get e.g. when ruby daemon is down. - log.pcsd.error( - "Cannot connect to ruby daemon (message: '%s'). Is it running?", - e - ) - raise HTTPError(500) -+ except HTTPClientError as e: -+ # This error we can get e.g. when rack protection raises exception. -+ log.pcsd.error( -+ ( -+ "Got error from ruby daemon (message: '%s')." -+ " Try checking system logs (e.g. journal, systemctl status" -+ " pcsd.service) for more information." -+ ), -+ e -+ ) -+ raise HTTPError(500) -+ -+ async def run_ruby( -+ self, -+ request_type, -+ http_request: HTTPServerRequest = None, -+ payload=None, -+ ): -+ request = RubyDaemonRequest(request_type, http_request, payload) -+ request_id = get_request_id() -+ -+ def log_request(): -+ log_ruby_daemon_request( -+ f"Ruby daemon request (id: {request_id})", -+ request, -+ ) -+ -+ if self.__debug: -+ log_request() -+ -+ return self.process_ruby_response( -+ f"Ruby daemon response (id: {request_id})", -+ log_request, -+ await self.send_to_ruby(request), -+ ) -+ -+ def process_ruby_response(self, label, log_request, ruby_response): -+ """ -+ Return relevant part of unpacked ruby response. As a side effect -+ relevant logs are written.
- -+ string label -- is used as a log prefix -+ callable log_request -- is used to log request when some errors happen; -+ we want to log request before error even if there is not debug mode -+ string ruby_response -- body of response from ruby; it should contain -+ json with dictionary with response specific keys -+ """ - try: - response = json.loads(ruby_response) - if "error" in response: -+ if not self.__debug: -+ log_request() - log.pcsd.error( -- "Ruby daemon response contains an error: '%s'", -+ "%s contains an error: '%s'", -+ label, - json.dumps(response) - ) - raise HTTPError(500) -@@ -144,56 +215,52 @@ class Wrapper: - body = b64decode(response.pop("body")) - if self.__debug: - log.pcsd.debug( -- "Ruby daemon response (without logs and body): '%s'", -+ "%s (without logs and body): '%s'", -+ label, - json.dumps(response) - ) -- log.pcsd.debug("Ruby daemon response body: '%s'", body) -+ log.pcsd.debug("%s body: '%s'", label, body) - response["body"] = body - - elif self.__debug: - log.pcsd.debug( -- "Ruby daemon response (without logs): '%s'", -+ "%s (without logs): '%s'", -+ label, - json.dumps(response) - ) - process_response_logs(logs) - return response - except (json.JSONDecodeError, binascii.Error) as e: - if self.__debug: -- log.pcsd.debug("Ruby daemon response: '%s'", ruby_response) -+ log.pcsd.debug("%s: '%s'", label, ruby_response) -+ else: -+ log_request() -+ - log.pcsd.error("Cannot decode json from ruby pcsd wrapper: '%s'", e) - raise HTTPError(500) - - async def request_gui( - self, request: HTTPServerRequest, user, groups, is_authenticated - ) -> SinatraResult: -- sinatra_request = self.get_sinatra_request(request) - # Sessions handling was removed from ruby. However, some session - # information is needed for ruby code (e.g. rendering some parts of - # templates). So this information must be sent to ruby by another way. 
-- sinatra_request.update({ -- "session": { -+ return SinatraResult.from_response( -+ await convert_yielded(self.run_ruby(SINATRA_GUI, request, { - "username": user, - "groups": groups, - "is_authenticated": is_authenticated, -- } -- }) -- response = await convert_yielded(self.run_ruby( -- SINATRA_GUI, -- sinatra_request -- )) -- return SinatraResult.from_response(response) -+ })) -+ ) - - async def request_remote(self, request: HTTPServerRequest) -> SinatraResult: -- response = await convert_yielded(self.run_ruby( -- SINATRA_REMOTE, -- self.get_sinatra_request(request) -- )) -- return SinatraResult.from_response(response) -+ return SinatraResult.from_response( -+ await convert_yielded(self.run_ruby(SINATRA_REMOTE, request)) -+ ) - - async def sync_configs(self): - try: -- response = await convert_yielded(self.run_ruby(SYNC_CONFIGS)) -- return response["next"] -+ return (await convert_yielded(self.run_ruby(SYNC_CONFIGS)))["next"] - except HTTPError: - log.pcsd.error("Config synchronization failed") - return int(now()) + DEFAULT_SYNC_CONFIG_DELAY -diff --git a/pcs_test/tier0/daemon/app/fixtures_app.py b/pcs_test/tier0/daemon/app/fixtures_app.py -index 8d5b8f4c..590203b4 100644 ---- a/pcs_test/tier0/daemon/app/fixtures_app.py -+++ b/pcs_test/tier0/daemon/app/fixtures_app.py -@@ -20,7 +20,12 @@ class RubyPcsdWrapper(ruby_pcsd.Wrapper): - self.headers = {"Some": "value"} - self.body = b"Success action" - -- async def run_ruby(self, request_type, request=None): -+ async def run_ruby( -+ self, -+ request_type, -+ http_request=None, -+ payload=None, -+ ): - if request_type != self.request_type: - raise AssertionError( - f"Wrong request type: expected '{self.request_type}'" -diff --git a/pcs_test/tier0/daemon/test_ruby_pcsd.py b/pcs_test/tier0/daemon/test_ruby_pcsd.py -index 28f14c87..32eb74cc 100644 ---- a/pcs_test/tier0/daemon/test_ruby_pcsd.py -+++ b/pcs_test/tier0/daemon/test_ruby_pcsd.py -@@ -4,7 +4,7 @@ from base64 import b64encode - from unittest import TestCase, mock - from urllib.parse import urlencode - --from tornado.httputil import HTTPServerRequest -+from tornado.httputil import HTTPServerRequest, HTTPHeaders - from tornado.testing import AsyncTestCase, gen_test - from tornado.web import HTTPError - -@@ -22,46 +22,17 @@ def create_http_request(): - return HTTPServerRequest( - method="POST", - uri="/pcsd/uri", -- headers={"Cookie": "cookie1=first;cookie2=second"}, -+ headers=HTTPHeaders({"Cookie": "cookie1=first;cookie2=second"}), - body=str.encode(urlencode({"post-key": "post-value"})), - host="pcsd-host:2224" - ) - --class GetSinatraRequest(TestCase): -- def test_translate_request(self): -- # pylint: disable=invalid-name -- self.maxDiff = None -- self.assertEqual( -- create_wrapper().get_sinatra_request(create_http_request()), -- { -- 'env': { -- 'HTTPS': 'off', -- 'HTTP_ACCEPT': '*/*', -- 'HTTP_COOKIE': 'cookie1=first;cookie2=second', -- 'HTTP_HOST': 'pcsd-host:2224', -- 'HTTP_VERSION': 'HTTP/1.0', -- 'PATH_INFO': '/pcsd/uri', -- 'QUERY_STRING': '', -- 'REMOTE_ADDR': None, # It requires complicated request args -- 'REMOTE_HOST': 'pcsd-host:2224', -- 'REQUEST_METHOD': 'POST', -- 'REQUEST_PATH': '/pcsd/uri', -- 'REQUEST_URI': 'http://pcsd-host:2224/pcsd/uri', -- 'SCRIPT_NAME': '', -- 'SERVER_NAME': 'pcsd-host', -- 'SERVER_PORT': 2224, -- 'SERVER_PROTOCOL': 'HTTP/1.0', -- 'rack.input': 'post-key=post-value' -- } -- } -- ) -- - patch_ruby_pcsd = create_patcher(ruby_pcsd) - - class RunRuby(AsyncTestCase): - def setUp(self): - self.ruby_response = "" -- self.request = 
self.create_request() -+ self.request = ruby_pcsd.RubyDaemonRequest(ruby_pcsd.SYNC_CONFIGS) - self.wrapper = create_wrapper() - patcher = mock.patch.object( - self.wrapper, -@@ -72,14 +43,10 @@ class RunRuby(AsyncTestCase): - patcher.start() - super().setUp() - -- async def send_to_ruby(self, request_json): -- self.assertEqual(json.loads(request_json), self.request) -+ async def send_to_ruby(self, ruby_request): -+ self.assertEqual(ruby_request, self.request) - return self.ruby_response - -- @staticmethod -- def create_request(_type=ruby_pcsd.SYNC_CONFIGS): -- return {"type": _type} -- - def set_run_result(self, run_result): - self.ruby_response = json.dumps({**run_result, "logs": []}) - -@@ -125,10 +92,10 @@ class RunRuby(AsyncTestCase): - "body": b64encode(str.encode(body)).decode(), - }) - http_request = create_http_request() -- self.request = { -- **self.create_request(ruby_pcsd.SINATRA_REMOTE), -- **self.wrapper.get_sinatra_request(http_request), -- } -+ self.request = ruby_pcsd.RubyDaemonRequest( -+ ruby_pcsd.SINATRA_REMOTE, -+ http_request, -+ ) - result = yield self.wrapper.request_remote(http_request) - self.assert_sinatra_result(result, headers, status, body) - -@@ -148,15 +115,15 @@ class RunRuby(AsyncTestCase): - "body": b64encode(str.encode(body)).decode(), - }) - http_request = create_http_request() -- self.request = { -- **self.create_request(ruby_pcsd.SINATRA_GUI), -- **self.wrapper.get_sinatra_request(http_request), -- "session": { -+ self.request = ruby_pcsd.RubyDaemonRequest( -+ ruby_pcsd.SINATRA_GUI, -+ http_request, -+ { - "username": user, - "groups": groups, - "is_authenticated": is_authenticated, - } -- } -+ ) - result = yield self.wrapper.request_gui( - http_request, - user=user, -diff --git a/pcsd/rserver.rb b/pcsd/rserver.rb -index 6002a73c..4b58f252 100644 ---- a/pcsd/rserver.rb -+++ b/pcsd/rserver.rb -@@ -11,42 +11,25 @@ def pack_response(response) - return [200, {}, [response.to_json.to_str]] - end - --def unpack_request(transport_env) -- return JSON.parse(Base64.strict_decode64( -- transport_env["rack.request.form_hash"]["TORNADO_REQUEST"] -- )) --end -- - class TornadoCommunicationMiddleware - def initialize(app) - @app = app - end - -- def call(transport_env) -+ def call(env) - Thread.current[:pcsd_logger_container] = [] - begin -- request = unpack_request(transport_env) -+ type = env["HTTP_X_PCSD_TYPE"] - -- if ["sinatra_gui", "sinatra_remote"].include?(request["type"]) -- if request["type"] == "sinatra_gui" -- session = request["session"] -+ if ["sinatra_gui", "sinatra_remote"].include?(type) -+ if type == "sinatra_gui" -+ session = JSON.parse(Base64.strict_decode64(env["HTTP_X_PCSD_PAYLOAD"])) - Thread.current[:tornado_username] = session["username"] - Thread.current[:tornado_groups] = session["groups"] - Thread.current[:tornado_is_authenticated] = session["is_authenticated"] - end - -- # Keys rack.input and rack.errors are required. We make sure they are -- # there. 
-- request_env = request["env"] -- request_env["rack.input"] = StringIO.new(request_env["rack.input"]) -- request_env["rack.errors"] = StringIO.new() -- -- status, headers, body = @app.call(request_env) -- -- rack_errors = request_env['rack.errors'].string() -- if not rack_errors.empty?() -- $logger.error(rack_errors) -- end -+ status, headers, body = @app.call(env) - - return pack_response({ - :status => status, -@@ -56,16 +39,20 @@ class TornadoCommunicationMiddleware - }) - end - -- if request["type"] == "sync_configs" -+ if type == "sync_configs" - return pack_response({ - :next => Time.now.to_i + run_cfgsync(), - :logs => Thread.current[:pcsd_logger_container], - }) - end - -- raise "Unexpected value for key 'type': '#{request['type']}'" -+ return pack_response({ -+ :error => "Unexpected value for key 'type': '#{type}'" -+ }) - rescue => e -- return pack_response({:error => "Processing request error: '#{e}'"}) -+ return pack_response({ -+ :error => "Processing request error: '#{e}' '#{e.backtrace}'" -+ }) - end - end - end --- -2.21.1 - diff --git a/SOURCES/bz1792946-01-tests-update-for-pacemaker-2.0.3-4.patch b/SOURCES/bz1792946-01-tests-update-for-pacemaker-2.0.3-4.patch deleted file mode 100644 index 0168f2c..0000000 --- a/SOURCES/bz1792946-01-tests-update-for-pacemaker-2.0.3-4.patch +++ /dev/null @@ -1,367 +0,0 @@ -From 9fbeeed4e43dc37800de3c3f0cf6f7520dc31ccf Mon Sep 17 00:00:00 2001 -From: Tomas Jelinek -Date: Mon, 20 Jan 2020 12:34:55 +0100 -Subject: [PATCH] tests: update for pacemaker-2.0.3-4 - ---- - pcs_test/tier0/test_resource.py | 59 +++++++++++++------------- - pcs_test/tier0/test_stonith.py | 75 +++++++++++++++++---------------- - pcs_test/tools/assertions.py | 24 +++++++++-- - 3 files changed, 88 insertions(+), 70 deletions(-) - -diff --git a/pcs_test/tier0/test_resource.py b/pcs_test/tier0/test_resource.py -index b8b85dd2..45d98dff 100644 ---- a/pcs_test/tier0/test_resource.py -+++ b/pcs_test/tier0/test_resource.py -@@ -10,6 +10,7 @@ from pcs_test.tier0.cib_resource.common import ResourceTest - from pcs_test.tools.assertions import ( - ac, - AssertPcsMixin, -+ assert_pcs_status, - ) - from pcs_test.tools.bin_mock import get_mock_settings - from pcs_test.tools.cib import get_assert_pcs_effect_mixin -@@ -953,11 +954,11 @@ monitor interval=20 (A-monitor-interval-20) - o,r = pcs(temp_cib, "resource status") - assert r == 0 - if PCMK_2_0_3_PLUS: -- ac(o,"""\ -+ assert_pcs_status(o,"""\ - * Resource Group: AGroup: -- * A1\t(ocf::heartbeat:Dummy):\t Stopped -- * A2\t(ocf::heartbeat:Dummy):\t Stopped -- * A3\t(ocf::heartbeat:Dummy):\t Stopped -+ * A1\t(ocf::heartbeat:Dummy):\tStopped -+ * A2\t(ocf::heartbeat:Dummy):\tStopped -+ * A3\t(ocf::heartbeat:Dummy):\tStopped - """) - else: - ac(o,"""\ -@@ -1208,19 +1209,19 @@ monitor interval=20 (A-monitor-interval-20) - output, returnVal = pcs(temp_cib, "resource") - assert returnVal == 0 - if PCMK_2_0_3_PLUS: -- ac(output, """\ -- * F\t(ocf::heartbeat:Dummy):\t Stopped -- * G\t(ocf::heartbeat:Dummy):\t Stopped -- * H\t(ocf::heartbeat:Dummy):\t Stopped -+ assert_pcs_status(output, """\ -+ * F\t(ocf::heartbeat:Dummy):\tStopped -+ * G\t(ocf::heartbeat:Dummy):\tStopped -+ * H\t(ocf::heartbeat:Dummy):\tStopped - * Resource Group: RGA: -- * A\t(ocf::heartbeat:Dummy):\t Stopped -- * B\t(ocf::heartbeat:Dummy):\t Stopped -- * C\t(ocf::heartbeat:Dummy):\t Stopped -- * E\t(ocf::heartbeat:Dummy):\t Stopped -- * D\t(ocf::heartbeat:Dummy):\t Stopped -- * K\t(ocf::heartbeat:Dummy):\t Stopped -- * J\t(ocf::heartbeat:Dummy):\t Stopped -- * 
I\t(ocf::heartbeat:Dummy):\t Stopped -+ * A\t(ocf::heartbeat:Dummy):\tStopped -+ * B\t(ocf::heartbeat:Dummy):\tStopped -+ * C\t(ocf::heartbeat:Dummy):\tStopped -+ * E\t(ocf::heartbeat:Dummy):\tStopped -+ * D\t(ocf::heartbeat:Dummy):\tStopped -+ * K\t(ocf::heartbeat:Dummy):\tStopped -+ * J\t(ocf::heartbeat:Dummy):\tStopped -+ * I\t(ocf::heartbeat:Dummy):\tStopped - """) - else: - ac(output, """\ -@@ -2004,9 +2005,9 @@ monitor interval=20 (A-monitor-interval-20) - - o,r = pcs(temp_cib, "resource") - if PCMK_2_0_3_PLUS: -- ac(o,"""\ -+ assert_pcs_status(o,"""\ - * Resource Group: AG: -- * D1\t(ocf::heartbeat:Dummy):\t Stopped -+ * D1\t(ocf::heartbeat:Dummy):\tStopped - * Clone Set: D0-clone [D0]: - """) - else: -@@ -2348,10 +2349,10 @@ monitor interval=20 (A-monitor-interval-20) - o,r = pcs(temp_cib, "resource status") - assert r == 0 - if PCMK_2_0_3_PLUS: -- ac(o,"""\ -+ assert_pcs_status(o,"""\ - * Resource Group: DGroup: -- * D1\t(ocf::heartbeat:Dummy):\t Stopped -- * D2\t(ocf::heartbeat:Dummy):\t Stopped -+ * D1\t(ocf::heartbeat:Dummy):\tStopped -+ * D2\t(ocf::heartbeat:Dummy):\tStopped - """) - else: - ac(o,"""\ -@@ -3560,12 +3561,12 @@ Error: role must be: Stopped, Started, Slave or Master (use --force to override) - assert retVal == 0 - output, retVal = pcs(temp_cib, "resource status") - if PCMK_2_0_3_PLUS: -- ac(output, outdent( -+ assert_pcs_status(output, outdent( - """\ - * Resource Group: dummies: -- * dummy1\t(ocf::heartbeat:Dummy):\t Stopped -- * dummy2\t(ocf::heartbeat:Dummy):\t Stopped -- * dummy3\t(ocf::heartbeat:Dummy):\t Stopped -+ * dummy1\t(ocf::heartbeat:Dummy):\tStopped -+ * dummy2\t(ocf::heartbeat:Dummy):\tStopped -+ * dummy3\t(ocf::heartbeat:Dummy):\tStopped - """ - )) - else: -@@ -3652,12 +3653,12 @@ Error: role must be: Stopped, Started, Slave or Master (use --force to override) - assert retVal == 0 - output, retVal = pcs(temp_cib, "resource status") - if PCMK_2_0_3_PLUS: -- ac(output, outdent( -+ assert_pcs_status(output, outdent( - """\ - * Resource Group: dummies: -- * dummy1\t(ocf::heartbeat:Dummy):\t Stopped -- * dummy2\t(ocf::heartbeat:Dummy):\t Stopped -- * dummy3\t(ocf::heartbeat:Dummy):\t Stopped -+ * dummy1\t(ocf::heartbeat:Dummy):\tStopped -+ * dummy2\t(ocf::heartbeat:Dummy):\tStopped -+ * dummy3\t(ocf::heartbeat:Dummy):\tStopped - """ - )) - else: -diff --git a/pcs_test/tier0/test_stonith.py b/pcs_test/tier0/test_stonith.py -index 46938e75..097a79b9 100644 ---- a/pcs_test/tier0/test_stonith.py -+++ b/pcs_test/tier0/test_stonith.py -@@ -517,13 +517,13 @@ class StonithTest(TestCase, AssertPcsMixin): - if PCMK_2_0_3_PLUS: - self.assert_pcs_success("stonith", outdent( - """\ -- * n1-ipmi\t(stonith:fence_apc):\t Stopped -- * n2-ipmi\t(stonith:fence_apc):\t Stopped -- * n1-apc1\t(stonith:fence_apc):\t Stopped -- * n1-apc2\t(stonith:fence_apc):\t Stopped -- * n2-apc1\t(stonith:fence_apc):\t Stopped -- * n2-apc2\t(stonith:fence_apc):\t Stopped -- * n2-apc3\t(stonith:fence_apc):\t Stopped -+ * n1-ipmi\t(stonith:fence_apc):\tStopped -+ * n2-ipmi\t(stonith:fence_apc):\tStopped -+ * n1-apc1\t(stonith:fence_apc):\tStopped -+ * n1-apc2\t(stonith:fence_apc):\tStopped -+ * n2-apc1\t(stonith:fence_apc):\tStopped -+ * n2-apc2\t(stonith:fence_apc):\tStopped -+ * n2-apc3\t(stonith:fence_apc):\tStopped - Target: rh7-1 - Level 1 - n1-ipmi - Level 2 - n1-apc1,n1-apc2,n2-apc2 -@@ -531,7 +531,7 @@ class StonithTest(TestCase, AssertPcsMixin): - Level 1 - n2-ipmi - Level 2 - n2-apc1,n2-apc2,n2-apc3 - """ -- )) -+ ), despace=True) - else: - self.assert_pcs_success("stonith", 
outdent( - """\ -@@ -559,12 +559,12 @@ class StonithTest(TestCase, AssertPcsMixin): - if PCMK_2_0_3_PLUS: - self.assert_pcs_success("stonith", outdent( - """\ -- * n1-ipmi\t(stonith:fence_apc):\t Stopped -- * n2-ipmi\t(stonith:fence_apc):\t Stopped -- * n1-apc1\t(stonith:fence_apc):\t Stopped -- * n1-apc2\t(stonith:fence_apc):\t Stopped -- * n2-apc1\t(stonith:fence_apc):\t Stopped -- * n2-apc3\t(stonith:fence_apc):\t Stopped -+ * n1-ipmi\t(stonith:fence_apc):\tStopped -+ * n2-ipmi\t(stonith:fence_apc):\tStopped -+ * n1-apc1\t(stonith:fence_apc):\tStopped -+ * n1-apc2\t(stonith:fence_apc):\tStopped -+ * n2-apc1\t(stonith:fence_apc):\tStopped -+ * n2-apc3\t(stonith:fence_apc):\tStopped - Target: rh7-1 - Level 1 - n1-ipmi - Level 2 - n1-apc1,n1-apc2 -@@ -572,7 +572,7 @@ class StonithTest(TestCase, AssertPcsMixin): - Level 1 - n2-ipmi - Level 2 - n2-apc1,n2-apc3 - """ -- )) -+ ), despace=True) - else: - self.assert_pcs_success("stonith", outdent( - """\ -@@ -599,11 +599,11 @@ class StonithTest(TestCase, AssertPcsMixin): - if PCMK_2_0_3_PLUS: - self.assert_pcs_success("stonith", outdent( - """\ -- * n1-ipmi\t(stonith:fence_apc):\t Stopped -- * n2-ipmi\t(stonith:fence_apc):\t Stopped -- * n1-apc1\t(stonith:fence_apc):\t Stopped -- * n1-apc2\t(stonith:fence_apc):\t Stopped -- * n2-apc3\t(stonith:fence_apc):\t Stopped -+ * n1-ipmi\t(stonith:fence_apc):\tStopped -+ * n2-ipmi\t(stonith:fence_apc):\tStopped -+ * n1-apc1\t(stonith:fence_apc):\tStopped -+ * n1-apc2\t(stonith:fence_apc):\tStopped -+ * n2-apc3\t(stonith:fence_apc):\tStopped - Target: rh7-1 - Level 1 - n1-ipmi - Level 2 - n1-apc1,n1-apc2 -@@ -611,7 +611,7 @@ class StonithTest(TestCase, AssertPcsMixin): - Level 1 - n2-ipmi - Level 2 - n2-apc3 - """ -- )) -+ ), despace=True) - else: - self.assert_pcs_success("stonith", outdent( - """\ -@@ -637,17 +637,17 @@ class StonithTest(TestCase, AssertPcsMixin): - if PCMK_2_0_3_PLUS: - self.assert_pcs_success("stonith", outdent( - """\ -- * n1-ipmi\t(stonith:fence_apc):\t Stopped -- * n2-ipmi\t(stonith:fence_apc):\t Stopped -- * n1-apc1\t(stonith:fence_apc):\t Stopped -- * n1-apc2\t(stonith:fence_apc):\t Stopped -+ * n1-ipmi\t(stonith:fence_apc):\tStopped -+ * n2-ipmi\t(stonith:fence_apc):\tStopped -+ * n1-apc1\t(stonith:fence_apc):\tStopped -+ * n1-apc2\t(stonith:fence_apc):\tStopped - Target: rh7-1 - Level 1 - n1-ipmi - Level 2 - n1-apc1,n1-apc2 - Target: rh7-2 - Level 1 - n2-ipmi - """ -- )) -+ ), despace=True) - else: - self.assert_pcs_success("stonith", outdent( - """\ -@@ -671,16 +671,16 @@ class StonithTest(TestCase, AssertPcsMixin): - if PCMK_2_0_3_PLUS: - self.assert_pcs_success("stonith", outdent( - """\ -- * n1-ipmi\t(stonith:fence_apc):\t Stopped -- * n2-ipmi\t(stonith:fence_apc):\t Stopped -- * n1-apc2\t(stonith:fence_apc):\t Stopped -+ * n1-ipmi\t(stonith:fence_apc):\tStopped -+ * n2-ipmi\t(stonith:fence_apc):\tStopped -+ * n1-apc2\t(stonith:fence_apc):\tStopped - Target: rh7-1 - Level 1 - n1-ipmi - Level 2 - n1-apc2 - Target: rh7-2 - Level 1 - n2-ipmi - """ -- )) -+ ), despace=True) - else: - self.assert_pcs_success("stonith", outdent( - """\ -@@ -704,14 +704,14 @@ class StonithTest(TestCase, AssertPcsMixin): - if PCMK_2_0_3_PLUS: - self.assert_pcs_success("stonith", outdent( - """\ -- * n1-ipmi\t(stonith:fence_apc):\t Stopped -- * n2-ipmi\t(stonith:fence_apc):\t Stopped -+ * n1-ipmi\t(stonith:fence_apc):\tStopped -+ * n2-ipmi\t(stonith:fence_apc):\tStopped - Target: rh7-1 - Level 1 - n1-ipmi - Target: rh7-2 - Level 1 - n2-ipmi - """ -- )) -+ ), despace=True) - else: - 
self.assert_pcs_success("stonith", outdent( - """\ -@@ -1219,9 +1219,9 @@ class LevelConfig(LevelTestsBase): - if PCMK_2_0_3_PLUS: - result = outdent( - """\ -- * F1\t(stonith:fence_apc):\t Stopped -- * F2\t(stonith:fence_apc):\t Stopped -- * F3\t(stonith:fence_apc):\t Stopped -+ * F1\t(stonith:fence_apc):\tStopped -+ * F2\t(stonith:fence_apc):\tStopped -+ * F3\t(stonith:fence_apc):\tStopped - """ - ) - else: -@@ -1234,7 +1234,8 @@ class LevelConfig(LevelTestsBase): - ) - self.assert_pcs_success( - "stonith", -- result + "\n".join(indent(self.config_lines, 1)) + "\n" -+ result + "\n".join(indent(self.config_lines, 1)) + "\n", -+ despace=True - ) - self.pcs_runner.mock_settings["corosync_conf_file"] = rc( - "corosync.conf" -diff --git a/pcs_test/tools/assertions.py b/pcs_test/tools/assertions.py -index db8f4df5..a2b7b4ac 100644 ---- a/pcs_test/tools/assertions.py -+++ b/pcs_test/tools/assertions.py -@@ -59,7 +59,8 @@ class AssertPcsMixin: - ) - - def assert_pcs_success( -- self, command, stdout_full=None, stdout_start=None, stdout_regexp=None -+ self, command, stdout_full=None, stdout_start=None, stdout_regexp=None, -+ despace=False - ): - full = stdout_full - if ( -@@ -75,7 +76,8 @@ class AssertPcsMixin: - stdout_full=full, - stdout_start=stdout_start, - stdout_regexp=stdout_regexp, -- returncode=0 -+ returncode=0, -+ despace=despace, - ) - - def assert_pcs_fail( -@@ -99,7 +101,7 @@ class AssertPcsMixin: - - def assert_pcs_result( - self, command, stdout_full=None, stdout_start=None, stdout_regexp=None, -- returncode=0 -+ returncode=0, despace=False - ): - msg = ( - "Please specify exactly one: stdout_start or stdout_full or" -@@ -162,7 +164,11 @@ class AssertPcsMixin: - ) - else: - expected_full = self.__prepare_output(stdout_full) -- if stdout != expected_full: -+ if ( -+ (despace and _despace(stdout) != _despace(expected_full)) -+ or -+ (not despace and stdout != expected_full) -+ ): - self.assertEqual( - stdout, - expected_full, -@@ -386,3 +392,13 @@ def __report_item_equal(real_report_item, report_item_info): - ) - ) - ) -+ -+def assert_pcs_status(status1, status2): -+ if _despace(status1) != _despace(status2): -+ raise AssertionError( -+ "strings not equal:\n{0}".format(prepare_diff(status1, status2)) -+ ) -+ -+def _despace(string): -+ # ignore whitespace changes between various pacemaker versions -+ return re.sub(r"[ \t]+", " ", string) --- -2.20.1 - diff --git a/SOURCES/bz1793574-01-fix-detecting-fence-history-support.patch b/SOURCES/bz1793574-01-fix-detecting-fence-history-support.patch deleted file mode 100644 index b8f7672..0000000 --- a/SOURCES/bz1793574-01-fix-detecting-fence-history-support.patch +++ /dev/null @@ -1,541 +0,0 @@ -From ac0305a8b6bb040ef06dcbfff309c91321400d44 Mon Sep 17 00:00:00 2001 -From: Tomas Jelinek -Date: Mon, 27 Jan 2020 17:05:42 +0100 -Subject: [PATCH 3/7] fix detecting fence history support - ---- - pcs/lib/commands/stonith.py | 38 ++++++++------ - pcs/lib/pacemaker/live.py | 45 +++++++++------- - .../crm_mon.rng.with_fence_history.xml | 13 ----- - .../crm_mon.rng.without_fence_history.xml | 9 ---- - pcs_test/tier0/lib/commands/test_status.py | 35 +++---------- - .../lib/commands/test_stonith_history.py | 52 ++++++------------- - pcs_test/tier0/lib/pacemaker/test_live.py | 31 ++++++++++- - .../tools/command_env/config_runner_pcmk.py | 41 +++++++++++++++ - pcs_test/tools/command_env/mock_runner.py | 1 + - 9 files changed, 141 insertions(+), 124 deletions(-) - delete mode 100644 pcs_test/resources/crm_mon.rng.with_fence_history.xml - delete mode 100644 
pcs_test/resources/crm_mon.rng.without_fence_history.xml - -diff --git a/pcs/lib/commands/stonith.py b/pcs/lib/commands/stonith.py -index c0849a54..ff87c852 100644 ---- a/pcs/lib/commands/stonith.py -+++ b/pcs/lib/commands/stonith.py -@@ -1,3 +1,7 @@ -+from typing import ( -+ Optional, -+) -+ - from pcs.lib import reports - from pcs.lib.cib import resource - from pcs.lib.cib.resource.common import are_meta_disabled -@@ -6,13 +10,14 @@ from pcs.lib.commands.resource import ( - _ensure_disabled_after_wait, - resource_environment - ) -+from pcs.lib.env import LibraryEnvironment - from pcs.lib.errors import LibraryError - from pcs.lib.pacemaker.live import ( - FenceHistoryCommandErrorException, - fence_history_cleanup, - fence_history_text, - fence_history_update, -- is_fence_history_supported, -+ is_fence_history_supported_management, - ) - from pcs.lib.pacemaker.values import validate_id - from pcs.lib.resource_agent import find_valid_stonith_agent_by_name as get_agent -@@ -162,51 +167,54 @@ def create_in_group( - put_after_adjacent, - ) - --def history_get_text(env, node=None): -+def history_get_text(env: LibraryEnvironment, node: Optional[str] = None): - """ - Get full fencing history in plain text - -- LibraryEnvironment env -- string node -- get history for the specified node or all nodes if None -+ env -+ node -- get history for the specified node or all nodes if None - """ -- if not is_fence_history_supported(): -+ runner = env.cmd_runner() -+ if not is_fence_history_supported_management(runner): - raise LibraryError(reports.fence_history_not_supported()) - - try: -- return fence_history_text(env.cmd_runner(), node) -+ return fence_history_text(runner, node) - except FenceHistoryCommandErrorException as e: - raise LibraryError( - reports.fence_history_command_error(str(e), "show") - ) - --def history_cleanup(env, node=None): -+def history_cleanup(env: LibraryEnvironment, node: Optional[str] = None): - """ - Clear fencing history - -- LibraryEnvironment env -- string node -- clear history for the specified node or all nodes if None -+ env -+ node -- clear history for the specified node or all nodes if None - """ -- if not is_fence_history_supported(): -+ runner = env.cmd_runner() -+ if not is_fence_history_supported_management(runner): - raise LibraryError(reports.fence_history_not_supported()) - - try: -- return fence_history_cleanup(env.cmd_runner(), node) -+ return fence_history_cleanup(runner, node) - except FenceHistoryCommandErrorException as e: - raise LibraryError( - reports.fence_history_command_error(str(e), "cleanup") - ) - --def history_update(env): -+def history_update(env: LibraryEnvironment): - """ - Update fencing history in a cluster (sync with other nodes) - -- LibraryEnvironment env -+ env - """ -- if not is_fence_history_supported(): -+ runner = env.cmd_runner() -+ if not is_fence_history_supported_management(runner): - raise LibraryError(reports.fence_history_not_supported()) - - try: -- return fence_history_update(env.cmd_runner()) -+ return fence_history_update(runner) - except FenceHistoryCommandErrorException as e: - raise LibraryError( - reports.fence_history_command_error(str(e), "update") -diff --git a/pcs/lib/pacemaker/live.py b/pcs/lib/pacemaker/live.py -index 233f2e2d..d6741441 100644 ---- a/pcs/lib/pacemaker/live.py -+++ b/pcs/lib/pacemaker/live.py -@@ -1,6 +1,7 @@ - import os.path - import re - from typing import ( -+ Iterable, - List, - Tuple, - ) -@@ -56,7 +57,7 @@ def get_cluster_status_text( - cmd.extend(["--show-detail", "--show-node-attributes", 
"--failcounts"]) - # by default, pending and failed actions are displayed - # with verbose==True, we display the whole history -- if is_fence_history_supported(): -+ if is_fence_history_supported_status(runner): - cmd.append("--fence-history=3") - stdout, stderr, retval = runner.run(cmd) - -@@ -523,25 +524,15 @@ def _resource_move_ban_clear( - - ### fence history - --def is_fence_history_supported(): -- try: -- crm_mon_rng = xml_fromstring(open(settings.crm_mon_schema, "r").read()) -- # Namespaces must be provided otherwise xpath won't match anything. -- # 'None' namespace is not supported, so we rename it. -- namespaces_map = { -- "ns": crm_mon_rng.nsmap.pop(None) -- } -- history_elements = crm_mon_rng.xpath( -- ".//ns:element[@name='fence_history']", -- namespaces=namespaces_map -- ) -- if history_elements: -- return True -- except (EnvironmentError, etree.XMLSyntaxError): -- # if we cannot tell for sure fence_history is supported, we will -- # continue as if it was not supported -- pass -- return False -+def is_fence_history_supported_status(runner: CommandRunner) -> bool: -+ return _is_in_pcmk_tool_help( -+ runner, "crm_mon", ["--fence-history"] -+ ) -+ -+def is_fence_history_supported_management(runner: CommandRunner) -> bool: -+ return _is_in_pcmk_tool_help( -+ runner, "stonith_admin", ["--history", "--broadcast", "--cleanup"] -+ ) - - def fence_history_cleanup(runner, node=None): - return _run_fence_history_command(runner, "--cleanup", node) -@@ -583,3 +574,17 @@ def __is_in_crm_resource_help(runner, text): - ) - # help goes to stderr but we check stdout as well if that gets changed - return text in stderr or text in stdout -+ -+def _is_in_pcmk_tool_help( -+ runner: CommandRunner, tool: str, text_list: Iterable[str] -+) -> bool: -+ stdout, stderr, dummy_retval = runner.run( -+ [__exec(tool), "--help-all"] -+ ) -+ # Help goes to stderr but we check stdout as well if that gets changed. Use -+ # generators in all to return early. 
-+ return ( -+ all(text in stderr for text in text_list) -+ or -+ all(text in stdout for text in text_list) -+ ) -diff --git a/pcs_test/resources/crm_mon.rng.with_fence_history.xml b/pcs_test/resources/crm_mon.rng.with_fence_history.xml -deleted file mode 100644 -index 45b380bd..00000000 ---- a/pcs_test/resources/crm_mon.rng.with_fence_history.xml -+++ /dev/null -@@ -1,13 +0,0 @@ -- -- -- -- -- -- -- -- -- -- -- -- -- -diff --git a/pcs_test/resources/crm_mon.rng.without_fence_history.xml b/pcs_test/resources/crm_mon.rng.without_fence_history.xml -deleted file mode 100644 -index f7efe52c..00000000 ---- a/pcs_test/resources/crm_mon.rng.without_fence_history.xml -+++ /dev/null -@@ -1,9 +0,0 @@ -- -- -- -- -- -- -- -- -- -diff --git a/pcs_test/tier0/lib/commands/test_status.py b/pcs_test/tier0/lib/commands/test_status.py -index 517aa908..06878668 100644 ---- a/pcs_test/tier0/lib/commands/test_status.py -+++ b/pcs_test/tier0/lib/commands/test_status.py -@@ -1,15 +1,12 @@ - from textwrap import dedent --from unittest import mock, TestCase -+from unittest import TestCase - --from pcs import settings - from pcs.common import file_type_codes, report_codes - from pcs.lib.commands import status - from pcs_test.tools import fixture - from pcs_test.tools.command_env import get_env_tools - from pcs_test.tools.misc import read_test_resource as rc_read - --crm_mon_rng_with_history = rc_read("crm_mon.rng.with_fence_history.xml") --crm_mon_rng_without_history = rc_read("crm_mon.rng.without_fence_history.xml") - - class FullClusterStatusPlaintext(TestCase): - def setUp(self): -@@ -212,11 +209,7 @@ class FullClusterStatusPlaintext(TestCase): - def test_success_live_verbose(self): - (self.config - .env.set_known_nodes(self.node_name_list) -- .fs.open( -- settings.crm_mon_schema, -- mock.mock_open(read_data=crm_mon_rng_without_history)(), -- name="fs.open.crm_mon_rng" -- ) -+ .runner.pcmk.can_fence_history_status(stderr="not supported") - .runner.pcmk.load_state_plaintext( - verbose=True, - stdout="crm_mon cluster status", -@@ -288,11 +281,7 @@ class FullClusterStatusPlaintext(TestCase): - (self.config - .env.set_corosync_conf_data(rc_read("corosync.conf")) - .env.set_cib_data("") -- .fs.open( -- settings.crm_mon_schema, -- mock.mock_open(read_data=crm_mon_rng_without_history)(), -- name="fs.open.crm_mon_rng" -- ) -+ .runner.pcmk.can_fence_history_status(stderr="not supported") - .runner.pcmk.load_state_plaintext( - verbose=True, stdout="crm_mon cluster status", - ) -@@ -320,11 +309,7 @@ class FullClusterStatusPlaintext(TestCase): - def test_success_verbose_inactive_and_fence_history(self): - (self.config - .env.set_known_nodes(self.node_name_list) -- .fs.open( -- settings.crm_mon_schema, -- mock.mock_open(read_data=crm_mon_rng_with_history)(), -- name="fs.open.crm_mon_rng" -- ) -+ .runner.pcmk.can_fence_history_status() - .runner.pcmk.load_state_plaintext( - verbose=True, - inactive=False, -@@ -375,11 +360,7 @@ class FullClusterStatusPlaintext(TestCase): - def _assert_success_with_ticket_status_failure(self, stderr="", msg=""): - (self.config - .env.set_known_nodes(self.node_name_list) -- .fs.open( -- settings.crm_mon_schema, -- mock.mock_open(read_data=crm_mon_rng_without_history)(), -- name="fs.open.crm_mon_rng" -- ) -+ .runner.pcmk.can_fence_history_status(stderr="not supported") - .runner.pcmk.load_state_plaintext( - verbose=True, - stdout="crm_mon cluster status", -@@ -553,11 +534,7 @@ class FullClusterStatusPlaintext(TestCase): - - (self.config - .env.set_known_nodes(self.node_name_list[1:]) -- .fs.open( 
-- settings.crm_mon_schema, -- mock.mock_open(read_data=crm_mon_rng_without_history)(), -- name="fs.open.crm_mon_rng" -- ) -+ .runner.pcmk.can_fence_history_status(stderr="not supported") - .runner.pcmk.load_state_plaintext( - verbose=True, - stdout="crm_mon cluster status", -diff --git a/pcs_test/tier0/lib/commands/test_stonith_history.py b/pcs_test/tier0/lib/commands/test_stonith_history.py -index e1bd35cb..cfdef13c 100644 ---- a/pcs_test/tier0/lib/commands/test_stonith_history.py -+++ b/pcs_test/tier0/lib/commands/test_stonith_history.py -@@ -1,25 +1,16 @@ --from unittest import mock, TestCase -+from unittest import TestCase - - from pcs_test.tools import fixture - from pcs_test.tools.command_env import get_env_tools --from pcs_test.tools.misc import read_test_resource as rc_read - --from pcs import settings - from pcs.common import report_codes - from pcs.lib.commands import stonith - - --crm_mon_rng_with_history = rc_read("crm_mon.rng.with_fence_history.xml") --crm_mon_rng_without_history = rc_read("crm_mon.rng.without_fence_history.xml") -- - class HistoryGetText(TestCase): - def setUp(self): - self.env_assist, self.config = get_env_tools(test_case=self) -- self.config.fs.open( -- settings.crm_mon_schema, -- mock.mock_open(read_data=crm_mon_rng_with_history)(), -- name="fs.open.crm_mon_rng" -- ) -+ self.config.runner.pcmk.can_fence_history_manage() - - def test_success_all_nodes(self): - history = ( -@@ -68,11 +59,10 @@ class HistoryGetText(TestCase): - ) - - def test_history_not_supported(self): -- self.config.fs.open( -- settings.crm_mon_schema, -- mock.mock_open(read_data=crm_mon_rng_without_history)(), -- name="fs.open.crm_mon_rng", -- instead="fs.open.crm_mon_rng" -+ self.config.runner.pcmk.can_fence_history_manage( -+ stderr="not supported", -+ name="runner.pcmk.can_fence_history_manage", -+ instead="runner.pcmk.can_fence_history_manage", - ) - self.env_assist.assert_raise_library_error( - lambda: stonith.history_get_text(self.env_assist.get_env()), -@@ -88,11 +78,7 @@ class HistoryGetText(TestCase): - class HistoryCleanup(TestCase): - def setUp(self): - self.env_assist, self.config = get_env_tools(test_case=self) -- self.config.fs.open( -- settings.crm_mon_schema, -- mock.mock_open(read_data=crm_mon_rng_with_history)(), -- name="fs.open.crm_mon_rng" -- ) -+ self.config.runner.pcmk.can_fence_history_manage() - - def test_success_all_nodes(self): - msg = "cleaning up fencing-history for node *\n" -@@ -129,11 +115,10 @@ class HistoryCleanup(TestCase): - ) - - def test_history_not_supported(self): -- self.config.fs.open( -- settings.crm_mon_schema, -- mock.mock_open(read_data=crm_mon_rng_without_history)(), -- name="fs.open.crm_mon_rng", -- instead="fs.open.crm_mon_rng" -+ self.config.runner.pcmk.can_fence_history_manage( -+ stderr="not supported", -+ name="runner.pcmk.can_fence_history_manage", -+ instead="runner.pcmk.can_fence_history_manage", - ) - self.env_assist.assert_raise_library_error( - lambda: stonith.history_cleanup(self.env_assist.get_env()), -@@ -149,11 +134,7 @@ class HistoryCleanup(TestCase): - class HistoryUpdate(TestCase): - def setUp(self): - self.env_assist, self.config = get_env_tools(test_case=self) -- self.config.fs.open( -- settings.crm_mon_schema, -- mock.mock_open(read_data=crm_mon_rng_with_history)(), -- name="fs.open.crm_mon_rng" -- ) -+ self.config.runner.pcmk.can_fence_history_manage() - - def test_success_all_nodes(self): - msg = "gather fencing-history from all nodes\n" -@@ -182,11 +163,10 @@ class HistoryUpdate(TestCase): - ) - - def 
test_history_not_supported(self): -- self.config.fs.open( -- settings.crm_mon_schema, -- mock.mock_open(read_data=crm_mon_rng_without_history)(), -- name="fs.open.crm_mon_rng", -- instead="fs.open.crm_mon_rng" -+ self.config.runner.pcmk.can_fence_history_manage( -+ stderr="not supported", -+ name="runner.pcmk.can_fence_history_manage", -+ instead="runner.pcmk.can_fence_history_manage", - ) - self.env_assist.assert_raise_library_error( - lambda: stonith.history_update(self.env_assist.get_env()), -diff --git a/pcs_test/tier0/lib/pacemaker/test_live.py b/pcs_test/tier0/lib/pacemaker/test_live.py -index 1ea5454e..d69d8b34 100644 ---- a/pcs_test/tier0/lib/pacemaker/test_live.py -+++ b/pcs_test/tier0/lib/pacemaker/test_live.py -@@ -79,7 +79,7 @@ class GetClusterStatusXmlTest(LibraryPacemakerTest): - class GetClusterStatusText(TestCase): - def setUp(self): - self.mock_fencehistory_supported = mock.patch( -- "pcs.lib.pacemaker.live.is_fence_history_supported", -+ "pcs.lib.pacemaker.live.is_fence_history_supported_status", - return_value=True - ) - self.mock_fencehistory_supported.start() -@@ -125,7 +125,7 @@ class GetClusterStatusText(TestCase): - def test_success_no_fence_history(self): - self.mock_fencehistory_supported.stop() - self.mock_fencehistory_supported = mock.patch( -- "pcs.lib.pacemaker.live.is_fence_history_supported", -+ "pcs.lib.pacemaker.live.is_fence_history_supported_status", - return_value=False - ) - self.mock_fencehistory_supported.start() -@@ -1399,3 +1399,30 @@ class ResourcesWaitingTest(LibraryPacemakerTest): - mock_runner.run.assert_called_once_with( - [self.path("crm_resource"), "--wait"] - ) -+ -+ -+class IsInPcmkToolHelp(TestCase): -+ # pylint: disable=protected-access -+ def test_all_in_stderr(self): -+ mock_runner = get_runner("", "ABCDE", 0) -+ self.assertTrue( -+ lib._is_in_pcmk_tool_help(mock_runner, "", ["A", "C", "E"]) -+ ) -+ -+ def test_all_in_stdout(self): -+ mock_runner = get_runner("ABCDE", "", 0) -+ self.assertTrue( -+ lib._is_in_pcmk_tool_help(mock_runner, "", ["A", "C", "E"]) -+ ) -+ -+ def test_some_in_stderr_all_in_stdout(self): -+ mock_runner = get_runner("ABCDE", "ABC", 0) -+ self.assertTrue( -+ lib._is_in_pcmk_tool_help(mock_runner, "", ["A", "C", "E"]) -+ ) -+ -+ def test_some_in_stderr_some_in_stdout(self): -+ mock_runner = get_runner("CDE", "ABC", 0) -+ self.assertFalse( -+ lib._is_in_pcmk_tool_help(mock_runner, "", ["A", "C", "E"]) -+ ) -diff --git a/pcs_test/tools/command_env/config_runner_pcmk.py b/pcs_test/tools/command_env/config_runner_pcmk.py -index 5bb9755b..0580e8d6 100644 ---- a/pcs_test/tools/command_env/config_runner_pcmk.py -+++ b/pcs_test/tools/command_env/config_runner_pcmk.py -@@ -70,11 +70,52 @@ def _fixture_state_node_xml( - - - class PcmkShortcuts(): -+ #pylint: disable=too-many-public-methods - def __init__(self, calls): - self.__calls = calls - self.default_wait_timeout = DEFAULT_WAIT_TIMEOUT - self.default_wait_error_returncode = WAIT_TIMEOUT_EXPIRED_RETURNCODE - -+ def can_fence_history_manage( -+ self, -+ name="runner.pcmk.can_fence_history_manage", -+ stderr="--history --cleanup --broadcast", -+ instead=None, -+ ): -+ """ -+ Create a call to check if fence_history is supported by stonith_admin -+ -+ string name -- key of the call -+ string stderr -- stonith_admin help text -+ string instead -- key of call instead of which this new call is to be -+ placed -+ """ -+ self.__calls.place( -+ name, -+ RunnerCall("stonith_admin --help-all", stderr=stderr), -+ instead=instead, -+ ) -+ -+ def can_fence_history_status( -+ self, -+ 
name="runner.pcmk.can_fence_history_status", -+ stderr="--fence-history", -+ instead=None, -+ ): -+ """ -+ Create a call to check if fence_history is supported by crm_mon -+ -+ string name -- key of the call -+ string stderr -- crm_mon help text -+ string instead -- key of call instead of which this new call is to be -+ placed -+ """ -+ self.__calls.place( -+ name, -+ RunnerCall("crm_mon --help-all", stderr=stderr), -+ instead=instead, -+ ) -+ - def fence_history_get( - self, name="runner.pcmk.fence_history_get", node=None, stdout="", - stderr="", returncode=0 -diff --git a/pcs_test/tools/command_env/mock_runner.py b/pcs_test/tools/command_env/mock_runner.py -index 2fe43137..8b9cb771 100644 ---- a/pcs_test/tools/command_env/mock_runner.py -+++ b/pcs_test/tools/command_env/mock_runner.py -@@ -61,6 +61,7 @@ COMMAND_COMPLETIONS = { - "crm_ticket": path.join(settings.pacemaker_binaries, "crm_ticket"), - "crm_verify": path.join(settings.pacemaker_binaries, "crm_verify"), - "sbd": settings.sbd_binary, -+ "stonith_admin": path.join(settings.pacemaker_binaries, "stonith_admin"), - } - - def complete_command(command): --- -2.21.1 - diff --git a/SOURCES/bz1805082-01-fix-resource-stonith-refresh-documentation.patch b/SOURCES/bz1805082-01-fix-resource-stonith-refresh-documentation.patch new file mode 100644 index 0000000..7703e96 --- /dev/null +++ b/SOURCES/bz1805082-01-fix-resource-stonith-refresh-documentation.patch @@ -0,0 +1,57 @@ +From be40fe494ddeb4f7132389ca0f3c1193de0e425d Mon Sep 17 00:00:00 2001 +From: Tomas Jelinek +Date: Tue, 23 Jun 2020 12:57:05 +0200 +Subject: [PATCH 2/3] fix 'resource | stonith refresh' documentation + +--- + pcs/pcs.8 | 4 ++-- + pcs/usage.py | 4 ++-- + 2 files changed, 4 insertions(+), 4 deletions(-) + +diff --git a/pcs/pcs.8 b/pcs/pcs.8 +index c887d332..3efc5bb2 100644 +--- a/pcs/pcs.8 ++++ b/pcs/pcs.8 +@@ -325,7 +325,7 @@ If a node is not specified then resources / stonith devices on all nodes will be + refresh [] [node=] [\fB\-\-strict\fR] + Make the cluster forget the complete operation history (including failures) of the resource and re\-detect its current state. If you are interested in forgetting failed operations only, use the 'pcs resource cleanup' command. + .br +-If the named resource is part of a group, or one numbered instance of a clone or bundled resource, the clean\-up applies to the whole collective resource unless \fB\-\-strict\fR is given. ++If the named resource is part of a group, or one numbered instance of a clone or bundled resource, the refresh applies to the whole collective resource unless \fB\-\-strict\fR is given. + .br + If a resource id is not specified then all resources / stonith devices will be refreshed. + .br +@@ -613,7 +613,7 @@ If a node is not specified then resources / stonith devices on all nodes will be + refresh [] [\fB\-\-node\fR ] [\fB\-\-strict\fR] + Make the cluster forget the complete operation history (including failures) of the stonith device and re\-detect its current state. If you are interested in forgetting failed operations only, use the 'pcs stonith cleanup' command. + .br +-If the named stonith device is part of a group, or one numbered instance of a clone or bundled resource, the clean\-up applies to the whole collective resource unless \fB\-\-strict\fR is given. ++If the named stonith device is part of a group, or one numbered instance of a clone or bundled resource, the refresh applies to the whole collective resource unless \fB\-\-strict\fR is given. 
+ .br + If a stonith id is not specified then all resources / stonith devices will be refreshed. + .br +diff --git a/pcs/usage.py b/pcs/usage.py +index 8722bd7b..0f3c95a3 100644 +--- a/pcs/usage.py ++++ b/pcs/usage.py +@@ -663,7 +663,7 @@ Commands: + interested in forgetting failed operations only, use the 'pcs resource + cleanup' command. + If the named resource is part of a group, or one numbered instance of a +- clone or bundled resource, the clean-up applies to the whole collective ++ clone or bundled resource, the refresh applies to the whole collective + resource unless --strict is given. + If a resource id is not specified then all resources / stonith devices + will be refreshed. +@@ -1214,7 +1214,7 @@ Commands: + are interested in forgetting failed operations only, use the 'pcs + stonith cleanup' command. + If the named stonith device is part of a group, or one numbered +- instance of a clone or bundled resource, the clean-up applies to the ++ instance of a clone or bundled resource, the refresh applies to the + whole collective resource unless --strict is given. + If a stonith id is not specified then all resources / stonith devices + will be refreshed. +-- +2.25.4 + diff --git a/SOURCES/bz1817547-01-resource-and-operation-defaults.patch b/SOURCES/bz1817547-01-resource-and-operation-defaults.patch new file mode 100644 index 0000000..34d1795 --- /dev/null +++ b/SOURCES/bz1817547-01-resource-and-operation-defaults.patch @@ -0,0 +1,7605 @@ +From ec4f8fc199891ad13235729272c0f115918cade9 Mon Sep 17 00:00:00 2001 +From: Tomas Jelinek +Date: Thu, 21 May 2020 16:51:25 +0200 +Subject: [PATCH 1/3] squash bz1817547 Resource and operation defaults that + apply to specific resource/operation types + +add rule parser for rsc and op expressions + +improvements to rule parser + +make rule parts independent of the parser + +export parsed rules into cib + +add a command for adding new rsc and op defaults + +display rsc and op defaults with multiple nvsets + +fix parsing and processing of rsc_expression in rules + +improve syntax for creating a new nvset + +make the rule parser produce dataclasses + +fix for pyparsing-2.4.0 + +add commands for removing rsc and op defaults sets + +add commands for updating rsc and op defaults sets + +update changelog, capabilities + +add tier1 tests for rules + +various minor fixes + +fix routing, create 'defaults update' command + +better error messages for unallowed rule expressions +--- + .gitlab-ci.yml | 3 + + README.md | 1 + + mypy.ini | 9 + + pcs.spec.in | 3 + + pcs/cli/common/lib_wrapper.py | 10 +- + pcs/cli/nvset.py | 53 ++ + pcs/cli/reports/messages.py | 39 + + pcs/cli/routing/resource.py | 77 +- + pcs/cli/rule.py | 89 +++ + pcs/common/interface/dto.py | 9 +- + pcs/common/pacemaker/nvset.py | 26 + + pcs/common/pacemaker/rule.py | 28 + + pcs/common/reports/codes.py | 3 + + pcs/common/reports/const.py | 6 + + pcs/common/reports/messages.py | 73 ++ + pcs/common/reports/types.py | 1 + + pcs/common/str_tools.py | 32 + + pcs/common/types.py | 13 + + pcs/config.py | 20 +- + pcs/lib/cib/nvpair_multi.py | 323 +++++++++ + pcs/lib/cib/rule/__init__.py | 8 + + pcs/lib/cib/rule/cib_to_dto.py | 185 +++++ + pcs/lib/cib/rule/expression_part.py | 49 ++ + pcs/lib/cib/rule/parsed_to_cib.py | 103 +++ + pcs/lib/cib/rule/parser.py | 232 ++++++ + pcs/lib/cib/rule/validator.py | 62 ++ + pcs/lib/cib/tools.py | 8 +- + pcs/lib/commands/cib_options.py | 322 ++++++++- + pcs/lib/validate.py | 15 + + pcs/lib/xml_tools.py | 9 +- + pcs/pcs.8 | 86 ++- + pcs/resource.py | 258 ++++++- + 
pcs/usage.py | 94 ++- + pcs_test/resources/cib-empty-3.1.xml | 2 +- + pcs_test/resources/cib-empty-3.2.xml | 2 +- + pcs_test/resources/cib-empty-3.3.xml | 10 + + pcs_test/resources/cib-empty-3.4.xml | 10 + + pcs_test/resources/cib-empty.xml | 2 +- + pcs_test/tier0/cli/reports/test_messages.py | 29 + + pcs_test/tier0/cli/resource/test_defaults.py | 324 +++++++++ + pcs_test/tier0/cli/test_nvset.py | 92 +++ + pcs_test/tier0/cli/test_rule.py | 477 +++++++++++++ + .../tier0/common/reports/test_messages.py | 55 +- + pcs_test/tier0/common/test_str_tools.py | 33 + + .../cib_options => cib/rule}/__init__.py | 0 + .../tier0/lib/cib/rule/test_cib_to_dto.py | 593 ++++++++++++++++ + .../tier0/lib/cib/rule/test_parsed_to_cib.py | 214 ++++++ + pcs_test/tier0/lib/cib/rule/test_parser.py | 270 +++++++ + pcs_test/tier0/lib/cib/rule/test_validator.py | 68 ++ + pcs_test/tier0/lib/cib/test_nvpair_multi.py | 513 ++++++++++++++ + pcs_test/tier0/lib/cib/test_tools.py | 13 +- + .../cib_options/test_operations_defaults.py | 120 ---- + .../cib_options/test_resources_defaults.py | 120 ---- + .../tier0/lib/commands/test_cib_options.py | 669 ++++++++++++++++++ + pcs_test/tier0/lib/test_validate.py | 27 + + pcs_test/tier1/legacy/test_resource.py | 8 +- + pcs_test/tier1/legacy/test_stonith.py | 8 +- + pcs_test/tier1/test_cib_options.py | 571 +++++++++++++++ + pcs_test/tier1/test_tag.py | 4 +- + pcs_test/tools/fixture.py | 4 +- + pcs_test/tools/misc.py | 61 +- + pcsd/capabilities.xml | 30 + + test/centos8/Dockerfile | 1 + + test/fedora30/Dockerfile | 1 + + test/fedora31/Dockerfile | 1 + + test/fedora32/Dockerfile | 1 + + 66 files changed, 6216 insertions(+), 366 deletions(-) + create mode 100644 pcs/cli/nvset.py + create mode 100644 pcs/cli/rule.py + create mode 100644 pcs/common/pacemaker/nvset.py + create mode 100644 pcs/common/pacemaker/rule.py + create mode 100644 pcs/lib/cib/nvpair_multi.py + create mode 100644 pcs/lib/cib/rule/__init__.py + create mode 100644 pcs/lib/cib/rule/cib_to_dto.py + create mode 100644 pcs/lib/cib/rule/expression_part.py + create mode 100644 pcs/lib/cib/rule/parsed_to_cib.py + create mode 100644 pcs/lib/cib/rule/parser.py + create mode 100644 pcs/lib/cib/rule/validator.py + create mode 100644 pcs_test/resources/cib-empty-3.3.xml + create mode 100644 pcs_test/resources/cib-empty-3.4.xml + create mode 100644 pcs_test/tier0/cli/resource/test_defaults.py + create mode 100644 pcs_test/tier0/cli/test_nvset.py + create mode 100644 pcs_test/tier0/cli/test_rule.py + rename pcs_test/tier0/lib/{commands/cib_options => cib/rule}/__init__.py (100%) + create mode 100644 pcs_test/tier0/lib/cib/rule/test_cib_to_dto.py + create mode 100644 pcs_test/tier0/lib/cib/rule/test_parsed_to_cib.py + create mode 100644 pcs_test/tier0/lib/cib/rule/test_parser.py + create mode 100644 pcs_test/tier0/lib/cib/rule/test_validator.py + create mode 100644 pcs_test/tier0/lib/cib/test_nvpair_multi.py + delete mode 100644 pcs_test/tier0/lib/commands/cib_options/test_operations_defaults.py + delete mode 100644 pcs_test/tier0/lib/commands/cib_options/test_resources_defaults.py + create mode 100644 pcs_test/tier0/lib/commands/test_cib_options.py + create mode 100644 pcs_test/tier1/test_cib_options.py + +diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml +index 83eba12d..24444b72 100644 +--- a/.gitlab-ci.yml ++++ b/.gitlab-ci.yml +@@ -51,6 +51,7 @@ pylint: + python3-pip + python3-pycurl + python3-pyOpenSSL ++ python3-pyparsing + findutils + make + time +@@ -69,6 +70,7 @@ mypy: + python3-pip + python3-pycurl + python3-pyOpenSSL ++ 
python3-pyparsing + git + make + tar +@@ -112,6 +114,7 @@ python_tier0_tests: + python3-pip + python3-pycurl + python3-pyOpenSSL ++ python3-pyparsing + which + " + - make install_pip +diff --git a/README.md b/README.md +index f888da68..efb4d0d5 100644 +--- a/README.md ++++ b/README.md +@@ -30,6 +30,7 @@ These are the runtime dependencies of pcs and pcsd: + * python3-pycurl + * python3-setuptools + * python3-pyOpenSSL (python3-openssl) ++* python3-pyparsing + * python3-tornado 6.x + * python dataclasses (`pip install dataclasses`; required only for python 3.6, + already included in 3.7+) +diff --git a/mypy.ini b/mypy.ini +index ad3d1f18..ac6789a9 100644 +--- a/mypy.ini ++++ b/mypy.ini +@@ -8,12 +8,18 @@ disallow_untyped_defs = True + [mypy-pcs.lib.cib.resource.relations] + disallow_untyped_defs = True + ++[mypy-pcs.lib.cib.rule] ++disallow_untyped_defs = True ++ + [mypy-pcs.lib.cib.tag] + disallow_untyped_defs = True + + [mypy-pcs.lib.commands.tag] + disallow_untyped_defs = True + ++[mypy-pcs.lib.commands.cib_options] ++disallow_untyped_defs = True ++ + [mypy-pcs.lib.dr.*] + disallow_untyped_defs = True + disallow_untyped_calls = True +@@ -84,3 +90,6 @@ ignore_missing_imports = True + + [mypy-distro] + ignore_missing_imports = True ++ ++[mypy-pyparsing] ++ignore_missing_imports = True +diff --git a/pcs.spec.in b/pcs.spec.in +index c52c2fe4..e292a708 100644 +--- a/pcs.spec.in ++++ b/pcs.spec.in +@@ -122,6 +122,8 @@ BuildRequires: platform-python-setuptools + %endif + + BuildRequires: python3-devel ++# for tier0 tests ++BuildRequires: python3-pyparsing + + # gcc for compiling custom rubygems + BuildRequires: gcc +@@ -155,6 +157,7 @@ Requires: platform-python-setuptools + + Requires: python3-lxml + Requires: python3-pycurl ++Requires: python3-pyparsing + # clufter and its dependencies + Requires: python3-clufter => 0.70.0 + %if "%{python3_version}" != "3.6" && "%{python3_version}" != "3.7" +diff --git a/pcs/cli/common/lib_wrapper.py b/pcs/cli/common/lib_wrapper.py +index 9fd05ac0..192a3dac 100644 +--- a/pcs/cli/common/lib_wrapper.py ++++ b/pcs/cli/common/lib_wrapper.py +@@ -388,8 +388,14 @@ def load_module(env, middleware_factory, name): + env, + middleware.build(middleware_factory.cib,), + { +- "set_operations_defaults": cib_options.set_operations_defaults, +- "set_resources_defaults": cib_options.set_resources_defaults, ++ "operation_defaults_config": cib_options.operation_defaults_config, ++ "operation_defaults_create": cib_options.operation_defaults_create, ++ "operation_defaults_remove": cib_options.operation_defaults_remove, ++ "operation_defaults_update": cib_options.operation_defaults_update, ++ "resource_defaults_config": cib_options.resource_defaults_config, ++ "resource_defaults_create": cib_options.resource_defaults_create, ++ "resource_defaults_remove": cib_options.resource_defaults_remove, ++ "resource_defaults_update": cib_options.resource_defaults_update, + }, + ) + +diff --git a/pcs/cli/nvset.py b/pcs/cli/nvset.py +new file mode 100644 +index 00000000..69442df3 +--- /dev/null ++++ b/pcs/cli/nvset.py +@@ -0,0 +1,53 @@ ++from typing import ( ++ cast, ++ Iterable, ++ List, ++ Optional, ++) ++ ++from pcs.cli.rule import rule_expression_dto_to_lines ++from pcs.common.pacemaker.nvset import CibNvsetDto ++from pcs.common.str_tools import ( ++ format_name_value_list, ++ indent, ++) ++from pcs.common.types import CibNvsetType ++ ++ ++def nvset_dto_list_to_lines( ++ nvset_dto_list: Iterable[CibNvsetDto], ++ with_ids: bool = False, ++ text_if_empty: Optional[str] = None, ++) -> 
List[str]: ++ if not nvset_dto_list: ++ return [text_if_empty] if text_if_empty else [] ++ return [ ++ line ++ for nvset_dto in nvset_dto_list ++ for line in nvset_dto_to_lines(nvset_dto, with_ids=with_ids) ++ ] ++ ++ ++def nvset_dto_to_lines(nvset: CibNvsetDto, with_ids: bool = False) -> List[str]: ++ nvset_label = _nvset_type_to_label.get(nvset.type, "Options Set") ++ heading_parts = [f"{nvset_label}: {nvset.id}"] ++ if nvset.options: ++ heading_parts.append( ++ " ".join(format_name_value_list(sorted(nvset.options.items()))) ++ ) ++ ++ lines = format_name_value_list( ++ sorted([(nvpair.name, nvpair.value) for nvpair in nvset.nvpairs]) ++ ) ++ if nvset.rule: ++ lines.extend( ++ rule_expression_dto_to_lines(nvset.rule, with_ids=with_ids) ++ ) ++ ++ return [" ".join(heading_parts)] + indent(lines) ++ ++ ++_nvset_type_to_label = { ++ cast(str, CibNvsetType.INSTANCE): "Attributes", ++ cast(str, CibNvsetType.META): "Meta Attrs", ++} +diff --git a/pcs/cli/reports/messages.py b/pcs/cli/reports/messages.py +index 36f00a9e..7ccc8ab0 100644 +--- a/pcs/cli/reports/messages.py ++++ b/pcs/cli/reports/messages.py +@@ -402,6 +402,45 @@ class TagCannotRemoveReferencesWithoutRemovingTag(CliReportMessageCustom): + ) + + ++class RuleExpressionParseError(CliReportMessageCustom): ++ _obj: messages.RuleExpressionParseError ++ ++ @property ++ def message(self) -> str: ++ # Messages coming from the parser are not very useful and readable, ++ # they mostly contain one line grammar expression covering the whole ++ # rule. No user would be able to parse that. Therefore we omit the ++ # messages. ++ marker = "-" * (self._obj.column_number - 1) + "^" ++ return ( ++ f"'{self._obj.rule_string}' is not a valid rule expression, parse " ++ f"error near or after line {self._obj.line_number} column " ++ f"{self._obj.column_number}\n" ++ f" {self._obj.rule_line}\n" ++ f" {marker}" ++ ) ++ ++ ++class CibNvsetAmbiguousProvideNvsetId(CliReportMessageCustom): ++ _obj: messages.CibNvsetAmbiguousProvideNvsetId ++ ++ @property ++ def message(self) -> str: ++ command_map = { ++ const.PCS_COMMAND_RESOURCE_DEFAULTS_UPDATE: ( ++ "pcs resource defaults set update" ++ ), ++ const.PCS_COMMAND_OPERATION_DEFAULTS_UPDATE: ( ++ "pcs resource op defaults set update" ++ ), ++ } ++ command = command_map.get(self._obj.pcs_command, "") ++ return ( ++ f"Several options sets exist, please use the '{command}' command " ++ "and specify an option set ID" ++ ) ++ ++ + def _create_report_msg_map() -> Dict[str, type]: + result: Dict[str, type] = {} + for report_msg_cls in get_all_subclasses(CliReportMessageCustom): +diff --git a/pcs/cli/routing/resource.py b/pcs/cli/routing/resource.py +index 28bb3d5e..0706f43b 100644 +--- a/pcs/cli/routing/resource.py ++++ b/pcs/cli/routing/resource.py +@@ -1,15 +1,88 @@ + from functools import partial ++from typing import ( ++ Any, ++ List, ++) + + from pcs import ( + resource, + usage, + ) + from pcs.cli.common.errors import raise_command_replaced ++from pcs.cli.common.parse_args import InputModifiers + from pcs.cli.common.routing import create_router + + from pcs.cli.resource.relations import show_resource_relations_cmd + + ++def resource_defaults_cmd( ++ lib: Any, argv: List[str], modifiers: InputModifiers ++) -> None: ++ """ ++ Options: ++ * -f - CIB file ++ * --force - allow unknown options ++ """ ++ if argv and "=" in argv[0]: ++ # DEPRECATED legacy command ++ return resource.resource_defaults_legacy_cmd( ++ lib, argv, modifiers, deprecated_syntax_used=True ++ ) ++ ++ router = create_router( ++ { ++ "config": 
resource.resource_defaults_config_cmd, ++ "set": create_router( ++ { ++ "create": resource.resource_defaults_set_create_cmd, ++ "delete": resource.resource_defaults_set_remove_cmd, ++ "remove": resource.resource_defaults_set_remove_cmd, ++ "update": resource.resource_defaults_set_update_cmd, ++ }, ++ ["resource", "defaults", "set"], ++ ), ++ "update": resource.resource_defaults_legacy_cmd, ++ }, ++ ["resource", "defaults"], ++ default_cmd="config", ++ ) ++ return router(lib, argv, modifiers) ++ ++ ++def resource_op_defaults_cmd( ++ lib: Any, argv: List[str], modifiers: InputModifiers ++) -> None: ++ """ ++ Options: ++ * -f - CIB file ++ * --force - allow unknown options ++ """ ++ if argv and "=" in argv[0]: ++ # DEPRECATED legacy command ++ return resource.resource_op_defaults_legacy_cmd( ++ lib, argv, modifiers, deprecated_syntax_used=True ++ ) ++ ++ router = create_router( ++ { ++ "config": resource.resource_op_defaults_config_cmd, ++ "set": create_router( ++ { ++ "create": resource.resource_op_defaults_set_create_cmd, ++ "delete": resource.resource_op_defaults_set_remove_cmd, ++ "remove": resource.resource_op_defaults_set_remove_cmd, ++ "update": resource.resource_op_defaults_set_update_cmd, ++ }, ++ ["resource", "op", "defaults", "set"], ++ ), ++ "update": resource.resource_op_defaults_legacy_cmd, ++ }, ++ ["resource", "op", "defaults"], ++ default_cmd="config", ++ ) ++ return router(lib, argv, modifiers) ++ ++ + resource_cmd = create_router( + { + "help": lambda lib, argv, modifiers: usage.resource(argv), +@@ -68,14 +141,14 @@ resource_cmd = create_router( + "failcount": resource.resource_failcount, + "op": create_router( + { +- "defaults": resource.resource_op_defaults_cmd, ++ "defaults": resource_op_defaults_cmd, + "add": resource.resource_op_add_cmd, + "remove": resource.resource_op_delete_cmd, + "delete": resource.resource_op_delete_cmd, + }, + ["resource", "op"], + ), +- "defaults": resource.resource_defaults_cmd, ++ "defaults": resource_defaults_cmd, + "cleanup": resource.resource_cleanup, + "refresh": resource.resource_refresh, + "relocate": create_router( +diff --git a/pcs/cli/rule.py b/pcs/cli/rule.py +new file mode 100644 +index 00000000..c1149fff +--- /dev/null ++++ b/pcs/cli/rule.py +@@ -0,0 +1,89 @@ ++from typing import List ++ ++from pcs.common.pacemaker.rule import CibRuleExpressionDto ++from pcs.common.str_tools import ( ++ format_name_value_list, ++ indent, ++) ++from pcs.common.types import CibRuleExpressionType ++ ++ ++def rule_expression_dto_to_lines( ++ rule_expr: CibRuleExpressionDto, with_ids: bool = False ++) -> List[str]: ++ if rule_expr.type == CibRuleExpressionType.RULE: ++ return _rule_dto_to_lines(rule_expr, with_ids) ++ if rule_expr.type == CibRuleExpressionType.DATE_EXPRESSION: ++ return _date_dto_to_lines(rule_expr, with_ids) ++ return _simple_expr_to_lines(rule_expr, with_ids) ++ ++ ++def _rule_dto_to_lines( ++ rule_expr: CibRuleExpressionDto, with_ids: bool = False ++) -> List[str]: ++ heading_parts = [ ++ "Rule{0}:".format(" (expired)" if rule_expr.is_expired else "") ++ ] ++ heading_parts.extend( ++ format_name_value_list(sorted(rule_expr.options.items())) ++ ) ++ if with_ids: ++ heading_parts.append(f"(id:{rule_expr.id})") ++ ++ lines = [] ++ for child in rule_expr.expressions: ++ lines.extend(rule_expression_dto_to_lines(child, with_ids)) ++ ++ return [" ".join(heading_parts)] + indent(lines) ++ ++ ++def _date_dto_to_lines( ++ rule_expr: CibRuleExpressionDto, with_ids: bool = False ++) -> List[str]: ++ # pylint: disable=too-many-branches ++ 
operation = rule_expr.options.get("operation", None) ++ ++ if operation == "date_spec": ++ heading_parts = ["Expression:"] ++ if with_ids: ++ heading_parts.append(f"(id:{rule_expr.id})") ++ line_parts = ["Date Spec:"] ++ if rule_expr.date_spec: ++ line_parts.extend( ++ format_name_value_list( ++ sorted(rule_expr.date_spec.options.items()) ++ ) ++ ) ++ if with_ids: ++ line_parts.append(f"(id:{rule_expr.date_spec.id})") ++ return [" ".join(heading_parts)] + indent([" ".join(line_parts)]) ++ ++ if operation == "in_range" and rule_expr.duration: ++ heading_parts = ["Expression:", "date", "in_range"] ++ if "start" in rule_expr.options: ++ heading_parts.append(rule_expr.options["start"]) ++ heading_parts.extend(["to", "duration"]) ++ if with_ids: ++ heading_parts.append(f"(id:{rule_expr.id})") ++ lines = [" ".join(heading_parts)] ++ ++ line_parts = ["Duration:"] ++ line_parts.extend( ++ format_name_value_list(sorted(rule_expr.duration.options.items())) ++ ) ++ if with_ids: ++ line_parts.append(f"(id:{rule_expr.duration.id})") ++ lines.extend(indent([" ".join(line_parts)])) ++ ++ return lines ++ ++ return _simple_expr_to_lines(rule_expr, with_ids=with_ids) ++ ++ ++def _simple_expr_to_lines( ++ rule_expr: CibRuleExpressionDto, with_ids: bool = False ++) -> List[str]: ++ parts = ["Expression:", rule_expr.as_string] ++ if with_ids: ++ parts.append(f"(id:{rule_expr.id})") ++ return [" ".join(parts)] +diff --git a/pcs/common/interface/dto.py b/pcs/common/interface/dto.py +index fb40fc5e..768156d6 100644 +--- a/pcs/common/interface/dto.py ++++ b/pcs/common/interface/dto.py +@@ -42,7 +42,14 @@ def from_dict(cls: Type[DtoType], data: DtoPayload) -> DtoType: + data=data, + # NOTE: all enum types has to be listed here in key cast + # see: https://github.com/konradhalas/dacite#casting +- config=dacite.Config(cast=[types.DrRole, types.ResourceRelationType,],), ++ config=dacite.Config( ++ cast=[ ++ types.CibNvsetType, ++ types.CibRuleExpressionType, ++ types.DrRole, ++ types.ResourceRelationType, ++ ] ++ ), + ) + + +diff --git a/pcs/common/pacemaker/nvset.py b/pcs/common/pacemaker/nvset.py +new file mode 100644 +index 00000000..6d72c787 +--- /dev/null ++++ b/pcs/common/pacemaker/nvset.py +@@ -0,0 +1,26 @@ ++from dataclasses import dataclass ++from typing import ( ++ Mapping, ++ Optional, ++ Sequence, ++) ++ ++from pcs.common.interface.dto import DataTransferObject ++from pcs.common.pacemaker.rule import CibRuleExpressionDto ++from pcs.common.types import CibNvsetType ++ ++ ++@dataclass(frozen=True) ++class CibNvpairDto(DataTransferObject): ++ id: str # pylint: disable=invalid-name ++ name: str ++ value: str ++ ++ ++@dataclass(frozen=True) ++class CibNvsetDto(DataTransferObject): ++ id: str # pylint: disable=invalid-name ++ type: CibNvsetType ++ options: Mapping[str, str] ++ rule: Optional[CibRuleExpressionDto] ++ nvpairs: Sequence[CibNvpairDto] +diff --git a/pcs/common/pacemaker/rule.py b/pcs/common/pacemaker/rule.py +new file mode 100644 +index 00000000..306e65e6 +--- /dev/null ++++ b/pcs/common/pacemaker/rule.py +@@ -0,0 +1,28 @@ ++from dataclasses import dataclass ++from typing import ( ++ Mapping, ++ Optional, ++ Sequence, ++) ++ ++from pcs.common.interface.dto import DataTransferObject ++from pcs.common.types import CibRuleExpressionType ++ ++ ++@dataclass(frozen=True) ++class CibRuleDateCommonDto(DataTransferObject): ++ id: str # pylint: disable=invalid-name ++ options: Mapping[str, str] ++ ++ ++@dataclass(frozen=True) ++class CibRuleExpressionDto(DataTransferObject): ++ # pylint: 
disable=too-many-instance-attributes ++ id: str # pylint: disable=invalid-name ++ type: CibRuleExpressionType ++ is_expired: bool # only valid for type==rule ++ options: Mapping[str, str] ++ date_spec: Optional[CibRuleDateCommonDto] ++ duration: Optional[CibRuleDateCommonDto] ++ expressions: Sequence["CibRuleExpressionDto"] ++ as_string: str +diff --git a/pcs/common/reports/codes.py b/pcs/common/reports/codes.py +index 26eb8b51..8bcabfab 100644 +--- a/pcs/common/reports/codes.py ++++ b/pcs/common/reports/codes.py +@@ -123,6 +123,7 @@ CIB_LOAD_ERROR = M("CIB_LOAD_ERROR") + CIB_LOAD_ERROR_GET_NODES_FOR_VALIDATION = M( + "CIB_LOAD_ERROR_GET_NODES_FOR_VALIDATION" + ) ++CIB_NVSET_AMBIGUOUS_PROVIDE_NVSET_ID = M("CIB_NVSET_AMBIGUOUS_PROVIDE_NVSET_ID") + CIB_LOAD_ERROR_SCOPE_MISSING = M("CIB_LOAD_ERROR_SCOPE_MISSING") + CIB_PUSH_FORCED_FULL_DUE_TO_CRM_FEATURE_SET = M( + "CIB_PUSH_FORCED_FULL_DUE_TO_CRM_FEATURE_SET" +@@ -405,6 +406,8 @@ RESOURCE_UNMOVE_UNBAN_PCMK_SUCCESS = M("RESOURCE_UNMOVE_UNBAN_PCMK_SUCCESS") + RESOURCE_UNMOVE_UNBAN_PCMK_EXPIRED_NOT_SUPPORTED = M( + "RESOURCE_UNMOVE_UNBAN_PCMK_EXPIRED_NOT_SUPPORTED" + ) ++RULE_EXPRESSION_PARSE_ERROR = M("RULE_EXPRESSION_PARSE_ERROR") ++RULE_EXPRESSION_NOT_ALLOWED = M("RULE_EXPRESSION_NOT_ALLOWED") + RUN_EXTERNAL_PROCESS_ERROR = M("RUN_EXTERNAL_PROCESS_ERROR") + RUN_EXTERNAL_PROCESS_FINISHED = M("RUN_EXTERNAL_PROCESS_FINISHED") + RUN_EXTERNAL_PROCESS_STARTED = M("RUN_EXTERNAL_PROCESS_STARTED") +diff --git a/pcs/common/reports/const.py b/pcs/common/reports/const.py +index aeb593ee..fa2122d0 100644 +--- a/pcs/common/reports/const.py ++++ b/pcs/common/reports/const.py +@@ -1,9 +1,15 @@ + from .types import ( + DefaultAddressSource, ++ PcsCommand, + ReasonType, + ServiceAction, + ) + ++PCS_COMMAND_OPERATION_DEFAULTS_UPDATE = PcsCommand( ++ "resource op defaults update" ++) ++PCS_COMMAND_RESOURCE_DEFAULTS_UPDATE = PcsCommand("resource defaults update") ++ + SERVICE_ACTION_START = ServiceAction("START") + SERVICE_ACTION_STOP = ServiceAction("STOP") + SERVICE_ACTION_ENABLE = ServiceAction("ENABLE") +diff --git a/pcs/common/reports/messages.py b/pcs/common/reports/messages.py +index 540e8c69..f04d8632 100644 +--- a/pcs/common/reports/messages.py ++++ b/pcs/common/reports/messages.py +@@ -27,6 +27,7 @@ from pcs.common.str_tools import ( + indent, + is_iterable_not_str, + ) ++from pcs.common.types import CibRuleExpressionType + + from . import ( + codes, +@@ -120,6 +121,7 @@ _type_articles = { + "ACL user": "an", + "ACL role": "an", + "ACL permission": "an", ++ "options set": "an", + } + + +@@ -6399,3 +6401,74 @@ class TagIdsNotInTheTag(ReportItemMessage): + ids=format_plural(self.id_list, "id"), + id_list=format_list(self.id_list), + ) ++ ++ ++@dataclass(frozen=True) ++class RuleExpressionParseError(ReportItemMessage): ++ """ ++ Unable to parse pacemaker cib rule expression string ++ ++ rule_string -- the whole rule expression string ++ reason -- error message from rule parser ++ rule_line -- part of rule_string - the line where the error occurred ++ line_number -- the line where parsing failed ++ column_number -- the column where parsing failed ++ position -- the start index where parsing failed ++ """ ++ ++ rule_string: str ++ reason: str ++ rule_line: str ++ line_number: int ++ column_number: int ++ position: int ++ _code = codes.RULE_EXPRESSION_PARSE_ERROR ++ ++ @property ++ def message(self) -> str: ++ # Messages coming from the parser are not very useful and readable, ++ # they mostly contain one line grammar expression covering the whole ++ # rule. 
No user would be able to parse that. Therefore we omit the ++ # messages. ++ return ( ++ f"'{self.rule_string}' is not a valid rule expression, parse error " ++ f"near or after line {self.line_number} column {self.column_number}" ++ ) ++ ++ ++@dataclass(frozen=True) ++class RuleExpressionNotAllowed(ReportItemMessage): ++ """ ++ Used rule expression is not allowed in current context ++ ++ expression_type -- disallowed expression type ++ """ ++ ++ expression_type: CibRuleExpressionType ++ _code = codes.RULE_EXPRESSION_NOT_ALLOWED ++ ++ @property ++ def message(self) -> str: ++ type_map = { ++ CibRuleExpressionType.OP_EXPRESSION: "op", ++ CibRuleExpressionType.RSC_EXPRESSION: "resource", ++ } ++ return ( ++ f"Keyword '{type_map[self.expression_type]}' cannot be used " ++ "in a rule in this command" ++ ) ++ ++ ++@dataclass(frozen=True) ++class CibNvsetAmbiguousProvideNvsetId(ReportItemMessage): ++ """ ++ An old command supporting only one nvset has been used when several nvsets ++ exist. We require an nvset ID the command should work with to be specified. ++ """ ++ ++ pcs_command: types.PcsCommand ++ _code = codes.CIB_NVSET_AMBIGUOUS_PROVIDE_NVSET_ID ++ ++ @property ++ def message(self) -> str: ++ return "Several options sets exist, please specify an option set ID" +diff --git a/pcs/common/reports/types.py b/pcs/common/reports/types.py +index 5973279e..541046ea 100644 +--- a/pcs/common/reports/types.py ++++ b/pcs/common/reports/types.py +@@ -3,6 +3,7 @@ from typing import NewType + DefaultAddressSource = NewType("DefaultAddressSource", str) + ForceCode = NewType("ForceCode", str) + MessageCode = NewType("MessageCode", str) ++PcsCommand = NewType("PcsCommand", str) + ReasonType = NewType("ReasonType", str) + ServiceAction = NewType("ServiceAction", str) + SeverityLevel = NewType("SeverityLevel", str) +diff --git a/pcs/common/str_tools.py b/pcs/common/str_tools.py +index deb38799..80864b50 100644 +--- a/pcs/common/str_tools.py ++++ b/pcs/common/str_tools.py +@@ -3,6 +3,8 @@ from typing import ( + Any, + List, + Mapping, ++ Sequence, ++ Tuple, + TypeVar, + ) + +@@ -49,6 +51,36 @@ def format_list_custom_last_separator( + ) + + ++# For now, Tuple[str, str] is sufficient. Feel free to change it if needed, ++# e.g. when values can be integers. 
++def format_name_value_list(item_list: Sequence[Tuple[str, str]]) -> List[str]: ++ """ ++ Turn 2-tuples to 'name=value' strings with standard quoting ++ """ ++ output = [] ++ for name, value in item_list: ++ name = quote(name, "= ") ++ value = quote(value, "= ") ++ output.append(f"{name}={value}") ++ return output ++ ++ ++def quote(string: str, chars_to_quote: str) -> str: ++ """ ++ Quote a string if it contains specified characters ++ ++ string -- the string to be processed ++ chars_to_quote -- the characters causing quoting ++ """ ++ if not frozenset(chars_to_quote) & frozenset(string): ++ return string ++ if '"' not in string: ++ return f'"{string}"' ++ if "'" not in string: ++ return f"'{string}'" ++ return '"{string}"'.format(string=string.replace('"', '\\"')) ++ ++ + def join_multilines(strings): + return "\n".join([a.strip() for a in strings if a.strip()]) + +diff --git a/pcs/common/types.py b/pcs/common/types.py +index dace6f6d..0b656cc0 100644 +--- a/pcs/common/types.py ++++ b/pcs/common/types.py +@@ -3,6 +3,19 @@ from enum import auto + from pcs.common.tools import AutoNameEnum + + ++class CibNvsetType(AutoNameEnum): ++ INSTANCE = auto() ++ META = auto() ++ ++ ++class CibRuleExpressionType(AutoNameEnum): ++ RULE = auto() ++ EXPRESSION = auto() ++ DATE_EXPRESSION = auto() ++ OP_EXPRESSION = auto() ++ RSC_EXPRESSION = auto() ++ ++ + class ResourceRelationType(AutoNameEnum): + ORDER = auto() + ORDER_SET = auto() +diff --git a/pcs/config.py b/pcs/config.py +index 058ec55a..67aa6e0e 100644 +--- a/pcs/config.py ++++ b/pcs/config.py +@@ -48,6 +48,7 @@ from pcs import ( + from pcs.cli.common import middleware + from pcs.cli.common.errors import CmdLineInputError + from pcs.cli.constraint import command as constraint_command ++from pcs.cli.nvset import nvset_dto_list_to_lines + from pcs.cli.reports import process_library_reports + from pcs.common.reports import constraints as constraints_reports + from pcs.common.str_tools import indent +@@ -96,7 +97,8 @@ def _config_show_cib_lines(lib): + Commandline options: + * -f - CIB file + """ +- # update of pcs_options will change output of constraint show ++ # update of pcs_options will change output of constraint show and ++ # displaying resources and operations defaults + utils.pcs_options["--full"] = 1 + # get latest modifiers object after updating pcs_options + modifiers = utils.get_input_modifiers() +@@ -172,11 +174,23 @@ def _config_show_cib_lines(lib): + all_lines.append("") + all_lines.append("Resources Defaults:") + all_lines.extend( +- indent(resource.show_defaults(cib_dom, "rsc_defaults"), indent_step=1) ++ indent( ++ nvset_dto_list_to_lines( ++ lib.cib_options.resource_defaults_config(), ++ with_ids=modifiers.get("--full"), ++ text_if_empty="No defaults set", ++ ) ++ ) + ) + all_lines.append("Operations Defaults:") + all_lines.extend( +- indent(resource.show_defaults(cib_dom, "op_defaults"), indent_step=1) ++ indent( ++ nvset_dto_list_to_lines( ++ lib.cib_options.operation_defaults_config(), ++ with_ids=modifiers.get("--full"), ++ text_if_empty="No defaults set", ++ ) ++ ) + ) + + all_lines.append("") +diff --git a/pcs/lib/cib/nvpair_multi.py b/pcs/lib/cib/nvpair_multi.py +new file mode 100644 +index 00000000..7bdc2f55 +--- /dev/null ++++ b/pcs/lib/cib/nvpair_multi.py +@@ -0,0 +1,323 @@ ++from typing import ( ++ cast, ++ Iterable, ++ List, ++ Mapping, ++ NewType, ++ Optional, ++ Tuple, ++) ++from xml.etree.ElementTree import Element ++ ++from lxml import etree ++from lxml.etree import _Element ++ ++from pcs.common import reports 
++from pcs.common.pacemaker.nvset import ( ++ CibNvpairDto, ++ CibNvsetDto, ++) ++from pcs.common.reports import ReportItemList ++from pcs.common.types import CibNvsetType ++from pcs.lib import validate ++from pcs.lib.cib.rule import ( ++ RuleParseError, ++ RuleRoot, ++ RuleValidator, ++ parse_rule, ++ rule_element_to_dto, ++ rule_to_cib, ++) ++from pcs.lib.cib.tools import ( ++ ElementSearcher, ++ IdProvider, ++ create_subelement_id, ++) ++from pcs.lib.xml_tools import ( ++ export_attributes, ++ remove_one_element, ++) ++ ++ ++NvsetTag = NewType("NvsetTag", str) ++NVSET_INSTANCE = NvsetTag("instance_attributes") ++NVSET_META = NvsetTag("meta_attributes") ++ ++_tag_to_type = { ++ str(NVSET_META): CibNvsetType.META, ++ str(NVSET_INSTANCE): CibNvsetType.INSTANCE, ++} ++ ++ ++def nvpair_element_to_dto(nvpair_el: Element) -> CibNvpairDto: ++ """ ++ Export an nvpair xml element to its DTO ++ """ ++ return CibNvpairDto( ++ nvpair_el.get("id", ""), ++ nvpair_el.get("name", ""), ++ nvpair_el.get("value", ""), ++ ) ++ ++ ++def nvset_element_to_dto(nvset_el: Element) -> CibNvsetDto: ++ """ ++ Export an nvset xml element to its DTO ++ """ ++ rule_el = nvset_el.find("./rule") ++ return CibNvsetDto( ++ nvset_el.get("id", ""), ++ _tag_to_type[nvset_el.tag], ++ export_attributes(nvset_el, with_id=False), ++ None if rule_el is None else rule_element_to_dto(rule_el), ++ [ ++ nvpair_element_to_dto(nvpair_el) ++ for nvpair_el in nvset_el.iterfind("./nvpair") ++ ], ++ ) ++ ++ ++def find_nvsets(parent_element: Element) -> List[Element]: ++ """ ++ Get all nvset xml elements in the given parent element ++ ++ parent_element -- an element to look for nvsets in ++ """ ++ return cast( ++ # The xpath method has a complicated return value, but we know our xpath ++ # expression returns only elements. ++ List[Element], ++ cast(_Element, parent_element).xpath( ++ "./*[{nvset_tags}]".format( ++ nvset_tags=" or ".join(f"self::{tag}" for tag in _tag_to_type) ++ ) ++ ), ++ ) ++ ++ ++def find_nvsets_by_ids( ++ parent_element: Element, id_list: Iterable[str] ++) -> Tuple[List[Element], ReportItemList]: ++ """ ++ Find nvset elements by their IDs and return them with non-empty report ++ list in case of errors. ++ ++ parent_element -- an element to look for nvsets in ++ id_list -- nvset IDs to be looked for ++ """ ++ element_list = [] ++ report_list: ReportItemList = [] ++ for nvset_id in id_list: ++ searcher = ElementSearcher( ++ _tag_to_type.keys(), ++ nvset_id, ++ parent_element, ++ element_type_desc="options set", ++ ) ++ if searcher.element_found(): ++ element_list.append(searcher.get_element()) ++ else: ++ report_list.extend(searcher.get_errors()) ++ return element_list, report_list ++ ++ ++class ValidateNvsetAppendNew: ++ """ ++ Validator for creating new nvset and appending it to CIB ++ """ ++ ++ def __init__( ++ self, ++ id_provider: IdProvider, ++ nvpair_dict: Mapping[str, str], ++ nvset_options: Mapping[str, str], ++ nvset_rule: Optional[str] = None, ++ rule_allows_rsc_expr: bool = False, ++ rule_allows_op_expr: bool = False, ++ ): ++ """ ++ id_provider -- elements' ids generator ++ nvpair_dict -- nvpairs to be put into the new nvset ++ nvset_options -- additional attributes of the created nvset ++ nvset_rule -- optional rule describing when the created nvset applies ++ rule_allows_rsc_expr -- is rsc_expression element allowed in nvset_rule? ++ rule_allows_op_expr -- is op_expression element allowed in nvset_rule? 
++ """ ++ self._id_provider = id_provider ++ self._nvpair_dict = nvpair_dict ++ self._nvset_options = nvset_options ++ self._nvset_rule = nvset_rule ++ self._allow_rsc_expr = rule_allows_rsc_expr ++ self._allow_op_expr = rule_allows_op_expr ++ self._nvset_rule_parsed: Optional[RuleRoot] = None ++ ++ def validate(self, force_options: bool = False) -> reports.ReportItemList: ++ report_list: reports.ReportItemList = [] ++ ++ # Nvpair dict is intentionally not validated: it may contain any keys ++ # and values. This can change in the future and then we add a ++ # validation. Until then there is really nothing to validate there. ++ ++ # validate nvset options ++ validators = [ ++ validate.NamesIn( ++ ("id", "score"), ++ **validate.set_warning( ++ reports.codes.FORCE_OPTIONS, force_options ++ ), ++ ), ++ # with id_provider it validates that the id is available as well ++ validate.ValueId( ++ "id", option_name_for_report="id", id_provider=self._id_provider ++ ), ++ validate.ValueScore("score"), ++ ] ++ report_list.extend( ++ validate.ValidatorAll(validators).validate(self._nvset_options) ++ ) ++ ++ # parse and validate rule ++ # TODO write and call parsed rule validation and cleanup and tests ++ if self._nvset_rule: ++ try: ++ # Allow flags are set to True always, the parsed rule tree is ++ # checked in the validator instead. That gives us better error ++ # messages, such as "op expression cannot be used in this ++ # context" instead of a universal "parse error". ++ self._nvset_rule_parsed = parse_rule( ++ self._nvset_rule, allow_rsc_expr=True, allow_op_expr=True ++ ) ++ report_list.extend( ++ RuleValidator( ++ self._nvset_rule_parsed, ++ allow_rsc_expr=self._allow_rsc_expr, ++ allow_op_expr=self._allow_op_expr, ++ ).get_reports() ++ ) ++ except RuleParseError as e: ++ report_list.append( ++ reports.ReportItem.error( ++ reports.messages.RuleExpressionParseError( ++ e.rule_string, ++ e.msg, ++ e.rule_line, ++ e.lineno, ++ e.colno, ++ e.pos, ++ ) ++ ) ++ ) ++ ++ return report_list ++ ++ def get_parsed_rule(self) -> Optional[RuleRoot]: ++ return self._nvset_rule_parsed ++ ++ ++def nvset_append_new( ++ parent_element: Element, ++ id_provider: IdProvider, ++ nvset_tag: NvsetTag, ++ nvpair_dict: Mapping[str, str], ++ nvset_options: Mapping[str, str], ++ nvset_rule: Optional[RuleRoot] = None, ++) -> Element: ++ """ ++ Create new nvset and append it to CIB ++ ++ parent_element -- the created nvset will be appended into this element ++ id_provider -- elements' ids generator ++ nvset_tag -- type and actual tag of the nvset ++ nvpair_dict -- nvpairs to be put into the new nvset ++ nvset_options -- additional attributes of the created nvset ++ nvset_rule -- optional rule describing when the created nvset applies ++ """ ++ nvset_options = dict(nvset_options) # make a copy which we can modify ++ if "id" not in nvset_options or not nvset_options["id"]: ++ nvset_options["id"] = create_subelement_id( ++ parent_element, nvset_tag, id_provider ++ ) ++ ++ nvset_el = etree.SubElement(cast(_Element, parent_element), nvset_tag) ++ for name, value in nvset_options.items(): ++ if value != "": ++ nvset_el.attrib[name] = value ++ if nvset_rule: ++ rule_to_cib(cast(Element, nvset_el), id_provider, nvset_rule) ++ for name, value in nvpair_dict.items(): ++ _set_nvpair(cast(Element, nvset_el), id_provider, name, value) ++ return cast(Element, nvset_el) ++ ++ ++def nvset_remove(nvset_el_list: Iterable[Element]) -> None: ++ """ ++ Remove given nvset elements from CIB ++ ++ nvset_el_list -- nvset elements to be removed ++ """ ++ 
for nvset_el in nvset_el_list: ++ remove_one_element(nvset_el) ++ ++ ++def nvset_update( ++ nvset_el: Element, id_provider: IdProvider, nvpair_dict: Mapping[str, str], ++) -> None: ++ """ ++ Update an existing nvset ++ ++ nvset_el -- nvset to be updated ++ id_provider -- elements' ids generator ++ nvpair_dict -- nvpairs to be put into the nvset ++ """ ++ # Do not ever remove the nvset element, even if it is empty. There may be ++ # ACLs set in pacemaker which allow "write" for nvpairs (adding, changing ++ # and removing) but not nvsets. In such a case, removing the nvset would ++ # cause the whole change to be rejected by pacemaker with a "permission ++ # denied" message. ++ # https://bugzilla.redhat.com/show_bug.cgi?id=1642514 ++ for name, value in nvpair_dict.items(): ++ _set_nvpair(nvset_el, id_provider, name, value) ++ ++ ++def _set_nvpair( ++ nvset_element: Element, id_provider: IdProvider, name: str, value: str ++): ++ """ ++ Ensure name-value pair is set / removed in specified nvset ++ ++ nvset_element -- container for nvpair elements to update ++ id_provider -- elements' ids generator ++ name -- name of the nvpair to be set ++ value -- value of the nvpair to be set, if "" the nvpair will be removed ++ """ ++ nvpair_el_list = cast( ++ # The xpath method has a complicated return value, but we know our xpath ++ # expression returns only elements. ++ List[Element], ++ cast(_Element, nvset_element).xpath("./nvpair[@name=$name]", name=name), ++ ) ++ ++ if not nvpair_el_list: ++ if value != "": ++ etree.SubElement( ++ cast(_Element, nvset_element), ++ "nvpair", ++ { ++ "id": create_subelement_id( ++ nvset_element, ++ # limit id length to prevent excessively long ids ++ name[:20], ++ id_provider, ++ ), ++ "name": name, ++ "value": value, ++ }, ++ ) ++ return ++ ++ if value != "": ++ nvpair_el_list[0].set("value", value) ++ else: ++ nvset_element.remove(nvpair_el_list[0]) ++ for nvpair_el in nvpair_el_list[1:]: ++ nvset_element.remove(nvpair_el) +diff --git a/pcs/lib/cib/rule/__init__.py b/pcs/lib/cib/rule/__init__.py +new file mode 100644 +index 00000000..94228572 +--- /dev/null ++++ b/pcs/lib/cib/rule/__init__.py +@@ -0,0 +1,8 @@ ++from .cib_to_dto import rule_element_to_dto ++from .expression_part import BoolExpr as RuleRoot ++from .parser import ( ++ parse_rule, ++ RuleParseError, ++) ++from .parsed_to_cib import export as rule_to_cib ++from .validator import Validator as RuleValidator +diff --git a/pcs/lib/cib/rule/cib_to_dto.py b/pcs/lib/cib/rule/cib_to_dto.py +new file mode 100644 +index 00000000..d8198e0c +--- /dev/null ++++ b/pcs/lib/cib/rule/cib_to_dto.py +@@ -0,0 +1,185 @@ ++from typing import cast ++from xml.etree.ElementTree import Element ++ ++from lxml.etree import _Element ++ ++from pcs.common.pacemaker.rule import ( ++ CibRuleDateCommonDto, ++ CibRuleExpressionDto, ++) ++from pcs.common.str_tools import ( ++ format_name_value_list, ++ quote, ++) ++from pcs.common.types import CibRuleExpressionType ++from pcs.lib.xml_tools import export_attributes ++ ++ ++def rule_element_to_dto(rule_el: Element) -> CibRuleExpressionDto: ++ """ ++ Export a rule xml element including its children to their DTOs ++ """ ++ return _tag_to_export[rule_el.tag](rule_el) ++ ++ ++def _attrs_to_str(el: Element) -> str: ++ return " ".join( ++ format_name_value_list( ++ sorted(export_attributes(el, with_id=False).items()) ++ ) ++ ) ++ ++ ++def _rule_to_dto(rule_el: Element) -> CibRuleExpressionDto: ++ children_dto_list = [ ++ _tag_to_export[child.tag](child) ++ # The xpath method has a complicated return 
value, but we know our xpath ++ # expression only returns elements. ++ for child in cast( ++ Element, cast(_Element, rule_el).xpath(_xpath_for_export) ++ ) ++ ] ++ # "and" is a documented pacemaker default ++ # https://clusterlabs.org/pacemaker/doc/en-US/Pacemaker/2.0/html-single/Pacemaker_Explained/index.html#_rule_properties ++ boolean_op = rule_el.get("boolean-op", "and") ++ string_parts = [] ++ for child_dto in children_dto_list: ++ if child_dto.type == CibRuleExpressionType.RULE: ++ string_parts.append(f"({child_dto.as_string})") ++ else: ++ string_parts.append(child_dto.as_string) ++ return CibRuleExpressionDto( ++ rule_el.get("id", ""), ++ _tag_to_type[rule_el.tag], ++ False, # TODO implement is_expired ++ export_attributes(rule_el, with_id=False), ++ None, ++ None, ++ children_dto_list, ++ f" {boolean_op} ".join(string_parts), ++ ) ++ ++ ++def _common_expr_to_dto( ++ expr_el: Element, as_string: str ++) -> CibRuleExpressionDto: ++ return CibRuleExpressionDto( ++ expr_el.get("id", ""), ++ _tag_to_type[expr_el.tag], ++ False, ++ export_attributes(expr_el, with_id=False), ++ None, ++ None, ++ [], ++ as_string, ++ ) ++ ++ ++def _simple_expr_to_dto(expr_el: Element) -> CibRuleExpressionDto: ++ string_parts = [] ++ if "value" in expr_el.attrib: ++ # "attribute" and "operation" are defined as mandatory in CIB schema ++ string_parts.extend( ++ [expr_el.get("attribute", ""), expr_el.get("operation", "")] ++ ) ++ if "type" in expr_el.attrib: ++ string_parts.append(expr_el.get("type", "")) ++ string_parts.append(quote(expr_el.get("value", ""), " ")) ++ else: ++ # "attribute" and "operation" are defined as mandatory in CIB schema ++ string_parts.extend( ++ [expr_el.get("operation", ""), expr_el.get("attribute", "")] ++ ) ++ return _common_expr_to_dto(expr_el, " ".join(string_parts)) ++ ++ ++def _date_common_to_dto(expr_el: Element) -> CibRuleDateCommonDto: ++ return CibRuleDateCommonDto( ++ expr_el.get("id", ""), export_attributes(expr_el, with_id=False), ++ ) ++ ++ ++def _date_expr_to_dto(expr_el: Element) -> CibRuleExpressionDto: ++ date_spec = expr_el.find("./date_spec") ++ duration = expr_el.find("./duration") ++ ++ string_parts = [] ++ # "operation" is defined as mandatory in CIB schema ++ operation = expr_el.get("operation", "") ++ if operation == "date_spec": ++ string_parts.append("date-spec") ++ if date_spec is not None: ++ string_parts.append(_attrs_to_str(date_spec)) ++ elif operation == "in_range": ++ string_parts.extend(["date", "in_range"]) ++ # CIB schema allows "start" + "duration" or optional "start" + "end" ++ if "start" in expr_el.attrib: ++ string_parts.extend([expr_el.get("start", ""), "to"]) ++ if "end" in expr_el.attrib: ++ string_parts.append(expr_el.get("end", "")) ++ if duration is not None: ++ string_parts.append("duration") ++ string_parts.append(_attrs_to_str(duration)) ++ else: ++ # CIB schema allows operation=="gt" + "start" or operation=="lt" + "end" ++ string_parts.extend(["date", expr_el.get("operation", "")]) ++ if "start" in expr_el.attrib: ++ string_parts.append(expr_el.get("start", "")) ++ if "end" in expr_el.attrib: ++ string_parts.append(expr_el.get("end", "")) ++ ++ return CibRuleExpressionDto( ++ expr_el.get("id", ""), ++ _tag_to_type[expr_el.tag], ++ False, ++ export_attributes(expr_el, with_id=False), ++ None if date_spec is None else _date_common_to_dto(date_spec), ++ None if duration is None else _date_common_to_dto(duration), ++ [], ++ " ".join(string_parts), ++ ) ++ ++ ++def _op_expr_to_dto(expr_el: Element) -> CibRuleExpressionDto: ++ 
string_parts = ["op"] ++ string_parts.append(expr_el.get("name", "")) ++ if "interval" in expr_el.attrib: ++ string_parts.append( ++ "interval={interval}".format(interval=expr_el.get("interval", "")) ++ ) ++ return _common_expr_to_dto(expr_el, " ".join(string_parts)) ++ ++ ++def _rsc_expr_to_dto(expr_el: Element) -> CibRuleExpressionDto: ++ return _common_expr_to_dto( ++ expr_el, ++ ( ++ "resource " ++ + ":".join( ++ [ ++ expr_el.get(attr, "") ++ for attr in ["class", "provider", "type"] ++ ] ++ ) ++ ), ++ ) ++ ++ ++_tag_to_type = { ++ "rule": CibRuleExpressionType.RULE, ++ "expression": CibRuleExpressionType.EXPRESSION, ++ "date_expression": CibRuleExpressionType.DATE_EXPRESSION, ++ "op_expression": CibRuleExpressionType.OP_EXPRESSION, ++ "rsc_expression": CibRuleExpressionType.RSC_EXPRESSION, ++} ++ ++_tag_to_export = { ++ "rule": _rule_to_dto, ++ "expression": _simple_expr_to_dto, ++ "date_expression": _date_expr_to_dto, ++ "op_expression": _op_expr_to_dto, ++ "rsc_expression": _rsc_expr_to_dto, ++} ++_xpath_for_export = "./*[{export_tags}]".format( ++ export_tags=" or ".join(f"self::{tag}" for tag in _tag_to_export) ++) +diff --git a/pcs/lib/cib/rule/expression_part.py b/pcs/lib/cib/rule/expression_part.py +new file mode 100644 +index 00000000..3ba63aa2 +--- /dev/null ++++ b/pcs/lib/cib/rule/expression_part.py +@@ -0,0 +1,49 @@ ++""" ++Provides classes used as nodes of a semantic tree of a parsed rule expression. ++""" ++from dataclasses import dataclass ++from typing import ( ++ NewType, ++ Optional, ++ Sequence, ++) ++ ++ ++class RuleExprPart: ++ pass ++ ++ ++BoolOperator = NewType("BoolOperator", str) ++BOOL_AND = BoolOperator("AND") ++BOOL_OR = BoolOperator("OR") ++ ++ ++@dataclass(frozen=True) ++class BoolExpr(RuleExprPart): ++ """ ++ Represents a rule combining RuleExprPart objects by AND or OR operation. ++ """ ++ ++ operator: BoolOperator ++ children: Sequence[RuleExprPart] ++ ++ ++@dataclass(frozen=True) ++class RscExpr(RuleExprPart): ++ """ ++ Represents a resource expression in a rule. ++ """ ++ ++ standard: Optional[str] ++ provider: Optional[str] ++ type: Optional[str] ++ ++ ++@dataclass(frozen=True) ++class OpExpr(RuleExprPart): ++ """ ++ Represents an op expression in a rule. ++ """ ++ ++ name: str ++ interval: Optional[str] +diff --git a/pcs/lib/cib/rule/parsed_to_cib.py b/pcs/lib/cib/rule/parsed_to_cib.py +new file mode 100644 +index 00000000..0fcae4f1 +--- /dev/null ++++ b/pcs/lib/cib/rule/parsed_to_cib.py +@@ -0,0 +1,103 @@ ++from typing import cast ++from xml.etree.ElementTree import Element ++ ++from lxml import etree ++from lxml.etree import _Element ++ ++from pcs.lib.cib.tools import ( ++ IdProvider, ++ create_subelement_id, ++) ++ ++from .expression_part import ( ++ BoolExpr, ++ OpExpr, ++ RscExpr, ++ RuleExprPart, ++) ++ ++ ++def export( ++ parent_el: Element, id_provider: IdProvider, expr_tree: BoolExpr, ++) -> Element: ++ """ ++ Export parsed rule to a CIB element ++ ++ parent_el -- element to place the rule into ++ id_provider -- elements' ids generator ++ expr_tree -- parsed rule tree root ++ """ ++ element = __export_part(parent_el, expr_tree, id_provider) ++ # Add score only to the top level rule element (which is represented by ++ # BoolExpr class). This is achieved by this function not being called for ++ # child nodes. ++ # TODO This was implemented originaly only for rules in resource and ++ # operation defaults. In those cases, score is the only rule attribute and ++ # it is always INFINITY. 
Once this code is used for other rules, modify ++ # this behavior as needed. ++ if isinstance(expr_tree, BoolExpr): ++ element.set("score", "INFINITY") ++ return element ++ ++ ++def __export_part( ++ parent_el: Element, expr_tree: RuleExprPart, id_provider: IdProvider ++) -> Element: ++ part_export_map = { ++ BoolExpr: __export_bool, ++ OpExpr: __export_op, ++ RscExpr: __export_rsc, ++ } ++ func = part_export_map[type(expr_tree)] ++ # mypy doesn't handle this dynamic call ++ return func(parent_el, expr_tree, id_provider) # type: ignore ++ ++ ++def __export_bool( ++ parent_el: Element, boolean: BoolExpr, id_provider: IdProvider ++) -> Element: ++ element = etree.SubElement( ++ cast(_Element, parent_el), ++ "rule", ++ { ++ "id": create_subelement_id(parent_el, "rule", id_provider), ++ "boolean-op": boolean.operator.lower(), ++ }, ++ ) ++ for child in boolean.children: ++ __export_part(cast(Element, element), child, id_provider) ++ return cast(Element, element) ++ ++ ++def __export_op( ++ parent_el: Element, op: OpExpr, id_provider: IdProvider ++) -> Element: ++ element = etree.SubElement( ++ cast(_Element, parent_el), ++ "op_expression", ++ { ++ "id": create_subelement_id(parent_el, f"op-{op.name}", id_provider), ++ "name": op.name, ++ }, ++ ) ++ if op.interval: ++ element.attrib["interval"] = op.interval ++ return cast(Element, element) ++ ++ ++def __export_rsc( ++ parent_el: Element, rsc: RscExpr, id_provider: IdProvider ++) -> Element: ++ id_part = "-".join(filter(None, [rsc.standard, rsc.provider, rsc.type])) ++ element = etree.SubElement( ++ cast(_Element, parent_el), ++ "rsc_expression", ++ {"id": create_subelement_id(parent_el, f"rsc-{id_part}", id_provider)}, ++ ) ++ if rsc.standard: ++ element.attrib["class"] = rsc.standard ++ if rsc.provider: ++ element.attrib["provider"] = rsc.provider ++ if rsc.type: ++ element.attrib["type"] = rsc.type ++ return cast(Element, element) +diff --git a/pcs/lib/cib/rule/parser.py b/pcs/lib/cib/rule/parser.py +new file mode 100644 +index 00000000..2215c524 +--- /dev/null ++++ b/pcs/lib/cib/rule/parser.py +@@ -0,0 +1,232 @@ ++from typing import ( ++ Any, ++ Iterator, ++ Optional, ++ Tuple, ++) ++ ++import pyparsing ++ ++from .expression_part import ( ++ BOOL_AND, ++ BOOL_OR, ++ BoolExpr, ++ OpExpr, ++ RscExpr, ++ RuleExprPart, ++) ++ ++pyparsing.ParserElement.enablePackrat() ++ ++ ++class RuleParseError(Exception): ++ def __init__( ++ self, ++ rule_string: str, ++ rule_line: str, ++ lineno: int, ++ colno: int, ++ pos: int, ++ msg: str, ++ ): ++ super().__init__() ++ self.rule_string = rule_string ++ self.rule_line = rule_line ++ self.lineno = lineno ++ self.colno = colno ++ self.pos = pos ++ self.msg = msg ++ ++ ++def parse_rule( ++ rule_string: str, allow_rsc_expr: bool = False, allow_op_expr: bool = False ++) -> BoolExpr: ++ """ ++ Parse a rule string and return a corresponding semantic tree ++ ++ rule_string -- the whole rule expression ++ allow_rsc_expr -- allow resource expressions in the rule ++ allow_op_expr -- allow resource operation expressions in the rule ++ """ ++ if not rule_string: ++ return BoolExpr(BOOL_AND, []) ++ ++ try: ++ parsed = __get_rule_parser( ++ allow_rsc_expr=allow_rsc_expr, allow_op_expr=allow_op_expr ++ ).parseString(rule_string, parseAll=True)[0] ++ except pyparsing.ParseException as e: ++ raise RuleParseError( ++ rule_string, e.line, e.lineno, e.col, e.loc, e.args[2], ++ ) ++ ++ if not isinstance(parsed, BoolExpr): ++ # If we only got a representation of an inner rule element instead of a ++ # rule element itself, wrap
the result in a default AND-rule. (There is ++ # only one expression so "and" vs. "or" doesn't really matter.) ++ parsed = BoolExpr(BOOL_AND, [parsed]) ++ ++ return parsed ++ ++ ++def __operator_operands( ++ token_list: pyparsing.ParseResults, ++) -> Iterator[Tuple[Any, Any]]: ++ # See pyparsing examples ++ # https://github.com/pyparsing/pyparsing/blob/master/examples/eval_arith.py ++ token_iterator = iter(token_list) ++ while True: ++ try: ++ yield (next(token_iterator), next(token_iterator)) ++ except StopIteration: ++ break ++ ++ ++def __build_bool_tree(token_list: pyparsing.ParseResults) -> RuleExprPart: ++ # See pyparsing examples ++ # https://github.com/pyparsing/pyparsing/blob/master/examples/eval_arith.py ++ token_to_operator = { ++ "and": BOOL_AND, ++ "or": BOOL_OR, ++ } ++ operand_left = token_list[0][0] ++ last_operator: Optional[str] = None ++ operand_list = [] ++ for operator, operand_right in __operator_operands(token_list[0][1:]): ++ # In each iteration, we get a bool_op ("and" or "or") and the right ++ # operand. ++ if last_operator == operator or last_operator is None: ++ # If we got the same operator as last time (or this is the first ++ # one), stack all the operands so we can put them all into one ++ # BoolExpr class. ++ operand_list.append(operand_right) ++ else: ++ # The operator has changed. Put all the stacked operands into the ++ # correct BoolExpr class and start the stacking again. The created ++ # class is the left operand of the current operator. ++ operand_left = BoolExpr( ++ token_to_operator[last_operator], [operand_left] + operand_list ++ ) ++ operand_list = [operand_right] ++ last_operator = operator ++ if operand_list and last_operator: ++ # Use any of the remaining stacked operands. ++ operand_left = BoolExpr( ++ token_to_operator[last_operator], [operand_left] + operand_list ++ ) ++ return operand_left ++ ++ ++def __build_op_expr(parse_result: pyparsing.ParseResults) -> RuleExprPart: ++ # Those attrs are defined by setResultsName in op_expr grammar rule ++ return OpExpr( ++ parse_result.name, ++ # pyparsing-2.1.0 puts "interval_value" into parse_result.interval as ++ # defined in the grammar AND it also puts "interval_value" into ++ # parse_result. pyparsing-2.4.0 only puts "interval_value" into ++ # parse_result. Not sure why, maybe it's a bug, maybe it's intentional. ++ parse_result.interval_value if parse_result.interval_value else None, ++ ) ++ ++ ++def __build_rsc_expr(parse_result: pyparsing.ParseResults) -> RuleExprPart: ++ # Those attrs are defined by the regexp in rsc_expr grammar rule ++ return RscExpr( ++ parse_result.standard, parse_result.provider, parse_result.type ++ ) ++ ++ ++def __get_rule_parser( ++ allow_rsc_expr: bool = False, allow_op_expr: bool = False ++) -> pyparsing.ParserElement: ++ # This function defines the rule grammar ++ ++ # It was created for 'pcs resource [op] defaults' commands to be able to ++ # set defaults for specified resources and/or operation using rules. When ++ # implementing that feature, there was no time to reimplement all the other ++ # rule expressions from old code. The plan is to move old rule parser code ++ # here once there is time / need to do it. ++ # How to add other rule expressions: ++ # 1 Create new grammar rules in a way similar to existing rsc_expr and ++ # op_expr. Use setName for better description of a grammar when printed. ++ # Use setResultsName for an easy access to parsed parts. ++ # 2 Create new classes in expression_part module, probably one for each ++ # type of expression.
Those are data containers holding the parsed data ++ # independent of the parser. ++ # 3 Create builders for the new classes and connect them to created ++ # grammar rules using setParseAction. ++ # 4 Add the new expressions into simple_expr_list. ++ # 5 Test and debug the whole thing. ++ ++ rsc_expr = pyparsing.And( ++ [ ++ pyparsing.CaselessKeyword("resource"), ++ # resource name ++ # Up to three parts seperated by ":". The parts can contain any ++ # characters except whitespace (token separator), ":" (parts ++ # separator) and "()" (brackets). ++ pyparsing.Regex( ++ r"(?P[^\s:()]+)?:(?P[^\s:()]+)?:(?P[^\s:()]+)?" ++ ).setName(""), ++ ] ++ ) ++ rsc_expr.setParseAction(__build_rsc_expr) ++ ++ op_interval = pyparsing.And( ++ [ ++ pyparsing.CaselessKeyword("interval"), ++ # no spaces allowed around the "=" ++ pyparsing.Literal("=").leaveWhitespace(), ++ # interval value: number followed by a time unit, no spaces allowed ++ # between the number and the unit thanks to Combine being used ++ pyparsing.Combine( ++ pyparsing.And( ++ [ ++ pyparsing.Word(pyparsing.nums), ++ pyparsing.Optional(pyparsing.Word(pyparsing.alphas)), ++ ] ++ ) ++ ) ++ .setName("[ + ++ ++ ++ Support for managing multiple sets of resource operations defaults. ++ ++ pcs commands: resource op defaults set create | delete | remove | update ++ ++ ++ ++ ++ Support for rules with 'resource' and 'op' expressions in sets of ++ resource operations defaults. ++ ++ pcs commands: resource op defaults set create ++ ++ + + + Show and set resources defaults, can set multiple defaults at once. +@@ -971,6 +986,21 @@ + pcs commands: resource defaults + + ++ ++ ++ Support for managing multiple sets of resources defaults. ++ ++ pcs commands: resource defaults set create | delete | remove | update ++ ++ ++ ++ ++ Support for rules with 'resource' and 'op' expressions in sets of ++ resources defaults. 
++ ++ pcs commands: resource defaults set create ++ ++ + + + +diff --git a/test/centos8/Dockerfile b/test/centos8/Dockerfile +index bcdfadef..753f0ca7 100644 +--- a/test/centos8/Dockerfile ++++ b/test/centos8/Dockerfile +@@ -12,6 +12,7 @@ RUN dnf install -y \ + python3-pip \ + python3-pycurl \ + python3-pyOpenSSL \ ++ python3-pyparsing \ + # ruby + ruby \ + ruby-devel \ +diff --git a/test/fedora30/Dockerfile b/test/fedora30/Dockerfile +index 60aad892..7edbfe5b 100644 +--- a/test/fedora30/Dockerfile ++++ b/test/fedora30/Dockerfile +@@ -9,6 +9,7 @@ RUN dnf install -y \ + python3-mock \ + python3-pycurl \ + python3-pyOpenSSL \ ++ python3-pyparsing \ + # ruby + ruby \ + ruby-devel \ +diff --git a/test/fedora31/Dockerfile b/test/fedora31/Dockerfile +index eb24bb1c..6750e222 100644 +--- a/test/fedora31/Dockerfile ++++ b/test/fedora31/Dockerfile +@@ -10,6 +10,7 @@ RUN dnf install -y \ + python3-pip \ + python3-pycurl \ + python3-pyOpenSSL \ ++ python3-pyparsing \ + # ruby + ruby \ + ruby-devel \ +diff --git a/test/fedora32/Dockerfile b/test/fedora32/Dockerfile +index 61a0a439..c6cc2146 100644 +--- a/test/fedora32/Dockerfile ++++ b/test/fedora32/Dockerfile +@@ -11,6 +11,7 @@ RUN dnf install -y \ + python3-pip \ + python3-pycurl \ + python3-pyOpenSSL \ ++ python3-pyparsing \ + # ruby + ruby \ + ruby-devel \ +-- +2.25.4 + diff --git a/SOURCES/bz1832914-01-fix-running-pcs-status-on-remote-nodes.patch b/SOURCES/bz1832914-01-fix-running-pcs-status-on-remote-nodes.patch deleted file mode 100644 index 8c51350..0000000 --- a/SOURCES/bz1832914-01-fix-running-pcs-status-on-remote-nodes.patch +++ /dev/null @@ -1,322 +0,0 @@ -From d88962d655257940a678724cc8d7bc1008ed3a46 Mon Sep 17 00:00:00 2001 -From: Tomas Jelinek -Date: Tue, 5 May 2020 11:02:36 +0200 -Subject: [PATCH 1/3] fix running 'pcs status' on remote nodes - ---- - pcs/lib/commands/status.py | 24 +++- - pcs_test/tier0/lib/commands/test_status.py | 122 +++++++++++++++++++++ - 2 files changed, 141 insertions(+), 5 deletions(-) - -diff --git a/pcs/lib/commands/status.py b/pcs/lib/commands/status.py -index 26332a65..84e3e046 100644 ---- a/pcs/lib/commands/status.py -+++ b/pcs/lib/commands/status.py -@@ -1,3 +1,4 @@ -+import os.path - from typing import ( - Iterable, - List, -@@ -6,6 +7,7 @@ from typing import ( - ) - from xml.etree.ElementTree import Element - -+from pcs import settings - from pcs.common import file_type_codes - from pcs.common.node_communicator import Communicator - from pcs.common.reports import ( -@@ -17,7 +19,7 @@ from pcs.common.tools import ( - indent, - ) - from pcs.lib import reports --from pcs.lib.cib import stonith -+from pcs.lib.cib import nvpair, stonith - from pcs.lib.cib.tools import get_crm_config, get_resources - from pcs.lib.communication.nodes import CheckReachability - from pcs.lib.communication.tools import run as run_communication -@@ -57,6 +59,7 @@ def full_cluster_status_plaintext( - """ - # pylint: disable=too-many-branches - # pylint: disable=too-many-locals -+ # pylint: disable=too-many-statements - - # validation - if not env.is_cib_live and env.is_corosync_conf_live: -@@ -84,7 +87,11 @@ def full_cluster_status_plaintext( - status_text, warning_list = get_cluster_status_text( - runner, hide_inactive_resources, verbose - ) -- corosync_conf = env.get_corosync_conf() -+ corosync_conf = None -+ # If we are live on a remote node, we have no corosync.conf. -+ # TODO Use the new file framework so the path is not exposed. 
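The hunk that follows is the core of the bz1832914 fix: on a live system, corosync.conf is read only when the file actually exists, because pacemaker remote nodes run pacemaker_remote without corosync. A minimal sketch of the guard, distilled from full_cluster_status_plaintext() for illustration (the helper name is invented; env.get_corosync_conf() and the settings path are as in the patch):

    import os.path

    from pcs import settings

    def _corosync_conf_or_none(env, live_cib):
        # A non-live (mocked) environment always supplies corosync.conf data;
        # only the live case on a remote node may legitimately lack the file.
        if not live_cib or os.path.exists(settings.corosync_conf_file):
            return env.get_corosync_conf()
        return None

When None is returned, the cluster name is read from the CIB's cluster-name property instead, and the verbose-only sections that need corosync.conf (node list, PCSD status) are skipped.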
-+ if not live or os.path.exists(settings.corosync_conf_file): -+ corosync_conf = env.get_corosync_conf() - cib = env.get_cib() - if verbose: - ticket_status_text, ticket_status_stderr, ticket_status_retval = ( -@@ -97,7 +104,7 @@ def full_cluster_status_plaintext( - except LibraryError: - pass - local_services_status = _get_local_services_status(runner) -- if verbose: -+ if verbose and corosync_conf: - node_name_list, node_names_report_list = get_existing_nodes_names( - corosync_conf - ) -@@ -117,8 +124,15 @@ def full_cluster_status_plaintext( - if report_processor.has_errors: - raise LibraryError() - -+ cluster_name = ( -+ corosync_conf.get_cluster_name() -+ if corosync_conf -+ else nvpair.get_value( -+ "cluster_property_set", get_crm_config(cib), "cluster-name", "" -+ ) -+ ) - parts = [] -- parts.append(f"Cluster name: {corosync_conf.get_cluster_name()}") -+ parts.append(f"Cluster name: {cluster_name}") - if warning_list: - parts.extend(["", "WARNINGS:"] + warning_list + [""]) - parts.append(status_text) -@@ -136,7 +150,7 @@ def full_cluster_status_plaintext( - else: - parts.extend(indent(ticket_status_text.splitlines())) - if live: -- if verbose: -+ if verbose and corosync_conf: - parts.extend(["", "PCSD Status:"]) - parts.extend(indent( - _format_node_reachability(node_name_list, node_reachability) -diff --git a/pcs_test/tier0/lib/commands/test_status.py b/pcs_test/tier0/lib/commands/test_status.py -index 06878668..7d54d579 100644 ---- a/pcs_test/tier0/lib/commands/test_status.py -+++ b/pcs_test/tier0/lib/commands/test_status.py -@@ -1,6 +1,7 @@ - from textwrap import dedent - from unittest import TestCase - -+from pcs import settings - from pcs.common import file_type_codes, report_codes - from pcs.lib.commands import status - from pcs_test.tools import fixture -@@ -9,16 +10,33 @@ from pcs_test.tools.misc import read_test_resource as rc_read - - - class FullClusterStatusPlaintext(TestCase): -+ # pylint: disable=too-many-public-methods - def setUp(self): - self.env_assist, self.config = get_env_tools(self) - self.node_name_list = ["node1", "node2", "node3"] - self.maxDiff = None - -+ @staticmethod -+ def _fixture_xml_clustername(name): -+ return """ -+ -+ -+ -+ -+ -+ """.format( -+ name=name -+ ) -+ - def _fixture_config_live_minimal(self): - (self.config - .runner.pcmk.load_state_plaintext( - stdout="crm_mon cluster status", - ) -+ .fs.exists(settings.corosync_conf_file, return_value=True) - .corosync_conf.load() - .runner.cib.load(resources=""" - -@@ -30,6 +48,25 @@ class FullClusterStatusPlaintext(TestCase): - ) - ) - -+ def _fixture_config_live_remote_minimal(self): -+ ( -+ self.config.runner.pcmk.load_state_plaintext( -+ stdout="crm_mon cluster status", -+ ) -+ .fs.exists(settings.corosync_conf_file, return_value=False) -+ .runner.cib.load( -+ optional_in_conf=self._fixture_xml_clustername("test-cib"), -+ resources=""" -+ -+ -+ -+ """, -+ ) -+ .runner.systemctl.is_active( -+ "sbd", is_active=False, name="runner.systemctl.is_active.sbd" -+ ) -+ ) -+ - def _fixture_config_local_daemons( - self, - corosync_enabled=True, corosync_active=True, -@@ -150,6 +187,7 @@ class FullClusterStatusPlaintext(TestCase): - .runner.pcmk.load_state_plaintext( - stdout="crm_mon cluster status", - ) -+ .fs.exists(settings.corosync_conf_file, return_value=True) - .corosync_conf.load_content("invalid corosync conf") - ) - self.env_assist.assert_raise_library_error( -@@ -170,6 +208,7 @@ class FullClusterStatusPlaintext(TestCase): - .runner.pcmk.load_state_plaintext( - stdout="crm_mon cluster status", - ) 
-+ .fs.exists(settings.corosync_conf_file, return_value=True) - .corosync_conf.load() - .runner.cib.load_content( - "some stdout", stderr="cib load error", returncode=1 -@@ -214,6 +253,7 @@ class FullClusterStatusPlaintext(TestCase): - verbose=True, - stdout="crm_mon cluster status", - ) -+ .fs.exists(settings.corosync_conf_file, return_value=True) - .corosync_conf.load(node_name_list=self.node_name_list) - .runner.cib.load(resources=""" - -@@ -254,6 +294,82 @@ class FullClusterStatusPlaintext(TestCase): - ) - ) - -+ def test_success_live_remote_node(self): -+ self._fixture_config_live_remote_minimal() -+ self._fixture_config_local_daemons( -+ corosync_enabled=False, -+ corosync_active=False, -+ pacemaker_enabled=False, -+ pacemaker_active=False, -+ pacemaker_remote_enabled=True, -+ pacemaker_remote_active=True, -+ ) -+ self.assertEqual( -+ status.full_cluster_status_plaintext(self.env_assist.get_env()), -+ dedent( -+ """\ -+ Cluster name: test-cib -+ crm_mon cluster status -+ -+ Daemon Status: -+ corosync: inactive/disabled -+ pacemaker: inactive/disabled -+ pacemaker_remote: active/enabled -+ pcsd: active/enabled""" -+ ), -+ ) -+ -+ def test_success_live_remote_node_verbose(self): -+ ( -+ self.config.runner.pcmk.can_fence_history_status( -+ stderr="not supported" -+ ) -+ .runner.pcmk.load_state_plaintext( -+ verbose=True, stdout="crm_mon cluster status", -+ ) -+ .fs.exists(settings.corosync_conf_file, return_value=False) -+ .runner.cib.load( -+ optional_in_conf=self._fixture_xml_clustername("test-cib"), -+ resources=""" -+ -+ -+ -+ """, -+ ) -+ .runner.pcmk.load_ticket_state_plaintext(stdout="ticket status") -+ .runner.systemctl.is_active( -+ "sbd", is_active=False, name="runner.systemctl.is_active.sbd" -+ ) -+ ) -+ self._fixture_config_local_daemons( -+ corosync_enabled=False, -+ corosync_active=False, -+ pacemaker_enabled=False, -+ pacemaker_active=False, -+ pacemaker_remote_enabled=True, -+ pacemaker_remote_active=True, -+ ) -+ -+ self.assertEqual( -+ status.full_cluster_status_plaintext( -+ self.env_assist.get_env(), verbose=True -+ ), -+ dedent( -+ """\ -+ Cluster name: test-cib -+ crm_mon cluster status -+ -+ Tickets: -+ ticket status -+ -+ Daemon Status: -+ corosync: inactive/disabled -+ pacemaker: inactive/disabled -+ pacemaker_remote: active/enabled -+ pcsd: active/enabled""" -+ ), -+ ) -+ - def test_succes_mocked(self): - (self.config - .env.set_corosync_conf_data(rc_read("corosync.conf")) -@@ -316,6 +432,7 @@ class FullClusterStatusPlaintext(TestCase): - fence_history=True, - stdout="crm_mon cluster status", - ) -+ .fs.exists(settings.corosync_conf_file, return_value=True) - .corosync_conf.load(node_name_list=self.node_name_list) - .runner.cib.load(resources=""" - -@@ -365,6 +482,7 @@ class FullClusterStatusPlaintext(TestCase): - verbose=True, - stdout="crm_mon cluster status", - ) -+ .fs.exists(settings.corosync_conf_file, return_value=True) - .corosync_conf.load(node_name_list=self.node_name_list) - .runner.cib.load(resources=""" - -@@ -421,6 +539,7 @@ class FullClusterStatusPlaintext(TestCase): - .runner.pcmk.load_state_plaintext( - stdout="crm_mon cluster status", - ) -+ .fs.exists(settings.corosync_conf_file, return_value=True) - .corosync_conf.load() - .runner.cib.load() - .runner.systemctl.is_active( -@@ -453,6 +572,7 @@ class FullClusterStatusPlaintext(TestCase): - .runner.pcmk.load_state_plaintext( - stdout="crm_mon cluster status", - ) -+ .fs.exists(settings.corosync_conf_file, return_value=True) - .corosync_conf.load() - .runner.cib.load() - 
.runner.systemctl.is_active( -@@ -481,6 +601,7 @@ class FullClusterStatusPlaintext(TestCase): - .runner.pcmk.load_state_plaintext( - stdout="crm_mon cluster status", - ) -+ .fs.exists(settings.corosync_conf_file, return_value=True) - .corosync_conf.load() - .runner.cib.load(resources=""" - -@@ -539,6 +660,7 @@ class FullClusterStatusPlaintext(TestCase): - verbose=True, - stdout="crm_mon cluster status", - ) -+ .fs.exists(settings.corosync_conf_file, return_value=True) - .corosync_conf.load(node_name_list=self.node_name_list) - .runner.cib.load(resources=""" - --- -2.25.4 - diff --git a/SOURCES/bz1838084-01-fix-ruby-daemon-closing-connection-after-30s.patch b/SOURCES/bz1838084-01-fix-ruby-daemon-closing-connection-after-30s.patch deleted file mode 100644 index cb505ee..0000000 --- a/SOURCES/bz1838084-01-fix-ruby-daemon-closing-connection-after-30s.patch +++ /dev/null @@ -1,39 +0,0 @@ -From 0cb9637f1962ad6be9e977b4b971b823af407c2d Mon Sep 17 00:00:00 2001 -From: Tomas Jelinek -Date: Thu, 14 May 2020 16:42:32 +0200 -Subject: [PATCH 3/3] fix ruby daemon closing connection after 30s - ---- - pcs/daemon/ruby_pcsd.py | 2 +- - pcsd/rserver.rb | 2 +- - 2 files changed, 2 insertions(+), 2 deletions(-) - -diff --git a/pcs/daemon/ruby_pcsd.py b/pcs/daemon/ruby_pcsd.py -index 53c53eaf..b640752d 100644 ---- a/pcs/daemon/ruby_pcsd.py -+++ b/pcs/daemon/ruby_pcsd.py -@@ -127,7 +127,7 @@ class Wrapper: - - def prepare_curl_callback(self, curl): - curl.setopt(pycurl.UNIX_SOCKET_PATH, self.__pcsd_ruby_socket) -- curl.setopt(pycurl.TIMEOUT, 70) -+ curl.setopt(pycurl.TIMEOUT, 0) - - async def send_to_ruby(self, request: RubyDaemonRequest): - try: -diff --git a/pcsd/rserver.rb b/pcsd/rserver.rb -index 4b58f252..08eceb79 100644 ---- a/pcsd/rserver.rb -+++ b/pcsd/rserver.rb -@@ -63,7 +63,7 @@ use TornadoCommunicationMiddleware - require 'pcsd' - - ::Rack::Handler.get('thin').run(Sinatra::Application, { -- :Host => PCSD_RUBY_SOCKET, -+ :Host => PCSD_RUBY_SOCKET, :timeout => 0 - }) do |server| - puts server.class - server.threaded = true --- -2.25.4 - diff --git a/SOURCES/bz1840158-01-fix-inability-to-create-colocation-const.-web-ui.patch b/SOURCES/bz1840158-01-fix-inability-to-create-colocation-const.-web-ui.patch deleted file mode 100644 index ce4df0d..0000000 --- a/SOURCES/bz1840158-01-fix-inability-to-create-colocation-const.-web-ui.patch +++ /dev/null @@ -1,25 +0,0 @@ -From 5175507f22adffcb443f9f89bda9705599dd89e9 Mon Sep 17 00:00:00 2001 -From: Ivan Devat -Date: Thu, 7 May 2020 17:11:12 +0200 -Subject: [PATCH 2/3] fix inability to create colocation const. 
(web ui) - ---- - pcsd/pcs.rb | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/pcsd/pcs.rb b/pcsd/pcs.rb -index 9a0efb46..59492d20 100644 ---- a/pcsd/pcs.rb -+++ b/pcsd/pcs.rb -@@ -187,7 +187,7 @@ def add_colocation_constraint( - score = "INFINITY" - end - command = [ -- PCS, "constraint", "colocation", "add", resourceA, resourceB, score -+ PCS, "constraint", "colocation", "add", resourceA, "with", resourceB, score - ] - command << '--force' if force - stdout, stderr, retval = run_cmd(auth_user, *command) --- -2.25.4 - diff --git a/SOURCES/bz1843079-01-upgrade-CIB-schema-for-on-fail-demote.patch b/SOURCES/bz1843079-01-upgrade-CIB-schema-for-on-fail-demote.patch new file mode 100644 index 0000000..c74ad24 --- /dev/null +++ b/SOURCES/bz1843079-01-upgrade-CIB-schema-for-on-fail-demote.patch @@ -0,0 +1,1307 @@ +From 62b970d5e9edbdd68dc006193b0e606fb7ae7cdd Mon Sep 17 00:00:00 2001 +From: Tomas Jelinek +Date: Thu, 2 Jul 2020 15:18:29 +0200 +Subject: [PATCH 1/3] upgrade CIB schema for on-fail=demote + +--- + pcs/lib/cib/resource/operations.py | 9 +- + pcs/lib/commands/remote_node.py | 73 +++-- + pcs/lib/commands/resource.py | 269 ++++++++++-------- + pcs/resource.py | 51 +++- + pcs_test/resources/cib-empty-3.3.xml | 2 +- + pcs_test/resources/cib-empty-3.4.xml | 2 +- + .../tier0/lib/cib/test_resource_operations.py | 9 +- + .../remote_node/test_node_add_remote.py | 54 +++- + .../commands/resource/test_resource_create.py | 237 ++++++++++++++- + pcs_test/tier1/cib_resource/test_create.py | 2 +- + pcs_test/tier1/legacy/test_resource.py | 23 ++ + pcs_test/tools/misc.py | 6 + + pcsd/capabilities.xml | 42 +++ + 13 files changed, 594 insertions(+), 185 deletions(-) + +diff --git a/pcs/lib/cib/resource/operations.py b/pcs/lib/cib/resource/operations.py +index 131e0a49..79d00685 100644 +--- a/pcs/lib/cib/resource/operations.py ++++ b/pcs/lib/cib/resource/operations.py +@@ -39,13 +39,14 @@ ATTRIBUTES = [ + ] + + ON_FAIL_VALUES = [ +- "ignore", + "block", +- "stop", +- "restart", +- "standby", ++ "demote", + "fence", ++ "ignore", ++ "restart", + "restart-container", ++ "standby", ++ "stop", + ] + + BOOLEAN_VALUES = [ +diff --git a/pcs/lib/commands/remote_node.py b/pcs/lib/commands/remote_node.py +index 6a2656a5..575e8044 100644 +--- a/pcs/lib/commands/remote_node.py ++++ b/pcs/lib/commands/remote_node.py +@@ -1,3 +1,10 @@ ++from typing import ( ++ Iterable, ++ Mapping, ++ Optional, ++ Union, ++) ++ + from pcs import settings + from pcs.common import reports + from pcs.common.file import RawFileError +@@ -13,6 +20,10 @@ from pcs.lib.cib.tools import ( + ElementSearcher, + get_resources, + ) ++ ++# TODO lib.commands should never import each other. This is to be removed when ++# the 'resource create' commands are overhauled. 
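The import that follows pulls in a helper defined later in this patch (in pcs/lib/commands/resource.py): it inspects the requested operations and reports the minimal CIB schema they need, which is what lets on-fail=demote work for remote-node resources as well. A self-contained sketch of the decision it encodes, using a plain tuple in place of pcs's Version type (editor's illustration only, not the patch's code):

    def required_cib_version(op_list):
        # on-fail=demote is only accepted by pacemaker's 3.4.0 CIB schema;
        # every other operation option works with the default schema.
        for op in op_list:
            if op.get("on-fail", "") == "demote":
                return (3, 4, 0)
        return None

node_add_remote() feeds the result to env.get_cib(minimal_version=...), so the CIB is upgraded before the new operation elements are written into it.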
++from pcs.lib.commands.resource import get_required_cib_version_for_primitive + from pcs.lib.communication.nodes import ( + DistributeFiles, + GetHostInfo, +@@ -24,6 +35,7 @@ from pcs.lib.communication.tools import ( + run as run_com, + run_and_raise, + ) ++from pcs.lib.corosync.config_facade import ConfigFacade as CorosyncConfigFacade + from pcs.lib.env import LibraryEnvironment + from pcs.lib.errors import LibraryError + from pcs.lib.file.instance import FileInstance +@@ -33,6 +45,9 @@ from pcs.lib.pacemaker import state + from pcs.lib.pacemaker.live import remove_node + + ++WaitType = Union[None, bool, int] ++ ++ + def _reports_skip_new_node(new_node_name, reason_type): + assert reason_type in {"unreachable", "not_live_cib"} + return [ +@@ -220,19 +235,19 @@ def _ensure_resource_running(env: LibraryEnvironment, resource_id): + + + def node_add_remote( +- env, +- node_name, +- node_addr, +- operations, +- meta_attributes, +- instance_attributes, +- skip_offline_nodes=False, +- allow_incomplete_distribution=False, +- allow_pacemaker_remote_service_fail=False, +- allow_invalid_operation=False, +- allow_invalid_instance_attributes=False, +- use_default_operations=True, +- wait=False, ++ env: LibraryEnvironment, ++ node_name: str, ++ node_addr: Optional[str], ++ operations: Iterable[Mapping[str, str]], ++ meta_attributes: Mapping[str, str], ++ instance_attributes: Mapping[str, str], ++ skip_offline_nodes: bool = False, ++ allow_incomplete_distribution: bool = False, ++ allow_pacemaker_remote_service_fail: bool = False, ++ allow_invalid_operation: bool = False, ++ allow_invalid_instance_attributes: bool = False, ++ use_default_operations: bool = True, ++ wait: WaitType = False, + ): + # pylint: disable=too-many-arguments + # pylint: disable=too-many-branches +@@ -241,34 +256,36 @@ def node_add_remote( + """ + create an ocf:pacemaker:remote resource and use it as a remote node + +- LibraryEnvironment env -- provides all for communication with externals +- string node_name -- the name of the new node +- mixed node_addr -- the address of the new node or None for default +- list of dict operations -- attributes for each entered operation +- dict meta_attributes -- attributes for primitive/meta_attributes +- dict instance_attributes -- attributes for primitive/instance_attributes +- bool skip_offline_nodes -- if True, ignore when some nodes are offline +- bool allow_incomplete_distribution -- if True, allow this command to ++ env -- provides all for communication with externals ++ node_name -- the name of the new node ++ node_addr -- the address of the new node or None for default ++ operations -- attributes for each entered operation ++ meta_attributes -- attributes for primitive/meta_attributes ++ instance_attributes -- attributes for primitive/instance_attributes ++ skip_offline_nodes -- if True, ignore when some nodes are offline ++ allow_incomplete_distribution -- if True, allow this command to + finish successfully even if file distribution did not succeed +- bool allow_pacemaker_remote_service_fail -- if True, allow this command to ++ allow_pacemaker_remote_service_fail -- if True, allow this command to + finish successfully even if starting/enabling pacemaker_remote did not + succeed +- bool allow_invalid_operation -- if True, allow to use operations that ++ allow_invalid_operation -- if True, allow to use operations that + are not listed in a resource agent metadata +- bool allow_invalid_instance_attributes -- if True, allow to use instance ++ allow_invalid_instance_attributes -- if 
True, allow to use instance + attributes that are not listed in a resource agent metadata and allow to + omit required instance_attributes +- bool use_default_operations -- if True, add operations specified in ++ use_default_operations -- if True, add operations specified in + a resource agent metadata to the resource +- mixed wait -- a flag for controlling waiting for pacemaker idle mechanism ++ wait -- a flag for controlling waiting for pacemaker idle mechanism + """ + env.ensure_wait_satisfiable(wait) + + report_processor = env.report_processor +- cib = env.get_cib() ++ cib = env.get_cib( ++ minimal_version=get_required_cib_version_for_primitive(operations) ++ ) + id_provider = IdProvider(cib) + if env.is_cib_live: +- corosync_conf = env.get_corosync_conf() ++ corosync_conf: Optional[CorosyncConfigFacade] = env.get_corosync_conf() + else: + corosync_conf = None + report_processor.report( +diff --git a/pcs/lib/commands/resource.py b/pcs/lib/commands/resource.py +index 75826c9d..db4b7bb3 100644 +--- a/pcs/lib/commands/resource.py ++++ b/pcs/lib/commands/resource.py +@@ -61,6 +61,9 @@ from pcs.lib.resource_agent import ( + from pcs.lib.validate import ValueTimeInterval + + ++WaitType = Union[None, bool, int] ++ ++ + @contextmanager + def resource_environment( + env, +@@ -262,44 +265,43 @@ def _get_required_cib_version_for_container( + + + def create( +- env, +- resource_id, +- resource_agent_name, +- operation_list, +- meta_attributes, +- instance_attributes, +- allow_absent_agent=False, +- allow_invalid_operation=False, +- allow_invalid_instance_attributes=False, +- use_default_operations=True, +- ensure_disabled=False, +- wait=False, +- allow_not_suitable_command=False, ++ env: LibraryEnvironment, ++ resource_id: str, ++ resource_agent_name: str, ++ operation_list: Iterable[Mapping[str, str]], ++ meta_attributes: Mapping[str, str], ++ instance_attributes: Mapping[str, str], ++ allow_absent_agent: bool = False, ++ allow_invalid_operation: bool = False, ++ allow_invalid_instance_attributes: bool = False, ++ use_default_operations: bool = True, ++ ensure_disabled: bool = False, ++ wait: WaitType = False, ++ allow_not_suitable_command: bool = False, + ): + # pylint: disable=too-many-arguments, too-many-locals + """ +- Create resource in a cib. ++ Create a primitive resource in a cib. 
+ +- LibraryEnvironment env provides all for communication with externals +- string resource_id is identifier of resource +- string resource_agent_name contains name for the identification of agent +- list of dict operation_list contains attributes for each entered operation +- dict meta_attributes contains attributes for primitive/meta_attributes +- dict instance_attributes contains attributes for +- primitive/instance_attributes +- bool allow_absent_agent is a flag for allowing agent that is not installed ++ env -- provides all for communication with externals ++ resource_id -- is identifier of resource ++ resource_agent_name -- contains name for the identification of agent ++ operation_list -- contains attributes for each entered operation ++ meta_attributes -- contains attributes for primitive/meta_attributes ++ instance_attributes -- contains attributes for primitive/instance_attributes ++ allow_absent_agent -- is a flag for allowing agent that is not installed + in a system -+ bool allow_invalid_operation is a flag for allowing to use operations that ++ allow_invalid_operation -- is a flag for allowing to use operations that + are not listed in a resource agent metadata -+ bool allow_invalid_instance_attributes is a flag for allowing to use ++ allow_invalid_instance_attributes -- is a flag for allowing to use + instance attributes that are not listed in a resource agent metadata + or for allowing to not use the instance_attributes that are required in + resource agent metadata -+ bool use_default_operations is a flag for stopping stopping of adding ++ use_default_operations -- is a flag for stopping the addition of + default cib operations (specified in a resource agent) -+ bool ensure_disabled is flag that keeps resource in target-role "Stopped" -+ mixed wait is flag for controlling waiting for pacemaker idle mechanism -+ bool allow_not_suitable_command -- flag for FORCE_NOT_SUITABLE_COMMAND ++ ensure_disabled -- is flag that keeps resource in target-role "Stopped" ++ wait -- is flag for controlling waiting for pacemaker idle mechanism ++ allow_not_suitable_command -- flag for FORCE_NOT_SUITABLE_COMMAND + """ + resource_agent = get_agent( + env.report_processor, +@@ -315,6 +317,9 @@ def create( + ensure_disabled + or resource.common.are_meta_disabled(meta_attributes) + ), ++ required_cib_version=get_required_cib_version_for_primitive( ++ operation_list ++ ), + ) as resources_section: + id_provider = IdProvider(resources_section) + _check_special_cases( +@@ -345,46 +350,45 @@ def create( + + + def create_as_clone( +- env, +- resource_id, +- resource_agent_name, +- operation_list, +- meta_attributes, +- instance_attributes, +- clone_meta_options, +- allow_absent_agent=False, +- allow_invalid_operation=False, +- allow_invalid_instance_attributes=False, +- use_default_operations=True, +- ensure_disabled=False, +- wait=False, +- allow_not_suitable_command=False, ++ env: LibraryEnvironment, ++ resource_id: str, ++ resource_agent_name: str, ++ operation_list: Iterable[Mapping[str, str]], ++ meta_attributes: Mapping[str, str], ++ instance_attributes: Mapping[str, str], ++ clone_meta_options: Mapping[str, str], ++ allow_absent_agent: bool = False, ++ allow_invalid_operation: bool = False, ++ allow_invalid_instance_attributes: bool = False, ++ use_default_operations: bool = True, ++ ensure_disabled: bool = False, ++ wait: WaitType = False, ++ allow_not_suitable_command: bool = False, + ): + # pylint: disable=too-many-arguments, too-many-locals + """ +- Create resource in a clone ++ Create
a primitive resource in a clone + +- LibraryEnvironment env provides all for communication with externals +- string resource_id is identifier of resource +- string resource_agent_name contains name for the identification of agent +- list of dict operation_list contains attributes for each entered operation +- dict meta_attributes contains attributes for primitive/meta_attributes +- dict instance_attributes contains attributes for +- primitive/instance_attributes +- dict clone_meta_options contains attributes for clone/meta_attributes +- bool allow_absent_agent is a flag for allowing agent that is not installed ++ env -- provides all for communication with externals ++ resource_id -- is identifier of resource ++ resource_agent_name -- contains name for the identification of agent ++ operation_list -- contains attributes for each entered operation ++ meta_attributes -- contains attributes for primitive/meta_attributes ++ instance_attributes -- contains attributes for primitive/instance_attributes ++ clone_meta_options -- contains attributes for clone/meta_attributes ++ allow_absent_agent -- is a flag for allowing agent that is not installed + in a system -+ bool allow_invalid_operation is a flag for allowing to use operations that ++ allow_invalid_operation -- is a flag for allowing to use operations that + are not listed in a resource agent metadata -+ bool allow_invalid_instance_attributes is a flag for allowing to use ++ allow_invalid_instance_attributes -- is a flag for allowing to use + instance attributes that are not listed in a resource agent metadata + or for allowing to not use the instance_attributes that are required in + resource agent metadata -+ bool use_default_operations is a flag for stopping stopping of adding ++ use_default_operations -- is a flag for stopping the addition of + default cib operations (specified in a resource agent) -+ bool ensure_disabled is flag that keeps resource in target-role "Stopped" -+ mixed wait is flag for controlling waiting for pacemaker idle mechanism -+ bool allow_not_suitable_command -- flag for FORCE_NOT_SUITABLE_COMMAND ++ ensure_disabled -- is flag that keeps resource in target-role "Stopped" ++ wait -- is flag for controlling waiting for pacemaker idle mechanism ++ allow_not_suitable_command -- flag for FORCE_NOT_SUITABLE_COMMAND + """ + resource_agent = get_agent( + env.report_processor, +@@ -401,6 +405,9 @@ def create_as_clone( + or resource.common.are_meta_disabled(meta_attributes) + or resource.common.is_clone_deactivated_by_meta(clone_meta_options) + ), ++ required_cib_version=get_required_cib_version_for_primitive( ++ operation_list ++ ), + ) as resources_section: + id_provider = IdProvider(resources_section) + _check_special_cases( +@@ -437,49 +444,50 @@ def create_as_clone( + + + def create_in_group( +- env, +- resource_id, +- resource_agent_name, +- group_id, +- operation_list, +- meta_attributes, +- instance_attributes, +- allow_absent_agent=False, +- allow_invalid_operation=False, +- allow_invalid_instance_attributes=False, +- use_default_operations=True, +- ensure_disabled=False, +- adjacent_resource_id=None, +- put_after_adjacent=False, +- wait=False, +- allow_not_suitable_command=False, ++ env: LibraryEnvironment, ++ resource_id: str, ++ resource_agent_name: str, ++ group_id: str, ++ operation_list: Iterable[Mapping[str, str]], ++ meta_attributes: Mapping[str, str], ++ instance_attributes: Mapping[str, str], ++ allow_absent_agent: bool = False, ++ allow_invalid_operation: bool = False, ++
allow_invalid_instance_attributes: bool = False, ++ use_default_operations: bool = True, ++ ensure_disabled: bool = False, ++ adjacent_resource_id: Optional[str] = None, ++ put_after_adjacent: bool = False, ++ wait: WaitType = False, ++ allow_not_suitable_command: bool = False, + ): + # pylint: disable=too-many-arguments, too-many-locals + """ + Create resource in a cib and put it into defined group + +- LibraryEnvironment env provides all for communication with externals +- string resource_id is identifier of resource +- string resource_agent_name contains name for the identification of agent +- string group_id is identificator for group to put primitive resource inside +- list of dict operation_list contains attributes for each entered operation +- dict meta_attributes contains attributes for primitive/meta_attributes +- bool allow_absent_agent is a flag for allowing agent that is not installed ++ env -- provides all for communication with externals ++ resource_id -- is identifier of resource ++ resource_agent_name -- contains name for the identification of agent ++ group_id -- is an identifier of the group to put the primitive resource inside ++ operation_list -- contains attributes for each entered operation ++ meta_attributes -- contains attributes for primitive/meta_attributes ++ instance_attributes -- contains attributes for primitive/instance_attributes ++ allow_absent_agent -- is a flag for allowing agent that is not installed + in a system -+ bool allow_invalid_operation is a flag for allowing to use operations that ++ allow_invalid_operation -- is a flag for allowing to use operations that + are not listed in a resource agent metadata -+ bool allow_invalid_instance_attributes is a flag for allowing to use ++ allow_invalid_instance_attributes -- is a flag for allowing to use + instance attributes that are not listed in a resource agent metadata + or for allowing to not use the instance_attributes that are required in + resource agent metadata -+ bool use_default_operations is a flag for stopping stopping of adding ++ use_default_operations -- is a flag for stopping the addition of + default cib operations (specified in a resource agent) -+ bool ensure_disabled is flag that keeps resource in target-role "Stopped" -+ string adjacent_resource_id identify neighbor of a newly created resource -+ bool put_after_adjacent is flag to put a newly create resource befor/after ++ ensure_disabled -- is flag that keeps resource in target-role "Stopped" ++ adjacent_resource_id -- identifies the neighbor of a newly created resource ++ put_after_adjacent -- is a flag to put a newly created resource before/after + adjacent resource -+ mixed wait is flag for controlling waiting for pacemaker idle mechanism -+ bool allow_not_suitable_command -- flag for FORCE_NOT_SUITABLE_COMMAND ++ wait -- is flag for controlling waiting for pacemaker idle mechanism ++ allow_not_suitable_command -- flag for FORCE_NOT_SUITABLE_COMMAND + """ + resource_agent = get_agent( + env.report_processor, +@@ -495,6 +503,9 @@ def create_in_group( + ensure_disabled + or resource.common.are_meta_disabled(meta_attributes) + ), ++ required_cib_version=get_required_cib_version_for_primitive( ++ operation_list ++ ), + ) as resources_section: + id_provider = IdProvider(resources_section) + _check_special_cases( +@@ -532,48 +543,48 @@ def create_in_group( + + + def create_into_bundle( +- env, +- resource_id, +- resource_agent_name, +- operation_list, +- meta_attributes, +- instance_attributes, +- bundle_id, +- allow_absent_agent=False, +-
allow_invalid_operation=False, +- allow_invalid_instance_attributes=False, +- use_default_operations=True, +- ensure_disabled=False, +- wait=False, +- allow_not_suitable_command=False, +- allow_not_accessible_resource=False, ++ env: LibraryEnvironment, ++ resource_id: str, ++ resource_agent_name: str, ++ operation_list: Iterable[Mapping[str, str]], ++ meta_attributes: Mapping[str, str], ++ instance_attributes: Mapping[str, str], ++ bundle_id: str, ++ allow_absent_agent: bool = False, ++ allow_invalid_operation: bool = False, ++ allow_invalid_instance_attributes: bool = False, ++ use_default_operations: bool = True, ++ ensure_disabled: bool = False, ++ wait: WaitType = False, ++ allow_not_suitable_command: bool = False, ++ allow_not_accessible_resource: bool = False, + ): + # pylint: disable=too-many-arguments, too-many-locals + """ + Create a new resource in a cib and put it into an existing bundle + +- LibraryEnvironment env provides all for communication with externals +- string resource_id is identifier of resource +- string resource_agent_name contains name for the identification of agent +- list of dict operation_list contains attributes for each entered operation +- dict meta_attributes contains attributes for primitive/meta_attributes +- dict instance_attributes contains attributes for ++ env -- provides all for communication with externals ++ resource_id -- is identifier of resource ++ resource_agent_name -- contains name for the identification of agent ++ operation_list -- contains attributes for each entered operation ++ meta_attributes -- contains attributes for primitive/meta_attributes ++ instance_attributes -- contains attributes for + primitive/instance_attributes +- string bundle_id is id of an existing bundle to put the created resource in +- bool allow_absent_agent is a flag for allowing agent that is not installed ++ bundle_id -- is id of an existing bundle to put the created resource in ++ allow_absent_agent -- is a flag for allowing agent that is not installed + in a system +- bool allow_invalid_operation is a flag for allowing to use operations that ++ allow_invalid_operation -- is a flag for allowing to use operations that + are not listed in a resource agent metadata +- bool allow_invalid_instance_attributes is a flag for allowing to use ++ allow_invalid_instance_attributes -- is a flag for allowing to use + instance attributes that are not listed in a resource agent metadata + or for allowing to not use the instance_attributes that are required in + resource agent metadata +- bool use_default_operations is a flag for stopping stopping of adding ++ use_default_operations -- is a flag for stopping the adding of + default cib operations (specified in a resource agent) +- bool ensure_disabled is flag that keeps resource in target-role "Stopped" +- mixed wait is flag for controlling waiting for pacemaker idle mechanism +- bool allow_not_suitable_command -- flag for FORCE_NOT_SUITABLE_COMMAND +- bool allow_not_accessible_resource -- flag for ++ ensure_disabled -- is flag that keeps resource in target-role "Stopped" ++ wait -- is flag for controlling waiting for pacemaker idle mechanism ++ allow_not_suitable_command -- flag for FORCE_NOT_SUITABLE_COMMAND ++ allow_not_accessible_resource -- flag for + FORCE_RESOURCE_IN_BUNDLE_NOT_ACCESSIBLE + """ + resource_agent = get_agent( +@@ -582,6 +593,11 @@ def create_into_bundle( + resource_agent_name, + allow_absent_agent, + ) ++ required_cib_version = get_required_cib_version_for_primitive( ++ operation_list ++ ) ++ if not 
required_cib_version: ++ required_cib_version = Version(2, 8, 0) + with resource_environment( + env, + wait, +@@ -590,7 +606,7 @@ def create_into_bundle( + ensure_disabled + or resource.common.are_meta_disabled(meta_attributes) + ), +- required_cib_version=Version(2, 8, 0), ++ required_cib_version=required_cib_version, + ) as resources_section: + id_provider = IdProvider(resources_section) + _check_special_cases( +@@ -1070,9 +1086,7 @@ def disable_simulate( + + + def enable( +- env: LibraryEnvironment, +- resource_or_tag_ids: Iterable[str], +- wait: Optional[Union[bool, int]], ++ env: LibraryEnvironment, resource_or_tag_ids: Iterable[str], wait: WaitType, + ): + """ + Allow specified resources to be started by the cluster +@@ -1689,3 +1703,12 @@ def _find_resources_expand_tags_or_raise( + return resource.common.expand_tags_to_resources( + resources_section, resource_or_tag_el_list, + ) ++ ++ ++def get_required_cib_version_for_primitive( ++ op_list: Iterable[Mapping[str, str]] ++) -> Optional[Version]: ++ for op in op_list: ++ if op.get("on-fail", "") == "demote": ++ return Version(3, 4, 0) ++ return None +diff --git a/pcs/resource.py b/pcs/resource.py +index e835fc99..9a3bd0ee 100644 +--- a/pcs/resource.py ++++ b/pcs/resource.py +@@ -355,6 +355,21 @@ def resource_op_add_cmd(lib, argv, modifiers): + if not argv: + raise CmdLineInputError() + res_id = argv.pop(0) ++ ++ # Check if we need to upgrade cib schema. ++ # To do that, argv must be parsed, which is duplication of parsing in ++ # resource_operation_add. But we need to upgrade the cib first before ++ # calling that function. Hopefully, this will be fixed in the new pcs ++ # architecture. ++ ++ # argv[0] is an operation name ++ op_properties = utils.convert_args_to_tuples(argv[1:]) ++ for key, value in op_properties: ++ if key == "on-fail" and value == "demote": ++ utils.checkAndUpgradeCIB(3, 4, 0) ++ break ++ ++ # add the requested operation + utils.replace_cib_configuration( + resource_operation_add(utils.get_cib_dom(), res_id, argv) + ) +@@ -895,8 +910,6 @@ def resource_update(lib, args, modifiers, deal_with_guest_change=True): + if len(args) < 2: + raise CmdLineInputError() + res_id = args.pop(0) +- cib_xml = utils.get_cib() +- dom = utils.get_cib_dom(cib_xml=cib_xml) + + # Extract operation arguments + ra_values, op_values, meta_values = parse_resource_options(args) +@@ -907,6 +920,28 @@ def resource_update(lib, args, modifiers, deal_with_guest_change=True): + wait_timeout = utils.validate_wait_get_timeout() + wait = True + ++ # Check if we need to upgrade cib schema. ++ # To do that, argv must be parsed, which is duplication of parsing below. ++ # But we need to upgrade the cib first before calling that function. ++ # Hopefully, this will be fixed in the new pcs architecture. 
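The same gate has to fire in the legacy CLI path before the dedicated argument parser runs, which is why resource_op_add_cmd above and resource_update below pre-scan the already tokenized operation options for on-fail=demote. A standalone sketch of that scan, with a simplified stand-in for utils.convert_args_to_tuples (pcs's helper that turns "key=value" tokens into pairs):

def convert_args_to_tuples(argv):
    # simplified stand-in for utils.convert_args_to_tuples:
    # "key=value" tokens become (key, value) pairs
    return [tuple(arg.split("=", 1)) for arg in argv if "=" in arg]

def needs_schema_3_4_upgrade(op_values):
    # op_values holds one argv list per requested operation, e.g.
    # [["monitor", "interval=30s", "on-fail=demote"], ["start", "timeout=20"]]
    for op_argv in op_values:
        if len(op_argv) < 2:
            continue
        # op_argv[0] is the operation name, the rest are key=value options
        for key, value in convert_args_to_tuples(op_argv[1:]):
            if key == "on-fail" and value == "demote":
                return True
    return False

assert needs_schema_3_4_upgrade([["monitor", "on-fail=demote"]])
assert not needs_schema_3_4_upgrade([["start", "timeout=20"]])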
++ ++ cib_upgraded = False ++ for op_argv in op_values: ++ if cib_upgraded: ++ break ++ if len(op_argv) < 2: ++ continue ++ # argv[0] is an operation name ++ op_vars = utils.convert_args_to_tuples(op_argv[1:]) ++ for k, v in op_vars: ++ if k == "on-fail" and v == "demote": ++ utils.checkAndUpgradeCIB(3, 4, 0) ++ cib_upgraded = True ++ break ++ ++ cib_xml = utils.get_cib() ++ dom = utils.get_cib_dom(cib_xml=cib_xml) ++ + resource = utils.dom_get_resource(dom, res_id) + if not resource: + clone = utils.dom_get_clone(dom, res_id) +@@ -994,21 +1029,21 @@ def resource_update(lib, args, modifiers, deal_with_guest_change=True): + else: + operations = operations[0] + +- for element in op_values: +- if not element: ++ for op_argv in op_values: ++ if not op_argv: + continue + +- op_name = element[0] ++ op_name = op_argv[0] + if op_name.find("=") != -1: + utils.err( + "%s does not appear to be a valid operation action" % op_name + ) + +- if len(element) < 2: ++ if len(op_argv) < 2: + continue + + op_role = "" +- op_vars = utils.convert_args_to_tuples(element[1:]) ++ op_vars = utils.convert_args_to_tuples(op_argv[1:]) + + for k, v in op_vars: + if k == "role": +@@ -1032,7 +1067,7 @@ def resource_update(lib, args, modifiers, deal_with_guest_change=True): + dom = resource_operation_add( + dom, + res_id, +- element, ++ op_argv, + validate_strict=False, + before_op=updating_op_before, + ) +diff --git a/pcs_test/resources/cib-empty-3.3.xml b/pcs_test/resources/cib-empty-3.3.xml +index 3a44fe08..4de94b6e 100644 +--- a/pcs_test/resources/cib-empty-3.3.xml ++++ b/pcs_test/resources/cib-empty-3.3.xml +@@ -1,4 +1,4 @@ +- ++ + + + +diff --git a/pcs_test/resources/cib-empty-3.4.xml b/pcs_test/resources/cib-empty-3.4.xml +index dcd4ff44..e677462d 100644 +--- a/pcs_test/resources/cib-empty-3.4.xml ++++ b/pcs_test/resources/cib-empty-3.4.xml +@@ -1,4 +1,4 @@ +- ++ + + + +diff --git a/pcs_test/tier0/lib/cib/test_resource_operations.py b/pcs_test/tier0/lib/cib/test_resource_operations.py +index 5e556cf4..e5be7a54 100644 +--- a/pcs_test/tier0/lib/cib/test_resource_operations.py ++++ b/pcs_test/tier0/lib/cib/test_resource_operations.py +@@ -316,13 +316,14 @@ class ValidateOperation(TestCase): + option_value="b", + option_name="on-fail", + allowed_values=[ +- "ignore", + "block", +- "stop", +- "restart", +- "standby", ++ "demote", + "fence", ++ "ignore", ++ "restart", + "restart-container", ++ "standby", ++ "stop", + ], + cannot_be_empty=False, + forbidden_characters=None, +diff --git a/pcs_test/tier0/lib/commands/remote_node/test_node_add_remote.py b/pcs_test/tier0/lib/commands/remote_node/test_node_add_remote.py +index 725ec68a..4da3fa0a 100644 +--- a/pcs_test/tier0/lib/commands/remote_node/test_node_add_remote.py ++++ b/pcs_test/tier0/lib/commands/remote_node/test_node_add_remote.py +@@ -118,7 +118,7 @@ FIXTURE_RESOURCES_TEMPLATE = """ + interval="0s" name="migrate_to" timeout="60" + /> + + + + """ +-FIXTURE_RESOURCES = FIXTURE_RESOURCES_TEMPLATE.format(server="remote-host") ++FIXTURE_RESOURCES = FIXTURE_RESOURCES_TEMPLATE.format( ++ server="remote-host", onfail="" ++) + + + class AddRemote(TestCase): +@@ -178,12 +180,48 @@ class AddRemote(TestCase): + .local.push_existing_authkey_to_remote(NODE_NAME, NODE_DEST_LIST) + .local.run_pacemaker_remote(NODE_NAME, NODE_DEST_LIST) + .env.push_cib( +- resources=FIXTURE_RESOURCES_TEMPLATE.format(server=NODE_NAME) ++ resources=FIXTURE_RESOURCES_TEMPLATE.format( ++ server=NODE_NAME, onfail="" ++ ) + ) + ) + node_add_remote(self.env_assist.get_env(), node_addr=NODE_NAME) + 
self.env_assist.assert_reports(REPORTS) + ++ def test_cib_upgrade_on_onfail_demote(self): ++ self._config_success_base() ++ self.config.runner.cib.load( ++ filename="cib-empty-3.4.xml", instead="runner.cib.load", ++ ) ++ self.config.runner.cib.upgrade(before="runner.cib.load") ++ self.config.runner.cib.load( ++ filename="cib-empty-3.3.xml", ++ name="load_cib_old_version", ++ before="runner.cib.upgrade", ++ ) ++ self.config.env.push_cib( ++ resources=FIXTURE_RESOURCES_TEMPLATE.format( ++ server="remote-host", onfail='on-fail="demote"' ++ ), ++ instead="env.push_cib", ++ ) ++ node_add_remote( ++ self.env_assist.get_env(), ++ operations=[ ++ { ++ "name": "monitor", ++ "timeout": "30", ++ "interval": "60s", ++ "on-fail": "demote", ++ } ++ ], ++ ) ++ self.env_assist.assert_reports( ++ REPORTS.info( ++ "cib_upgrade_successful", reports.codes.CIB_UPGRADE_SUCCESSFUL ++ ) ++ ) ++ + def test_node_name_conflict_report_is_unique(self): + ( + self.config.runner.cib.load( +@@ -623,7 +661,7 @@ class NotLive(TestCase): + .runner.pcmk.load_agent(agent_name="ocf:pacemaker:remote") + .env.push_cib( + resources=FIXTURE_RESOURCES_TEMPLATE.format( +- server=NODE_ADDR_PCSD ++ server=NODE_ADDR_PCSD, onfail="" + ) + ) + ) +@@ -648,7 +686,9 @@ class NotLive(TestCase): + self.config.runner.cib.load() + .runner.pcmk.load_agent(agent_name="ocf:pacemaker:remote") + .env.push_cib( +- resources=FIXTURE_RESOURCES_TEMPLATE.format(server=NODE_NAME) ++ resources=FIXTURE_RESOURCES_TEMPLATE.format( ++ server=NODE_NAME, onfail="" ++ ) + ) + ) + node_add_remote(self.env_assist.get_env(), no_node_addr=True) +@@ -672,7 +712,9 @@ class NotLive(TestCase): + self.config.runner.cib.load() + .runner.pcmk.load_agent(agent_name="ocf:pacemaker:remote") + .env.push_cib( +- resources=FIXTURE_RESOURCES_TEMPLATE.format(server="addr") ++ resources=FIXTURE_RESOURCES_TEMPLATE.format( ++ server="addr", onfail="" ++ ) + ) + ) + node_add_remote(self.env_assist.get_env(), node_addr="addr") +diff --git a/pcs_test/tier0/lib/commands/resource/test_resource_create.py b/pcs_test/tier0/lib/commands/resource/test_resource_create.py +index dc70ce22..a040a9d9 100644 +--- a/pcs_test/tier0/lib/commands/resource/test_resource_create.py ++++ b/pcs_test/tier0/lib/commands/resource/test_resource_create.py +@@ -35,13 +35,19 @@ def create( + ) + + +-def create_group(env, wait=TIMEOUT, disabled=False, meta_attributes=None): ++def create_group( ++ env, ++ wait=TIMEOUT, ++ disabled=False, ++ meta_attributes=None, ++ operation_list=None, ++): + return resource.create_in_group( + env, + "A", + "ocf:heartbeat:Dummy", + "G", +- operation_list=[], ++ operation_list=operation_list if operation_list else [], + meta_attributes=meta_attributes if meta_attributes else {}, + instance_attributes={}, + wait=wait, +@@ -50,13 +56,18 @@ def create_group(env, wait=TIMEOUT, disabled=False, meta_attributes=None): + + + def create_clone( +- env, wait=TIMEOUT, disabled=False, meta_attributes=None, clone_options=None ++ env, ++ wait=TIMEOUT, ++ disabled=False, ++ meta_attributes=None, ++ clone_options=None, ++ operation_list=None, + ): + return resource.create_as_clone( + env, + "A", + "ocf:heartbeat:Dummy", +- operation_list=[], ++ operation_list=operation_list if operation_list else [], + meta_attributes=meta_attributes if meta_attributes else {}, + instance_attributes={}, + clone_meta_options=clone_options if clone_options else {}, +@@ -71,12 +82,13 @@ def create_bundle( + disabled=False, + meta_attributes=None, + allow_not_accessible_resource=False, ++ operation_list=None, + ): + return 
resource.create_into_bundle( + env, + "A", + "ocf:heartbeat:Dummy", +- operation_list=[], ++ operation_list=operation_list if operation_list else [], + meta_attributes=meta_attributes if meta_attributes else {}, + instance_attributes={}, + bundle_id="B", +@@ -576,6 +588,60 @@ class Create(TestCase): + ] + ) + ++ def test_cib_upgrade_on_onfail_demote(self): ++ self.config.runner.cib.load( ++ filename="cib-empty-3.3.xml", ++ instead="runner.cib.load", ++ name="load_cib_old_version", ++ ) ++ self.config.runner.cib.upgrade() ++ self.config.runner.cib.load(filename="cib-empty-3.4.xml") ++ self.config.env.push_cib( ++ resources=""" ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ """ ++ ) ++ ++ create( ++ self.env_assist.get_env(), ++ operation_list=[ ++ { ++ "name": "monitor", ++ "timeout": "10", ++ "interval": "10", ++ "on-fail": "demote", ++ } ++ ], ++ ) ++ self.env_assist.assert_reports( ++ [fixture.info(report_codes.CIB_UPGRADE_SUCCESSFUL)] ++ ) ++ + + class CreateWait(TestCase): + def setUp(self): +@@ -746,6 +812,66 @@ class CreateInGroup(TestCase): + + create_group(self.env_assist.get_env(), wait=False) + ++ def test_cib_upgrade_on_onfail_demote(self): ++ self.config.remove(name="runner.pcmk.can_wait") ++ self.config.runner.cib.load( ++ filename="cib-empty-3.3.xml", ++ instead="runner.cib.load", ++ name="load_cib_old_version", ++ ) ++ self.config.runner.cib.upgrade() ++ self.config.runner.cib.load(filename="cib-empty-3.4.xml") ++ self.config.env.push_cib( ++ resources=""" ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ """ ++ ) ++ ++ create_group( ++ self.env_assist.get_env(), ++ operation_list=[ ++ { ++ "name": "monitor", ++ "timeout": "10", ++ "interval": "10", ++ "on-fail": "demote", ++ } ++ ], ++ wait=False, ++ ) ++ self.env_assist.assert_reports( ++ [fixture.info(report_codes.CIB_UPGRADE_SUCCESSFUL)] ++ ) ++ + def test_fail_wait(self): + self.config.env.push_cib( + resources=fixture_cib_resources_xml_group_simplest, +@@ -859,6 +985,62 @@ class CreateAsClone(TestCase): + ) + create_clone(self.env_assist.get_env(), wait=False) + ++ def test_cib_upgrade_on_onfail_demote(self): ++ self.config.remove(name="runner.pcmk.can_wait") ++ self.config.runner.cib.load( ++ filename="cib-empty-3.3.xml", ++ instead="runner.cib.load", ++ name="load_cib_old_version", ++ ) ++ self.config.runner.cib.upgrade() ++ self.config.runner.cib.load(filename="cib-empty-3.4.xml") ++ self.config.env.push_cib( ++ resources=""" ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ """ ++ ) ++ ++ create_clone( ++ self.env_assist.get_env(), ++ operation_list=[ ++ { ++ "name": "monitor", ++ "timeout": "10", ++ "interval": "10", ++ "on-fail": "demote", ++ } ++ ], ++ wait=False, ++ ) ++ self.env_assist.assert_reports( ++ [fixture.info(report_codes.CIB_UPGRADE_SUCCESSFUL)] ++ ) ++ + def test_fail_wait(self): + self.config.env.push_cib( + resources=fixture_cib_resources_xml_clone_simplest, +@@ -1168,7 +1350,7 @@ class CreateInToBundle(TestCase): + name="migrate_to" timeout="20" + /> + + +- """ ++ """, ++ onfail="" + ) + ) + # fmt: on +@@ -1290,6 +1473,42 @@ class CreateInToBundle(TestCase): + [fixture.info(report_codes.CIB_UPGRADE_SUCCESSFUL)] + ) + ++ def test_cib_upgrade_on_onfail_demote(self): ++ self.config.runner.pcmk.load_agent() ++ self.config.runner.cib.load( ++ filename="cib-empty-3.3.xml", name="load_cib_old_version", ++ ) ++ self.config.runner.cib.upgrade() ++ self.config.runner.cib.load( ++ filename="cib-empty-3.4.xml", resources=self.fixture_resources_pre ++ ) ++ self.config.env.push_cib( ++ 
resources=self.fixture_resource_post_simple_without_network.format( ++ network=""" ++ ++ """, ++ onfail='on-fail="demote"', ++ ) ++ ) ++ ++ create_bundle( ++ self.env_assist.get_env(), ++ operation_list=[ ++ { ++ "name": "monitor", ++ "timeout": "20", ++ "interval": "10", ++ "on-fail": "demote", ++ } ++ ], ++ wait=False, ++ ) ++ self.env_assist.assert_reports( ++ [fixture.info(report_codes.CIB_UPGRADE_SUCCESSFUL)] ++ ) ++ + def test_simplest_resource(self): + ( + self.config.runner.pcmk.load_agent() +@@ -1504,7 +1723,7 @@ class CreateInToBundle(TestCase): + .env.push_cib( + resources=( + self.fixture_resource_post_simple_without_network.format( +- network="" ++ network="", onfail="" + ) + ) + ) +@@ -1540,7 +1759,7 @@ class CreateInToBundle(TestCase): + .env.push_cib( + resources=( + self.fixture_resource_post_simple_without_network.format( +- network=network ++ network=network, onfail="" + ) + ) + ) +diff --git a/pcs_test/tier1/cib_resource/test_create.py b/pcs_test/tier1/cib_resource/test_create.py +index 977d627a..5ff83b71 100644 +--- a/pcs_test/tier1/cib_resource/test_create.py ++++ b/pcs_test/tier1/cib_resource/test_create.py +@@ -1143,7 +1143,7 @@ class FailOrWarnOp(ResourceTest): + " monitor on-fail=Abc", + ( + "Error: 'Abc' is not a valid on-fail value, use 'block', " +- "'fence', 'ignore', 'restart', 'restart-container', " ++ "'demote', 'fence', 'ignore', 'restart', 'restart-container', " + "'standby', 'stop'\n" + ERRORS_HAVE_OCURRED + ), + ) +diff --git a/pcs_test/tier1/legacy/test_resource.py b/pcs_test/tier1/legacy/test_resource.py +index 7ffcc83b..107ff406 100644 +--- a/pcs_test/tier1/legacy/test_resource.py ++++ b/pcs_test/tier1/legacy/test_resource.py +@@ -22,6 +22,7 @@ from pcs_test.tools.misc import ( + is_minimum_pacemaker_version, + outdent, + skip_unless_pacemaker_supports_bundle, ++ skip_unless_pacemaker_supports_op_onfail_demote, + skip_unless_crm_rule, + write_data_to_tmpfile, + write_file_to_tmpfile, +@@ -723,6 +724,28 @@ monitor interval=60s OCF_CHECK_LEVEL=1 (OPTest7-monitor-interval-60s) + ), + ) + ++ @skip_unless_pacemaker_supports_op_onfail_demote() ++ def test_add_operation_onfail_demote_upgrade_cib(self): ++ write_file_to_tmpfile(rc("cib-empty-3.3.xml"), self.temp_cib) ++ self.assert_pcs_success( ++ "resource create --no-default-ops R ocf:pacemaker:Dummy" ++ ) ++ self.assert_pcs_success( ++ "resource op add R start on-fail=demote", ++ stdout_full="Cluster CIB has been upgraded to latest version\n", ++ ) ++ ++ @skip_unless_pacemaker_supports_op_onfail_demote() ++ def test_update_add_operation_onfail_demote_upgrade_cib(self): ++ write_file_to_tmpfile(rc("cib-empty-3.3.xml"), self.temp_cib) ++ self.assert_pcs_success( ++ "resource create --no-default-ops R ocf:pacemaker:Dummy" ++ ) ++ self.assert_pcs_success( ++ "resource update R op start on-fail=demote", ++ stdout_full="Cluster CIB has been upgraded to latest version\n", ++ ) ++ + def _test_delete_remove_operation(self, command): + assert command in {"delete", "remove"} + +diff --git a/pcs_test/tools/misc.py b/pcs_test/tools/misc.py +index 33d78002..820f1e79 100644 +--- a/pcs_test/tools/misc.py ++++ b/pcs_test/tools/misc.py +@@ -253,6 +253,12 @@ def skip_unless_pacemaker_supports_rsc_and_op_rules(): + ) + + ++def skip_unless_pacemaker_supports_op_onfail_demote(): ++ return skip_unless_cib_schema_version( ++ (3, 4, 0), "resource operations with 'on-fail' option set to 'demote'" ++ ) ++ ++ + def skip_if_service_enabled(service_name): + return skipUnless( + not is_service_enabled(runner, service_name), +diff --git 
a/pcsd/capabilities.xml b/pcsd/capabilities.xml +index 6e1886cb..09983354 100644 +--- a/pcsd/capabilities.xml ++++ b/pcsd/capabilities.xml +@@ -465,6 +465,13 @@ + pcs commands: cluster node ( add-remote | delete-remote | remove-remote ) + + ++ ++ ++ Support for "demote" value of resource operation's "on-fail" option ++ ++ pcs commands: cluster node add-remote ++ ++ + + + +@@ -1056,6 +1063,13 @@ + pcs commands: resource create ... op + + ++ ++ ++ Support for "demote" value of resource operation's "on-fail" option ++ ++ pcs commands: resource create ... op ++ ++ + + + Wait for the created resource to start. +@@ -1105,6 +1119,13 @@ + pcs commands: resource update ... op + + ++ ++ ++ Support for "demote" value of resource operation's "on-fail" option ++ ++ pcs commands: resource update ... op ++ ++ + + + Wait for the changes to take effect. +@@ -1143,6 +1164,13 @@ + pcs commands: resource op ( add | delete | remove ) + + ++ ++ ++ Support for "demote" value of resource operation's "on-fail" option ++ ++ pcs commands: resource op add ++ ++ + + + +@@ -1555,6 +1583,20 @@ + pcs commands: stonith create ... op + + ++ ++ ++ Set resource operations when creating a stonith resource. ++ ++ pcs commands: stonith create ... op ++ ++ ++ ++ ++ Support for "demote" value of resource operation's "on-fail" option ++ ++ pcs commands: stonith create ... op ++ ++ + + + Wait for the created resource to start. +-- +2.25.4 + diff --git a/SOURCES/bz1857295-01-Fix-tag-removal-in-resource-unclone-ungroup-commands.patch b/SOURCES/bz1857295-01-Fix-tag-removal-in-resource-unclone-ungroup-commands.patch new file mode 100644 index 0000000..9f46bb1 --- /dev/null +++ b/SOURCES/bz1857295-01-Fix-tag-removal-in-resource-unclone-ungroup-commands.patch @@ -0,0 +1,402 @@ +From 4a986e8ee0610b1c85a04e38042e4073d41207a4 Mon Sep 17 00:00:00 2001 +From: Miroslav Lisik +Date: Mon, 13 Jul 2020 12:59:09 +0200 +Subject: [PATCH 2/3] Fix tag removal in resource 'unclone/ungroup' commands + and extend test coverage + +--- + pcs/resource.py | 2 +- + .../tier1/cib_resource/test_clone_unclone.py | 73 +++++++-- + .../tier1/cib_resource/test_group_ungroup.py | 143 +++++++++++++++--- + pcs_test/tools/cib.py | 10 +- + 4 files changed, 187 insertions(+), 41 deletions(-) + +diff --git a/pcs/resource.py b/pcs/resource.py +index 9a3bd0ee..49d28ef0 100644 +--- a/pcs/resource.py ++++ b/pcs/resource.py +@@ -2027,7 +2027,7 @@ def remove_resource_references( + if obj_ref.getAttribute("id") == resource_id: + tag = obj_ref.parentNode + tag.removeChild(obj_ref) +- if tag.getElementsByTagName(obj_ref).length == 0: ++ if tag.getElementsByTagName("obj_ref").length == 0: + remove_resource_references( + dom, tag.getAttribute("id"), output=output, + ) +diff --git a/pcs_test/tier1/cib_resource/test_clone_unclone.py b/pcs_test/tier1/cib_resource/test_clone_unclone.py +index c9c6a29e..2633801a 100644 +--- a/pcs_test/tier1/cib_resource/test_clone_unclone.py ++++ b/pcs_test/tier1/cib_resource/test_clone_unclone.py +@@ -55,6 +55,38 @@ FIXTURE_RESOURCES = """ + ) + + ++FIXTURE_CONSTRAINTS_CONFIG_XML = """ ++ ++ ++ ++ ++""" ++ ++ ++FIXTURE_TAGS_CONFIG_XML = """ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++""" ++ ++ ++FIXTURE_TAGS_RESULT_XML = """ ++ ++ ++ ++ ++ ++""" ++ ++ + class Unclone( + TestCase, + get_assert_pcs_effect_mixin( +@@ -66,6 +98,22 @@ class Unclone( + ): + empty_cib = rc("cib-empty.xml") + ++ def assert_tags_xml(self, expected_xml): ++ self.assert_resources_xml_in_cib( ++ expected_xml, ++ get_cib_part_func=lambda cib: etree.tostring( ++ 
etree.parse(cib).findall(".//tags")[0], ++ ), ++ ) ++ ++ def assert_constraint_xml(self, expected_xml): ++ self.assert_resources_xml_in_cib( ++ expected_xml, ++ get_cib_part_func=lambda cib: etree.tostring( ++ etree.parse(cib).findall(".//constraints")[0], ++ ), ++ ) ++ + def setUp(self): + # pylint: disable=invalid-name + self.temp_cib = get_tmp_file("tier1_cib_resource_group_ungroup") +@@ -75,18 +123,7 @@ class Unclone( + "resources", FIXTURE_CLONE, FIXTURE_DUMMY, + ) + xml_manip.append_to_first_tag_name( +- "configuration", +- """ +- +- +- +- +- +- +- +- +- +- """, ++ "configuration", FIXTURE_TAGS_CONFIG_XML, + ) + xml_manip.append_to_first_tag_name( + "constraints", +@@ -95,8 +132,8 @@ class Unclone( + rsc="C-clone" score="INFINITY"/> + """, + """ +- ++ + """, + ) + write_data_to_tmpfile(str(xml_manip), self.temp_cib) +@@ -111,6 +148,8 @@ class Unclone( + "Error: could not find resource: NonExistentClone\n", + ) + self.assert_resources_xml_in_cib(FIXTURE_CLONE_AND_RESOURCE) ++ self.assert_tags_xml(FIXTURE_TAGS_CONFIG_XML) ++ self.assert_constraint_xml(FIXTURE_CONSTRAINTS_CONFIG_XML) + + def test_not_clone_resource(self): + self.assert_pcs_fail( +@@ -118,9 +157,15 @@ class Unclone( + "Error: 'Dummy' is not a clone resource\n", + ) + self.assert_resources_xml_in_cib(FIXTURE_CLONE_AND_RESOURCE) ++ self.assert_tags_xml(FIXTURE_TAGS_CONFIG_XML) ++ self.assert_constraint_xml(FIXTURE_CONSTRAINTS_CONFIG_XML) + + def test_unclone_clone_id(self): + self.assert_effect("resource unclone C-clone", FIXTURE_RESOURCES) ++ self.assert_tags_xml(FIXTURE_TAGS_RESULT_XML) ++ self.assert_constraint_xml("") + + def test_unclone_resoruce_id(self): + self.assert_effect("resource unclone C", FIXTURE_RESOURCES) ++ self.assert_tags_xml(FIXTURE_TAGS_RESULT_XML) ++ self.assert_constraint_xml("") +diff --git a/pcs_test/tier1/cib_resource/test_group_ungroup.py b/pcs_test/tier1/cib_resource/test_group_ungroup.py +index f86e9890..88cc315d 100644 +--- a/pcs_test/tier1/cib_resource/test_group_ungroup.py ++++ b/pcs_test/tier1/cib_resource/test_group_ungroup.py +@@ -64,14 +64,63 @@ FIXTURE_AGROUP_XML = fixture_group_xml( + ) + + +-class TestGroupMixin( +- get_assert_pcs_effect_mixin( +- lambda cib: etree.tostring( +- # pylint:disable=undefined-variable +- etree.parse(cib).findall(".//resources")[0] +- ) +- ), +-): ++FIXTURE_CONSTRAINTS_CONFIG_XML = """ ++ ++ ++ ++ ++""" ++ ++FIXTURE_CLONE_TAG_CONSTRAINTS = """ ++ ++ ++ ++ ++""" ++ ++ ++FIXTURE_CLONE_CONSTRAINT = """ ++ ++ ++ ++""" ++ ++ ++FIXTURE_TAGS_CONFIG_XML = """ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++""" ++ ++ ++FIXTURE_TAGS_RESULT_XML = """ ++ ++ ++ ++ ++ ++ ++ ++""" ++ ++ ++class TestGroupMixin: + empty_cib = rc("cib-empty.xml") + + def setUp(self): +@@ -81,17 +130,7 @@ class TestGroupMixin( + xml_manip = XmlManipulation.from_file(self.empty_cib) + xml_manip.append_to_first_tag_name("resources", FIXTURE_AGROUP_XML) + xml_manip.append_to_first_tag_name( +- "configuration", +- """ +- +- +- +- +- +- +- +- +- """, ++ "configuration", FIXTURE_TAGS_CONFIG_XML, + ) + xml_manip.append_to_first_tag_name( + "constraints", +@@ -100,8 +139,8 @@ class TestGroupMixin( + rsc="AGroup" score="INFINITY"/> + """, + """ +- ++ + """, + ) + write_data_to_tmpfile(str(xml_manip), self.temp_cib) +@@ -111,9 +150,33 @@ class TestGroupMixin( + self.temp_cib.close() + + +-class GroupDeleteRemoveUngroupBase(TestGroupMixin): ++class GroupDeleteRemoveUngroupBase( ++ get_assert_pcs_effect_mixin( ++ lambda cib: etree.tostring( ++ # pylint:disable=undefined-variable ++ 
etree.parse(cib).findall(".//resources")[0] ++ ) ++ ), ++ TestGroupMixin, ++): + command = None + ++ def assert_tags_xml(self, expected_xml): ++ self.assert_resources_xml_in_cib( ++ expected_xml, ++ get_cib_part_func=lambda cib: etree.tostring( ++ etree.parse(cib).findall(".//tags")[0], ++ ), ++ ) ++ ++ def assert_constraint_xml(self, expected_xml): ++ self.assert_resources_xml_in_cib( ++ expected_xml, ++ get_cib_part_func=lambda cib: etree.tostring( ++ etree.parse(cib).findall(".//constraints")[0], ++ ), ++ ) ++ + def test_nonexistent_group(self): + self.assert_pcs_fail( + f"resource {self.command} NonExistentGroup", +@@ -122,6 +185,8 @@ class GroupDeleteRemoveUngroupBase(TestGroupMixin): + self.assert_resources_xml_in_cib( + fixture_resources_xml([FIXTURE_AGROUP_XML]), + ) ++ self.assert_tags_xml(FIXTURE_TAGS_CONFIG_XML) ++ self.assert_constraint_xml(FIXTURE_CONSTRAINTS_CONFIG_XML) + + def test_not_a_group_id(self): + self.assert_pcs_fail( +@@ -130,6 +195,8 @@ class GroupDeleteRemoveUngroupBase(TestGroupMixin): + self.assert_resources_xml_in_cib( + fixture_resources_xml([FIXTURE_AGROUP_XML]), + ) ++ self.assert_tags_xml(FIXTURE_TAGS_CONFIG_XML) ++ self.assert_constraint_xml(FIXTURE_CONSTRAINTS_CONFIG_XML) + + def test_whole_group(self): + self.assert_effect( +@@ -142,10 +209,12 @@ class GroupDeleteRemoveUngroupBase(TestGroupMixin): + ], + ), + output=( +- "Removing Constraint - location-T1-rh7-1-INFINITY\n" ++ "Removing Constraint - location-TagGroupOnly-rh7-1-INFINITY\n" + "Removing Constraint - location-AGroup-rh7-1-INFINITY\n" + ), + ) ++ self.assert_tags_xml(FIXTURE_TAGS_RESULT_XML) ++ self.assert_constraint_xml("") + + def test_specified_resources(self): + self.assert_effect( +@@ -160,6 +229,26 @@ class GroupDeleteRemoveUngroupBase(TestGroupMixin): + ], + ), + ) ++ self.assert_tags_xml(FIXTURE_TAGS_CONFIG_XML) ++ self.assert_constraint_xml(FIXTURE_CONSTRAINTS_CONFIG_XML) ++ ++ def test_all_resources(self): ++ self.assert_effect( ++ f"resource {self.command} AGroup A1 A2 A3", ++ fixture_resources_xml( ++ [ ++ fixture_primitive_xml("A1"), ++ fixture_primitive_xml("A2"), ++ fixture_primitive_xml("A3"), ++ ], ++ ), ++ output=( ++ "Removing Constraint - location-TagGroupOnly-rh7-1-INFINITY\n" ++ "Removing Constraint - location-AGroup-rh7-1-INFINITY\n" ++ ), ++ ) ++ self.assert_tags_xml(FIXTURE_TAGS_RESULT_XML) ++ self.assert_constraint_xml("") + + def test_cloned_group(self): + self.assert_pcs_success("resource clone AGroup") +@@ -172,6 +261,8 @@ class GroupDeleteRemoveUngroupBase(TestGroupMixin): + [fixture_clone_xml("AGroup-clone", FIXTURE_AGROUP_XML)], + ) + ) ++ self.assert_tags_xml(FIXTURE_TAGS_CONFIG_XML) ++ self.assert_constraint_xml(FIXTURE_CLONE_TAG_CONSTRAINTS) + + def test_cloned_group_all_resorces_specified(self): + self.assert_pcs_success("resource clone AGroup") +@@ -184,6 +275,8 @@ class GroupDeleteRemoveUngroupBase(TestGroupMixin): + [fixture_clone_xml("AGroup-clone", FIXTURE_AGROUP_XML)], + ) + ) ++ self.assert_tags_xml(FIXTURE_TAGS_CONFIG_XML) ++ self.assert_constraint_xml(FIXTURE_CLONE_TAG_CONSTRAINTS) + + def test_cloned_group_with_one_resource(self): + self.assert_pcs_success("resource clone AGroup") +@@ -199,8 +292,10 @@ class GroupDeleteRemoveUngroupBase(TestGroupMixin): + fixture_primitive_xml("A2"), + ], + ), +- output="Removing Constraint - location-T1-rh7-1-INFINITY\n", ++ output="Removing Constraint - location-TagGroupOnly-rh7-1-INFINITY\n", + ) ++ self.assert_tags_xml(FIXTURE_TAGS_RESULT_XML) ++ self.assert_constraint_xml(FIXTURE_CLONE_CONSTRAINT) + + + class 
ResourceUngroup(GroupDeleteRemoveUngroupBase, TestCase): +diff --git a/pcs_test/tools/cib.py b/pcs_test/tools/cib.py +index d52176cf..5eaaa92e 100644 +--- a/pcs_test/tools/cib.py ++++ b/pcs_test/tools/cib.py +@@ -30,8 +30,14 @@ def xml_format(xml_string): + + def get_assert_pcs_effect_mixin(get_cib_part): + class AssertPcsEffectMixin(AssertPcsMixin): +- def assert_resources_xml_in_cib(self, expected_xml_resources): +- xml = get_cib_part(self.temp_cib) ++ def assert_resources_xml_in_cib( ++ self, expected_xml_resources, get_cib_part_func=None, ++ ): ++ self.temp_cib.seek(0) ++ if get_cib_part_func is not None: ++ xml = get_cib_part_func(self.temp_cib) ++ else: ++ xml = get_cib_part(self.temp_cib) + try: + assert_xml_equal(expected_xml_resources, xml.decode()) + except AssertionError as e: +-- +2.25.4 + diff --git a/SOURCES/bz1867516-01-rule-fix-mixing-and-and-or-expressions.patch b/SOURCES/bz1867516-01-rule-fix-mixing-and-and-or-expressions.patch new file mode 100644 index 0000000..75222c5 --- /dev/null +++ b/SOURCES/bz1867516-01-rule-fix-mixing-and-and-or-expressions.patch @@ -0,0 +1,80 @@ +From 85f8cbca6af296a5b8e4d43e9f56daed0d7c195b Mon Sep 17 00:00:00 2001 +From: Tomas Jelinek +Date: Mon, 10 Aug 2020 12:17:01 +0200 +Subject: [PATCH 1/2] rule: fix mixing 'and' and 'or' expressions + +--- + pcs/lib/cib/rule/parsed_to_cib.py | 5 +++++ + pcs_test/tier0/lib/cib/rule/test_parsed_to_cib.py | 4 ++-- + pcs_test/tier1/test_cib_options.py | 11 +++++++++-- + 3 files changed, 16 insertions(+), 4 deletions(-) + +diff --git a/pcs/lib/cib/rule/parsed_to_cib.py b/pcs/lib/cib/rule/parsed_to_cib.py +index 0fcae4f1..130663db 100644 +--- a/pcs/lib/cib/rule/parsed_to_cib.py ++++ b/pcs/lib/cib/rule/parsed_to_cib.py +@@ -62,6 +62,11 @@ def __export_bool( + { + "id": create_subelement_id(parent_el, "rule", id_provider), + "boolean-op": boolean.operator.lower(), ++ # Score or score-attribute is required for nested rules, otherwise ++ # the CIB is not valid. Pacemaker doesn't use the score of nested ++ # rules. Score for the top rule, which is used by pacemaker, is ++ # supposed to be set in the export function above. 
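++ # For example, the nested half of "op start or op stop" under an "and"
++ # rule is exported as a rule element with boolean-op="or" and the dummy
++ # score="0"; without a score the CIB would fail validation.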
++ "score": "0", + }, + ) + for child in boolean.children: +diff --git a/pcs_test/tier0/lib/cib/rule/test_parsed_to_cib.py b/pcs_test/tier0/lib/cib/rule/test_parsed_to_cib.py +index f61fce99..fa639f7c 100644 +--- a/pcs_test/tier0/lib/cib/rule/test_parsed_to_cib.py ++++ b/pcs_test/tier0/lib/cib/rule/test_parsed_to_cib.py +@@ -185,7 +185,7 @@ class Complex(Base): + ), + """ + +- ++ + +@@ -197,7 +197,7 @@ class Complex(Base): + class="ocf" provider="heartbeat" type="Dummy" + /> + +- ++ + +diff --git a/pcs_test/tier1/test_cib_options.py b/pcs_test/tier1/test_cib_options.py +index ba8f3515..92dbaed1 100644 +--- a/pcs_test/tier1/test_cib_options.py ++++ b/pcs_test/tier1/test_cib_options.py +@@ -254,14 +254,21 @@ class OpDefaultsSetCreate( + self.assert_effect( + ( + f"{self.cli_command} set create id=X meta nam1=val1 " +- "rule resource ::Dummy and op monitor" ++ "rule resource ::Dummy and (op start or op stop)" + ), + f"""\ + <{self.cib_tag}> + + + +- ++ ++ ++ ++ + + + +-- +2.25.4 + diff --git a/SOURCES/daemon-fix-cookie-options.patch b/SOURCES/daemon-fix-cookie-options.patch deleted file mode 100644 index f23287c..0000000 --- a/SOURCES/daemon-fix-cookie-options.patch +++ /dev/null @@ -1,54 +0,0 @@ -From 898cfe8212a5940dba6552196ddd243f912b5942 Mon Sep 17 00:00:00 2001 -From: Tomas Jelinek -Date: Tue, 11 Feb 2020 10:18:33 +0100 -Subject: [PATCH 5/7] daemon: fix cookie options - ---- - pcs/daemon/app/session.py | 14 +++++++++++--- - 1 file changed, 11 insertions(+), 3 deletions(-) - -diff --git a/pcs/daemon/app/session.py b/pcs/daemon/app/session.py -index b4d29add..dcbb4c23 100644 ---- a/pcs/daemon/app/session.py -+++ b/pcs/daemon/app/session.py -@@ -4,10 +4,16 @@ from pcs.daemon.auth import check_user_groups, authorize_user - PCSD_SESSION = "pcsd.sid" - - class Mixin: -- __session = None - """ - Mixin for tornado.web.RequestHandler - """ -+ -+ __session = None -+ __cookie_options = { -+ "secure": True, -+ "httponly": True, -+ } -+ - def initialize(self, session_storage: Storage): - self.__storage = session_storage - -@@ -63,7 +69,7 @@ class Mixin: - """ - Write the session id into a response cookie. 
- """ -- self.set_cookie(PCSD_SESSION, self.session.sid) -+ self.set_cookie(PCSD_SESSION, self.session.sid, **self.__cookie_options) - - def put_request_cookies_sid_to_response_cookies_sid(self): - """ -@@ -73,7 +79,9 @@ class Mixin: - #TODO this method should exist temporarily (for sinatra compatibility) - #pylint: disable=invalid-name - if self.__sid_from_client is not None: -- self.set_cookie(PCSD_SESSION, self.__sid_from_client) -+ self.set_cookie( -+ PCSD_SESSION, self.__sid_from_client, **self.__cookie_options -+ ) - - def was_sid_in_request_cookies(self): - return self.__sid_from_client is not None --- -2.21.1 - diff --git a/SOURCES/do-not-support-cluster-setup-with-udp-u-transport.patch b/SOURCES/do-not-support-cluster-setup-with-udp-u-transport.patch index 8a08bef..6adfc5b 100644 --- a/SOURCES/do-not-support-cluster-setup-with-udp-u-transport.patch +++ b/SOURCES/do-not-support-cluster-setup-with-udp-u-transport.patch @@ -1,4 +1,4 @@ -From 10d13839883a96b35fc609eb51939ec97bc4aac6 Mon Sep 17 00:00:00 2001 +From aaf5cbfcc661cedc49ae5d86c0d442502aa17231 Mon Sep 17 00:00:00 2001 From: Ivan Devat Date: Tue, 20 Nov 2018 15:03:56 +0100 Subject: [PATCH 2/2] do not support cluster setup with udp(u) transport @@ -10,10 +10,10 @@ Subject: [PATCH 2/2] do not support cluster setup with udp(u) transport 3 files changed, 6 insertions(+) diff --git a/pcs/pcs.8 b/pcs/pcs.8 -index ff2ba0b0..7278c8dc 100644 +index 3efc5bb2..20247774 100644 --- a/pcs/pcs.8 +++ b/pcs/pcs.8 -@@ -283,6 +283,8 @@ By default, encryption is enabled with cipher=aes256 and hash=sha256. To disable +@@ -376,6 +376,8 @@ By default, encryption is enabled with cipher=aes256 and hash=sha256. To disable Transports udp and udpu: .br @@ -23,10 +23,10 @@ index ff2ba0b0..7278c8dc 100644 .br Transport options are: ip_version, netmtu diff --git a/pcs/usage.py b/pcs/usage.py -index 30c63964..60373d82 100644 +index 0f3c95a3..51bc1196 100644 --- a/pcs/usage.py +++ b/pcs/usage.py -@@ -689,6 +689,7 @@ Commands: +@@ -796,6 +796,7 @@ Commands: hash=sha256. To disable encryption, set cipher=none and hash=none. Transports udp and udpu: @@ -49,5 +49,5 @@ index b857cbae..b8d48d92 100644 #csetup-transport-options.knet .without-knet { -- -2.21.1 +2.25.4 diff --git a/SOURCES/update-a-hint-for-resource-create-master.patch b/SOURCES/update-a-hint-for-resource-create-master.patch deleted file mode 100644 index 512f999..0000000 --- a/SOURCES/update-a-hint-for-resource-create-master.patch +++ /dev/null @@ -1,39 +0,0 @@ -From a6708c6bde467cfced3c4a950eadff0375908303 Mon Sep 17 00:00:00 2001 -From: Tomas Jelinek -Date: Thu, 23 Jan 2020 14:47:49 +0100 -Subject: [PATCH 2/7] update a hint for 'resource create ... master' - ---- - pcs/cli/resource/parse_args.py | 11 +++++++++-- - 1 file changed, 9 insertions(+), 2 deletions(-) - -diff --git a/pcs/cli/resource/parse_args.py b/pcs/cli/resource/parse_args.py -index 92dddac9..86280edb 100644 ---- a/pcs/cli/resource/parse_args.py -+++ b/pcs/cli/resource/parse_args.py -@@ -1,5 +1,5 @@ - from pcs.cli.common.parse_args import group_by_keywords, prepare_options --from pcs.cli.common.errors import CmdLineInputError, HINT_SYNTAX_CHANGE -+from pcs.cli.common.errors import CmdLineInputError, SEE_MAN_CHANGES - - - def parse_create_simple(arg_list): -@@ -51,7 +51,14 @@ def parse_create(arg_list): - # manpage. - # To be removed in the next significant version. 
- if e.message == "missing value of 'master' option": -- raise CmdLineInputError(message=e.message, hint=HINT_SYNTAX_CHANGE) -+ raise CmdLineInputError( -+ message=e.message, -+ hint=( -+ "Master/Slave resources have been renamed to promotable " -+ "clones, please use the 'promotable' keyword instead of " -+ "'master'. " + SEE_MAN_CHANGES -+ ) -+ ) - raise e - - return parts --- -2.21.1 - diff --git a/SPECS/pcs.spec b/SPECS/pcs.spec index 8b091c3..3a5e3fd 100644 --- a/SPECS/pcs.spec +++ b/SPECS/pcs.spec @@ -1,16 +1,22 @@ Name: pcs -Version: 0.10.4 -Release: 6%{?dist}.1 +Version: 0.10.6 +Release: 4%{?dist} +# https://docs.fedoraproject.org/en-US/packaging-guidelines/LicensingGuidelines/ # https://fedoraproject.org/wiki/Licensing:Main?rd=Licensing#Good_Licenses # GPLv2: pcs -# ASL 2.0: tornado -# MIT: handlebars -License: GPLv2 and ASL 2.0 and MIT +# ASL 2.0: dataclasses, tornado +# MIT: handlebars, backports, dacite, daemons, ethon, mustermann, rack, +# rack-protection, rack-test, sinatra, tilt +# GPLv2 or Ruby: eventmachne, json +# (GPLv2 or Ruby) and BSD: thin +# BSD or Ruby: open4, ruby2_keywords +# BSD and MIT: ffi +License: GPLv2 and ASL 2.0 and MIT and BSD and (GPLv2 or Ruby) and (BSD or Ruby) URL: https://github.com/ClusterLabs/pcs Group: System Environment/Base Summary: Pacemaker Configuration System #building only for architectures with pacemaker and corosync available -ExclusiveArch: i686 x86_64 s390x ppc64le aarch64 %{arm} +ExclusiveArch: i686 x86_64 s390x ppc64le aarch64 %global version_or_commit %{version} # %%global version_or_commit 5c3f35d2819b0e8be0dcbe0ee8f81b9b24b20b54 @@ -18,27 +24,31 @@ ExclusiveArch: i686 x86_64 s390x ppc64le aarch64 %{arm} %global pcs_source_name %{name}-%{version_or_commit} # ui_commit can be determined by hash, tag or branch -%global ui_commit 0.1.2 +%global ui_commit 0.1.4 +%global ui_modules_version 0.1.3 %global ui_src_name pcs-web-ui-%{ui_commit} %global pcs_snmp_pkg_name pcs-snmp %global pyagentx_version 0.4.pcs.2 -%global tornado_version 6.0.3 -%global version_rubygem_backports 3.11.4 +%global tornado_version 6.0.4 +%global dataclasses_version 0.6 +%global dacite_version 1.5.0 +%global version_rubygem_backports 3.17.2 %global version_rubygem_daemons 1.3.1 -%global version_rubygem_ethon 0.11.0 +%global version_rubygem_ethon 0.12.0 %global version_rubygem_eventmachine 1.2.7 -%global version_rubygem_ffi 1.9.25 +%global version_rubygem_ffi 1.13.1 %global version_rubygem_json 2.3.0 -%global version_rubygem_mustermann 1.0.3 +%global version_rubygem_mustermann 1.1.1 %global version_rubygem_open4 1.3.4 -%global version_rubygem_rack 2.0.6 -%global version_rubygem_rack_protection 2.0.4 -%global version_rubygem_rack_test 1.0.0 -%global version_rubygem_sinatra 2.0.4 +%global version_rubygem_rack 2.2.3 +%global version_rubygem_rack_protection 2.0.8.1 +%global version_rubygem_rack_test 1.1.0 +%global version_rubygem_ruby2_keywords 0.0.2 +%global version_rubygem_sinatra 2.0.8.1 %global version_rubygem_thin 1.7.2 -%global version_rubygem_tilt 2.0.9 +%global version_rubygem_tilt 2.0.10 # We do not use _libdir macro because upstream is not prepared for it. # Pcs does not include binaries and thus it should live in /usr/lib. 
Tornado @@ -73,6 +83,8 @@ Source2: pcsd-bundle-config-2 Source41: https://github.com/ondrejmular/pyagentx/archive/v%{pyagentx_version}/pyagentx-%{pyagentx_version}.tar.gz Source42: https://github.com/tornadoweb/tornado/archive/v%{tornado_version}/tornado-%{tornado_version}.tar.gz +Source43: https://github.com/ericvsmith/dataclasses/archive/%{dataclasses_version}/dataclasses-%{dataclasses_version}.tar.gz +Source44: https://github.com/konradhalas/dacite/archive/v%{dacite_version}/dacite-%{dacite_version}.tar.gz Source81: https://rubygems.org/downloads/backports-%{version_rubygem_backports}.gem Source82: https://rubygems.org/downloads/ethon-%{version_rubygem_ethon}.gem @@ -91,42 +103,36 @@ Source92: https://rubygems.org/downloads/tilt-%{version_rubygem_tilt}.gem Source93: https://rubygems.org/downloads/eventmachine-%{version_rubygem_eventmachine}.gem Source94: https://rubygems.org/downloads/daemons-%{version_rubygem_daemons}.gem Source95: https://rubygems.org/downloads/thin-%{version_rubygem_thin}.gem +Source96: https://rubygems.org/downloads/ruby2_keywords-%{version_rubygem_ruby2_keywords}.gem Source100: https://github.com/idevat/pcs-web-ui/archive/%{ui_commit}/%{ui_src_name}.tar.gz -Source101: https://github.com/idevat/pcs-web-ui/releases/download/%{ui_commit}/pcs-web-ui-node-modules-%{ui_commit}.tar.xz +Source101: https://github.com/idevat/pcs-web-ui/releases/download/%{ui_modules_version}/pcs-web-ui-node-modules-%{ui_modules_version}.tar.xz # Patches from upstream. # They should come before downstream patches to avoid unnecessary conflicts. # Z-streams are exception here: they can come from upstream but should be # applied at the end to keep z-stream changes as straightforward as possible. -Patch1: bz1676431-01-Display-status-of-disaster-recovery.patch -Patch2: bz1743731-01-fix-error-msg-when-cluster-is-not-set-up.patch -Patch3: bz1792946-01-tests-update-for-pacemaker-2.0.3-4.patch -Patch4: bz1781303-01-fix-safe-disabling-clones-groups-bundles.patch -Patch5: update-a-hint-for-resource-create-master.patch -Patch6: bz1793574-01-fix-detecting-fence-history-support.patch -Patch7: bz1750427-01-link-to-sbd-man-page-from-sbd-enable-doc.patch -Patch8: daemon-fix-cookie-options.patch -Patch9: bz1783106-01-fix-sinatra-wrapper-performance-issue.patch -Patch10: bz1783106-02-send-request-from-python-to-ruby-more-directly.patch +# Patch1: name.patch +Patch1: bz1817547-01-resource-and-operation-defaults.patch +Patch2: bz1805082-01-fix-resource-stonith-refresh-documentation.patch +Patch3: bz1843079-01-upgrade-CIB-schema-for-on-fail-demote.patch +Patch4: bz1857295-01-Fix-tag-removal-in-resource-unclone-ungroup-commands.patch +Patch5: bz1867516-01-rule-fix-mixing-and-and-or-expressions.patch # Downstream patches do not come from upstream. They adapt pcs for specific # RHEL needs. 
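The two new python sources above feed pcs's typed data-transfer objects: dataclasses backports the python 3.7 stdlib module to platform-python 3.6, and dacite builds dataclass instances from the plain dicts parsed out of JSON. A hedged sketch of the pattern (the DTO class here is made up for illustration; pcs's real DTOs live under pcs/common):

from dataclasses import dataclass
from typing import List

import dacite

@dataclass(frozen=True)
class SiteDto:  # hypothetical example, not a real pcs class
    site_role: str
    node_name_list: List[str]

dto = dacite.from_dict(
    data_class=SiteDto,
    data={"site_role": "RECOVERY", "node_name_list": ["node-a", "node-b"]},
)
assert dto.node_name_list == ["node-a", "node-b"]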
Patch101: do-not-support-cluster-setup-with-udp-u-transport.patch -Patch102: bz1832914-01-fix-running-pcs-status-on-remote-nodes.patch -Patch103: bz1838084-01-fix-ruby-daemon-closing-connection-after-30s.patch -Patch104: bz1840158-01-fix-inability-to-create-colocation-const.-web-ui.patch # git for patches BuildRequires: git #printf from coreutils is used in makefile BuildRequires: coreutils -BuildRequires: execstack # python for pcs BuildRequires: platform-python BuildRequires: python3-devel BuildRequires: platform-python-setuptools BuildRequires: python3-pycurl +BuildRequires: python3-pyparsing # gcc for compiling custom rubygems BuildRequires: gcc BuildRequires: gcc-c++ @@ -143,16 +149,6 @@ BuildRequires: systemd # for tests BuildRequires: python3-lxml BuildRequires: python3-pyOpenSSL -BuildRequires: pacemaker-cli >= 2.0.0 -# BuildRequires: fence-agents-all -BuildRequires: fence-agents-apc -BuildRequires: fence-agents-scsi -BuildRequires: fence-agents-ipmilan -# for tests -%ifarch i686 x86_64 -BuildRequires: fence-virt -%endif -BuildRequires: booth-site # pcsd fonts and font management tools for creating symlinks to fonts BuildRequires: fontconfig BuildRequires: liberation-sans-fonts @@ -169,6 +165,7 @@ Requires: python3-lxml Requires: platform-python-setuptools Requires: python3-clufter => 0.70.0 Requires: python3-pycurl +Requires: python3-pyparsing # ruby and gems for pcsd Requires: ruby >= 2.2.0 Requires: rubygems @@ -196,8 +193,12 @@ Requires: liberation-sans-fonts Requires: overpass-fonts # favicon Red Hat logo Requires: redhat-logos +# needs logrotate for /etc/logrotate.d/pcsd +Requires: logrotate Provides: bundled(tornado) = %{tornado_version} +Provides: bundled(dataclasses) = %{dataclasses_version} +Provides: bundled(dacite) = %{dacite_version} Provides: bundled(backports) = %{version_rubygem_backports} Provides: bundled(daemons) = %{version_rubygem_daemons} Provides: bundled(ethon) = %{version_rubygem_ethon} @@ -207,8 +208,9 @@ Provides: bundled(json) = %{version_rubygem_json} Provides: bundled(mustermann) = %{version_rubygem_mustermann} Provides: bundled(open4) = %{version_rubygem_open4} Provides: bundled(rack) = %{version_rubygem_rack} -Provides: bundled(rack) = %{version_rubygem_rack_protection} -Provides: bundled(rack) = %{version_rubygem_rack_test} +Provides: bundled(rack_protection) = %{version_rubygem_rack_protection} +Provides: bundled(rack_test) = %{version_rubygem_rack_test} +Provides: bundled(ruby2_keywords) = %{version_rubygem_ruby2_keywords} Provides: bundled(sinatra) = %{version_rubygem_sinatra} Provides: bundled(thin) = %{version_rubygem_thin} Provides: bundled(tilt) = %{version_rubygem_tilt} @@ -258,7 +260,11 @@ update_times(){ unset file_list[0] for fname in ${file_list[@]}; do - touch -r $reference_file $fname + # some files could be deleted by a patch, therefore we test a file for + # existence before touch to avoid exit with error: No such file or + # directory + # diffstat cannot create list of files without deleted files + test -e $fname && touch -r $reference_file $fname done } @@ -277,20 +283,13 @@ update_times_patch(){ update_times ${patch_file_name} `diffstat -p1 -l ${patch_file_name}` } +# update_times_patch %%{PATCH1} update_times_patch %{PATCH1} update_times_patch %{PATCH2} update_times_patch %{PATCH3} update_times_patch %{PATCH4} update_times_patch %{PATCH5} -update_times_patch %{PATCH6} -update_times_patch %{PATCH7} -update_times_patch %{PATCH8} -update_times_patch %{PATCH9} -update_times_patch %{PATCH10} update_times_patch %{PATCH101} 
-update_times_patch %{PATCH102} -update_times_patch %{PATCH103} -update_times_patch %{PATCH104} cp -f %SOURCE1 pcsd/public/images # prepare dirs/files necessary for building web ui @@ -322,6 +321,7 @@ cp -f %SOURCE92 pcsd/vendor/cache cp -f %SOURCE93 pcsd/vendor/cache cp -f %SOURCE94 pcsd/vendor/cache cp -f %SOURCE95 pcsd/vendor/cache +cp -f %SOURCE96 pcsd/vendor/cache # 3) dir for python bundles @@ -342,6 +342,20 @@ update_times %SOURCE42 `find %{bundled_src_dir}/tornado -follow` cp %{bundled_src_dir}/tornado/LICENSE tornado_LICENSE cp %{bundled_src_dir}/tornado/README.rst tornado_README.rst +# 6) sources for python dataclasses +tar -xzf %SOURCE43 -C %{bundled_src_dir} +mv %{bundled_src_dir}/dataclasses-%{dataclasses_version} %{bundled_src_dir}/dataclasses +update_times %SOURCE43 `find %{bundled_src_dir}/dataclasses -follow` +cp %{bundled_src_dir}/dataclasses/LICENSE.txt dataclasses_LICENSE.txt +cp %{bundled_src_dir}/dataclasses/README.rst dataclasses_README.rst + +# 7) sources for python dacite +tar -xzf %SOURCE44 -C %{bundled_src_dir} +mv %{bundled_src_dir}/dacite-%{dacite_version} %{bundled_src_dir}/dacite +update_times %SOURCE44 `find %{bundled_src_dir}/dacite -follow` +cp %{bundled_src_dir}/dacite/LICENSE dacite_LICENSE +cp %{bundled_src_dir}/dacite/README.md dacite_README.md + %build %define debug_package %{nil} @@ -351,6 +365,11 @@ pwd # build bundled rubygems (in main install it is disabled by BUILD_GEMS=false) mkdir -p %{rubygem_bundle_dir} +# The '-g' cflags option is needed for generation of MiniDebugInfo for shared +# libraries from rubygem extensions +# Currently used rubygems with extensions: eventmachine, ffi, json, thin +# There was rpmdiff issue with missing .gnu_debugdata section +# see https://docs.engineering.redhat.com/display/HTD/rpmdiff-elf-stripping gem install \ --force --verbose --no-rdoc --no-ri -l --no-user-install \ -i %{rubygem_bundle_dir} \ @@ -365,22 +384,31 @@ gem install \ %{rubygem_cache_dir}/rack-protection-%{version_rubygem_rack_protection}.gem \ %{rubygem_cache_dir}/rack-test-%{version_rubygem_rack_test}.gem \ %{rubygem_cache_dir}/rack-%{version_rubygem_rack}.gem \ + %{rubygem_cache_dir}/ruby2_keywords-%{version_rubygem_ruby2_keywords}.gem \ %{rubygem_cache_dir}/sinatra-%{version_rubygem_sinatra}.gem \ %{rubygem_cache_dir}/thin-%{version_rubygem_thin}.gem \ %{rubygem_cache_dir}/tilt-%{version_rubygem_tilt}.gem \ -- '--with-ldflags="-Wl,-z,relro -Wl,-z,ibt -Wl,-z,now -Wl,--gc-sections"' \ - '--with-cflags="-O2 -ffunction-sections"' - -# We can remove files required for gem compilation -rm -rf %{rubygem_bundle_dir}/gems/ffi-%{version_rubygem_ffi}/ext -rm -rf %{rubygem_bundle_dir}/gems/json-%{version_rubygem_json}/ext - - -# With this file there is "File is not stripped" problem during rpmdiff -# See https://docs.engineering.redhat.com/display/HTD/rpmdiff-elf-stripping -for fname in `find %{rubygem_bundle_dir}/extensions -type f -name "*.so"`; do - strip ${fname} -done + '--with-cflags="-g -O2 -ffunction-sections"' + +# prepare license files +# some rubygems do not have a license file (ruby2_keywords, thin) +mv %{rubygem_bundle_dir}/gems/backports-%{version_rubygem_backports}/LICENSE.txt backports_LICENSE.txt +mv %{rubygem_bundle_dir}/gems/daemons-%{version_rubygem_daemons}/LICENSE daemons_LICENSE +mv %{rubygem_bundle_dir}/gems/ethon-%{version_rubygem_ethon}/LICENSE ethon_LICENSE +mv %{rubygem_bundle_dir}/gems/eventmachine-%{version_rubygem_eventmachine}/LICENSE eventmachine_LICENSE +mv 
%{rubygem_bundle_dir}/gems/eventmachine-%{version_rubygem_eventmachine}/GNU eventmachine_GNU +mv %{rubygem_bundle_dir}/gems/ffi-%{version_rubygem_ffi}/COPYING ffi_COPYING +mv %{rubygem_bundle_dir}/gems/ffi-%{version_rubygem_ffi}/LICENSE ffi_LICENSE +mv %{rubygem_bundle_dir}/gems/ffi-%{version_rubygem_ffi}/LICENSE.SPECS ffi_LICENSE.SPECS +mv %{rubygem_bundle_dir}/gems/json-%{version_rubygem_json}/LICENSE json_LICENSE +mv %{rubygem_bundle_dir}/gems/mustermann-%{version_rubygem_mustermann}/LICENSE mustermann_LICENSE +mv %{rubygem_bundle_dir}/gems/open4-%{version_rubygem_open4}/LICENSE open4_LICENSE +mv %{rubygem_bundle_dir}/gems/rack-%{version_rubygem_rack}/MIT-LICENSE rack_MIT-LICENSE +mv %{rubygem_bundle_dir}/gems/rack-protection-%{version_rubygem_rack_protection}/License rack-protection_License +mv %{rubygem_bundle_dir}/gems/rack-test-%{version_rubygem_rack_test}/MIT-LICENSE.txt rack-test_MIT-LICENSE.txt +mv %{rubygem_bundle_dir}/gems/sinatra-%{version_rubygem_sinatra}/LICENSE sinatra_LICENSE +mv %{rubygem_bundle_dir}/gems/tilt-%{version_rubygem_tilt}/COPYING tilt_COPYING # build web ui and put it to pcsd make -C %{pcsd_public_dir}/%{ui_src_name} build @@ -398,24 +426,37 @@ make install \ BASH_COMPLETION_DIR=%{_datadir}/bash-completion/completions \ BUNDLE_PYAGENTX_SRC_DIR=`readlink -f %{bundled_src_dir}/pyagentx` \ BUNDLE_TORNADO_SRC_DIR=`readlink -f %{bundled_src_dir}/tornado` \ + BUNDLE_DACITE_SRC_DIR=`readlink -f %{bundled_src_dir}/dacite` \ + BUNDLE_DATACLASSES_SRC_DIR=`readlink -f %{bundled_src_dir}/dataclasses` \ BUILD_GEMS=false \ SYSTEMCTL_OVERRIDE=true \ hdrdir="%{_includedir}" \ rubyhdrdir="%{_includedir}" \ includedir="%{_includedir}" -# With this file there is "File is not stripped" problem during rpmdiff -# See https://docs.engineering.redhat.com/display/HTD/rpmdiff-elf-stripping -for fname in `find ${RPM_BUILD_ROOT}%{pcs_libdir}/pcs/bundled/packages/tornado/ -type f -name "*.so"`; do - strip ${fname} -done - # symlink favicon into pcsd directories ln -fs /etc/favicon.png ${RPM_BUILD_ROOT}%{pcs_libdir}/%{pcsd_public_dir}/images/favicon.png #after the ruby gem compilation we do not need ruby gems in the cache rm -r -v $RPM_BUILD_ROOT%{pcs_libdir}/%{rubygem_cache_dir} +# We are not building debug package for pcs but we need to add MiniDebuginfo +# to the bundled shared libraries from rubygem extensions in order to satisfy +# rpmdiff's binary stripping checker. +# Therefore we call find-debuginfo.sh script manually in order to strip +# binaries and add MiniDebugInfo with .gnu_debugdata section +/usr/lib/rpm/find-debuginfo.sh -j2 -m -i -S debugsourcefiles.list +# find-debuginfo.sh generated some files into /usr/lib/debug and +# /usr/src/debug/ that we don't want in the package +rm -rf $RPM_BUILD_ROOT%{pcs_libdir}/debug +rm -rf $RPM_BUILD_ROOT%{_prefix}/src/debug + +# We can remove files required for gem compilation +rm -rf $RPM_BUILD_ROOT%{pcs_libdir}/%{rubygem_bundle_dir}/gems/eventmachine-%{version_rubygem_eventmachine}/ext +rm -rf $RPM_BUILD_ROOT%{pcs_libdir}/%{rubygem_bundle_dir}/gems/ffi-%{version_rubygem_ffi}/ext +rm -rf $RPM_BUILD_ROOT%{pcs_libdir}/%{rubygem_bundle_dir}/gems/json-%{version_rubygem_json}/ext +rm -rf $RPM_BUILD_ROOT%{pcs_libdir}/%{rubygem_bundle_dir}/gems/thin-%{version_rubygem_thin}/ext + %check # In the building environment LC_CTYPE is set to C which causes tests to fail # due to python prints a warning about it to stderr. 
The following environment @@ -438,7 +479,7 @@ run_all_tests(){ # TODO: Investigate the issue BUNDLED_LIB_LOCATION=$RPM_BUILD_ROOT%{pcs_libdir}/pcs/bundled/packages \ - %{__python3} pcs_test/suite.py -v --vanilla --all-but \ + %{__python3} pcs_test/suite.py --tier0 -v --vanilla --all-but \ pcs_test.tier0.lib.commands.test_resource_agent.DescribeAgentUtf8.test_describe \ pcs_test.tier0.daemon.app.test_app_remote.SyncConfigMutualExclusive.test_get_not_locked \ pcs_test.tier0.daemon.app.test_app_remote.SyncConfigMutualExclusive.test_post_not_locked \ @@ -504,8 +545,29 @@ remove_all_tests %doc CHANGELOG.md %doc README.md %doc tornado_README.rst +%doc dacite_README.md +%doc dataclasses_README.rst %license tornado_LICENSE +%license dacite_LICENSE +%license dataclasses_LICENSE.txt %license COPYING +# rugygem licenses +%license backports_LICENSE.txt +%license daemons_LICENSE +%license ethon_LICENSE +%license eventmachine_LICENSE +%license eventmachine_GNU +%license ffi_COPYING +%license ffi_LICENSE +%license ffi_LICENSE.SPECS +%license json_LICENSE +%license mustermann_LICENSE +%license open4_LICENSE +%license rack_MIT-LICENSE +%license rack-protection_License +%license rack-test_MIT-LICENSE.txt +%license sinatra_LICENSE +%license tilt_COPYING %{python3_sitelib}/pcs %{python3_sitelib}/pcs-%{version}-py3.*.egg-info %{_sbindir}/pcs @@ -514,11 +576,14 @@ remove_all_tests %{pcs_libdir}/pcsd/* %{pcs_libdir}/pcsd/.bundle/config %{pcs_libdir}/pcs/bundled/packages/tornado* +%{pcs_libdir}/pcs/bundled/packages/dacite* +%{pcs_libdir}/pcs/bundled/packages/dataclasses* +%{pcs_libdir}/pcs/bundled/packages/__pycache__/dataclasses.cpython-36.pyc %{_unitdir}/pcsd.service %{_unitdir}/pcsd-ruby.service %{_datadir}/bash-completion/completions/pcs %{_sharedstatedir}/pcsd -%{_sysconfdir}/pam.d/pcsd +%config(noreplace) %{_sysconfdir}/pam.d/pcsd %dir %{_var}/log/pcsd %config(noreplace) %{_sysconfdir}/logrotate.d/pcsd %config(noreplace) %{_sysconfdir}/sysconfig/pcsd @@ -558,12 +623,37 @@ remove_all_tests %license pyagentx_LICENSE.txt %changelog -* Wed May 27 2020 Miroslav Lisik - 0.10.4-6.el8_2.1 -- Fixed running pcs status on remote nodes -- Fixed ruby daemon closing connection after 30s -- Fixed inability to create colocation constraint in webUI -- Updated bundled rubygem-json -- Resolves: rhbz#1832914 rhbz#1838084 rhbz#1840154 rhbz#1840158 +* Tue Aug 11 2020 Miroslav Lisik - 0.10.6-4 +- Fixed invalid CIB error caused by resource and operation defaults with mixed and-or rules +- Updated pcs-web-ui +- Resolves: rhbz#1867516 + +* Thu Jul 16 2020 Miroslav Lisik - 0.10.6-3 +- Added Upgrade CIB if user specifies on-fail=demote +- Fixed rpmdiff issue with binary stripping checker +- Fixed removing non-empty tag by removing tagged resource group or clone +- Resolves: rhbz#1843079 rhbz#1857295 + +* Thu Jun 25 2020 Miroslav Lisik - 0.10.6-2 +- Added resource and operation defaults that apply to specific resource/operation types +- Added Requires/BuildRequires: python3-pyparsing +- Added Requires: logrotate +- Fixed resource and stonith documentation +- Fixed rubygem licenses +- Fixed update_times() +- Updated rubygem rack to version 2.2.3 +- Removed BuildRequires execstack (it is not needed) +- Resolves: rhbz#1805082 rhbz#1817547 + +* Thu Jun 11 2020 Miroslav Lisik - 0.10.6-1 +- Rebased to latest upstream sources (see CHANGELOG.md) +- Added python bundled dependencies: dacite, dataclasses +- Added new bundled rubygem ruby2_keywords +- Updated rubygem bundled packages: backports, ethon, ffi, json, mustermann, rack, 
rack_protection, rack_test, sinatra, tilt +- Updated pcs-web-ui +- Updated test run; only tier0 tests are run during build +- Removed BuildRequires needed only for tier1 tests, which were removed from the build (pacemaker-cli, fence-agents-*, fence-virt, booth-site) +- Resolves: rhbz#1387358 rhbz#1684676 rhbz#1722970 rhbz#1778672 rhbz#1782553 rhbz#1790460 rhbz#1805082 rhbz#1810017 rhbz#1817547 rhbz#1830552 rhbz#1832973 rhbz#1833114 rhbz#1833506 rhbz#1838853 rhbz#1839637 * Fri Mar 20 2020 Miroslav Lisik - 0.10.4-6 - Fixed communication between python and ruby daemons