From 857f7dae3f61ec91d9fc13715a4403f470b0db50 Mon Sep 17 00:00:00 2001 From: CentOS Sources Date: Jul 28 2020 06:54:37 +0000 Subject: import pcs-0.10.6-2.el8 --- diff --git a/.gitignore b/.gitignore index 84e3f80..b5198b3 100644 --- a/.gitignore +++ b/.gitignore @@ -1,17 +1,23 @@ SOURCES/HAM-logo.png -SOURCES/backports-3.11.4.gem -SOURCES/ethon-0.11.0.gem -SOURCES/ffi-1.9.25.gem -SOURCES/json-2.1.0.gem -SOURCES/mustermann-1.0.3.gem +SOURCES/backports-3.17.2.gem +SOURCES/dacite-1.5.0.tar.gz +SOURCES/daemons-1.3.1.gem +SOURCES/dataclasses-0.6.tar.gz +SOURCES/ethon-0.12.0.gem +SOURCES/eventmachine-1.2.7.gem +SOURCES/ffi-1.13.1.gem +SOURCES/json-2.3.0.gem +SOURCES/mustermann-1.1.1.gem SOURCES/open4-1.3.4-1.gem -SOURCES/pcs-0.10.4.tar.gz -SOURCES/pcs-web-ui-0.1.2.tar.gz -SOURCES/pcs-web-ui-node-modules-0.1.2.tar.xz +SOURCES/pcs-0.10.6.tar.gz +SOURCES/pcs-web-ui-0.1.3.tar.gz +SOURCES/pcs-web-ui-node-modules-0.1.3.tar.xz SOURCES/pyagentx-0.4.pcs.2.tar.gz -SOURCES/rack-2.0.6.gem -SOURCES/rack-protection-2.0.4.gem -SOURCES/rack-test-1.0.0.gem -SOURCES/sinatra-2.0.4.gem -SOURCES/tilt-2.0.9.gem -SOURCES/tornado-6.0.3.tar.gz +SOURCES/rack-2.2.3.gem +SOURCES/rack-protection-2.0.8.1.gem +SOURCES/rack-test-1.1.0.gem +SOURCES/ruby2_keywords-0.0.2.gem +SOURCES/sinatra-2.0.8.1.gem +SOURCES/thin-1.7.2.gem +SOURCES/tilt-2.0.10.gem +SOURCES/tornado-6.0.4.tar.gz diff --git a/.pcs.metadata b/.pcs.metadata index 0d738d2..2c6863f 100644 --- a/.pcs.metadata +++ b/.pcs.metadata @@ -1,17 +1,23 @@ 679a4ce22a33ffd4d704261a17c00cff98d9499a SOURCES/HAM-logo.png -edf08f3a0d9e202048857d78ddda44e59294084c SOURCES/backports-3.11.4.gem -3c921ceeb2847be8cfa25704be74923e233786bd SOURCES/ethon-0.11.0.gem -86fa011857f977254ccf39f507587310f9ade768 SOURCES/ffi-1.9.25.gem -8b9e81a2a6ff57f97bec1f65940c61cc6b6d81be SOURCES/json-2.1.0.gem -2d090e7d3cd2a35efeaeacf006100fb83b828686 SOURCES/mustermann-1.0.3.gem +28b63a742124da6c9575a1c5e7d7331ef93600b2 SOURCES/backports-3.17.2.gem 
+c14ee49221d8e1b09364b5f248bc3da12484f675 SOURCES/dacite-1.5.0.tar.gz +e28c1e78d1a6e34e80f4933b494f1e0501939dd3 SOURCES/daemons-1.3.1.gem +81079b734108084eea0ae1c05a1cab0e806a3a1d SOURCES/dataclasses-0.6.tar.gz +921ef1be44583a7644ee7f20fe5f26f21d018a04 SOURCES/ethon-0.12.0.gem +7a5b2896e210fac9759c786ee4510f265f75b481 SOURCES/eventmachine-1.2.7.gem +cfa25e7a3760c3ec16723cb8263d9b7a52d0eadf SOURCES/ffi-1.13.1.gem +0230e8c5a37f1543982e5b04be503dd5f9004b47 SOURCES/json-2.3.0.gem +50a4e37904485810cb05e27d75c9783e5a8f3402 SOURCES/mustermann-1.1.1.gem 41a7fe9f8e3e02da5ae76c821b89c5b376a97746 SOURCES/open4-1.3.4-1.gem -d2b649f271580b18d39efffa93f62b55291ef55d SOURCES/pcs-0.10.4.tar.gz -8ac1291ce8f56073b74149ac56acc094337a3298 SOURCES/pcs-web-ui-0.1.2.tar.gz -52599fe9c17bda8cc0cad1acf830a9114b8b6db6 SOURCES/pcs-web-ui-node-modules-0.1.2.tar.xz +73fafb4228326c14a799f0cccbcb734ab7ba2bfa SOURCES/pcs-0.10.6.tar.gz +df118954a980ceecc9cdd0e85a83d43253836f7f SOURCES/pcs-web-ui-0.1.3.tar.gz +3e09042e3dc32c992451ba4c0454f2879f0d3f40 SOURCES/pcs-web-ui-node-modules-0.1.3.tar.xz 3176b2f2b332c2b6bf79fe882e83feecf3d3f011 SOURCES/pyagentx-0.4.pcs.2.tar.gz -b15267e1f94e69238a00a6f1bd48fb7683c03a78 SOURCES/rack-2.0.6.gem -c1376e5678322b401d988d261762a78bf2cf3361 SOURCES/rack-protection-2.0.4.gem -4c99cf0a82372a1bc5968c1551d9e606b68b4879 SOURCES/rack-test-1.0.0.gem -1c85f05c874bc8c0bf9c40291ea2d430090cdfd9 SOURCES/sinatra-2.0.4.gem -55a75a80e29731d072fe44dfaf865479b65c27fd SOURCES/tilt-2.0.9.gem -126c66189fc5b26a39c9b54eb17254652cca8b27 SOURCES/tornado-6.0.3.tar.gz +345b7169d4d2d62176a225510399963bad62b68f SOURCES/rack-2.2.3.gem +1f046e23baca8beece3b38c60382f44aa2b2cb41 SOURCES/rack-protection-2.0.8.1.gem +b80bc5ca38a885e747271675ba91dd3d02136bf1 SOURCES/rack-test-1.1.0.gem +0be571aacb5d6a212a30af3f322a7000d8af1ef9 SOURCES/ruby2_keywords-0.0.2.gem +04cca7a5d9d641fe076e4e24dc5b6ff31922f4c3 SOURCES/sinatra-2.0.8.1.gem +41395e86322ffd31f3a7aef1f697bda3e1e2d6b9 SOURCES/thin-1.7.2.gem 
+d265c822a6b228392d899e9eb5114613d65e6967 SOURCES/tilt-2.0.10.gem +e177f2a092dc5f23b0b3078e40adf52e17a9f8a6 SOURCES/tornado-6.0.4.tar.gz diff --git a/SOURCES/bz1676431-01-Display-status-of-disaster-recovery.patch b/SOURCES/bz1676431-01-Display-status-of-disaster-recovery.patch deleted file mode 100644 index bd37518..0000000 --- a/SOURCES/bz1676431-01-Display-status-of-disaster-recovery.patch +++ /dev/null @@ -1,5055 +0,0 @@ -From 7cf137380bc80653c50747a1d4d70783d593fcb5 Mon Sep 17 00:00:00 2001 -From: Miroslav Lisik -Date: Fri, 29 Nov 2019 12:16:11 +0100 -Subject: [PATCH 1/3] squash bz1676431 Display status of disaster recovery site - -support DR config in node add, node remove, cluster destroy - -dr: add command for setting recovery site - -improve typing - -move tests - -dr: add a command for displaying clusters' status - -dr: add a command for displaying dr config - -dr: add 'destroy' sub-command - -dr: review based fixes - -update capabilities, changelog ---- - CHANGELOG.md | 9 + - pcs/app.py | 2 + - pcs/cli/common/console_report.py | 16 +- - pcs/cli/common/lib_wrapper.py | 13 + - pcs/cli/dr.py | 138 ++++ - pcs/cli/routing/dr.py | 15 + - pcs/cluster.py | 1 + - pcs/common/dr.py | 109 +++ - pcs/common/file_type_codes.py | 27 +- - pcs/common/report_codes.py | 3 + - pcs/lib/commands/cluster.py | 18 +- - pcs/lib/commands/dr.py | 316 ++++++++ - pcs/lib/communication/corosync.py | 28 + - pcs/lib/communication/status.py | 97 +++ - pcs/lib/dr/__init__.py | 0 - pcs/lib/dr/config/__init__.py | 0 - pcs/lib/dr/config/facade.py | 49 ++ - pcs/lib/dr/env.py | 28 + - pcs/lib/env.py | 17 + - pcs/lib/file/instance.py | 21 +- - pcs/lib/file/metadata.py | 8 + - pcs/lib/file/toolbox.py | 80 +- - pcs/lib/node.py | 5 +- - pcs/lib/node_communication_format.py | 16 + - pcs/lib/reports.py | 31 + - pcs/pcs.8 | 18 +- - pcs/pcs_internal.py | 1 + - pcs/settings_default.py | 1 + - pcs/usage.py | 32 +- - .../tier0/cli/common/test_console_report.py | 24 + - pcs_test/tier0/cli/test_dr.py | 293 
+++++++ - pcs_test/tier0/common/test_dr.py | 167 ++++ - .../lib/commands/cluster/test_add_nodes.py | 143 +++- - pcs_test/tier0/lib/commands/dr/__init__.py | 0 - .../tier0/lib/commands/dr/test_destroy.py | 342 ++++++++ - .../tier0/lib/commands/dr/test_get_config.py | 134 ++++ - .../lib/commands/dr/test_set_recovery_site.py | 702 ++++++++++++++++ - pcs_test/tier0/lib/commands/dr/test_status.py | 756 ++++++++++++++++++ - .../tier0/lib/communication/test_status.py | 7 + - pcs_test/tier0/lib/dr/__init__.py | 0 - pcs_test/tier0/lib/dr/test_facade.py | 138 ++++ - pcs_test/tier0/lib/test_env.py | 42 +- - .../tools/command_env/config_corosync_conf.py | 9 +- - pcs_test/tools/command_env/config_http.py | 3 + - .../tools/command_env/config_http_corosync.py | 24 + - .../tools/command_env/config_http_files.py | 28 +- - .../tools/command_env/config_http_status.py | 52 ++ - .../mock_get_local_corosync_conf.py | 12 +- - pcsd/capabilities.xml | 12 + - pcsd/pcsd_file.rb | 15 + - pcsd/pcsd_remove_file.rb | 7 + - pcsd/remote.rb | 19 +- - pcsd/settings.rb | 1 + - pcsd/settings.rb.debian | 1 + - pylintrc | 2 +- - 55 files changed, 3964 insertions(+), 68 deletions(-) - create mode 100644 pcs/cli/dr.py - create mode 100644 pcs/cli/routing/dr.py - create mode 100644 pcs/common/dr.py - create mode 100644 pcs/lib/commands/dr.py - create mode 100644 pcs/lib/communication/status.py - create mode 100644 pcs/lib/dr/__init__.py - create mode 100644 pcs/lib/dr/config/__init__.py - create mode 100644 pcs/lib/dr/config/facade.py - create mode 100644 pcs/lib/dr/env.py - create mode 100644 pcs_test/tier0/cli/test_dr.py - create mode 100644 pcs_test/tier0/common/test_dr.py - create mode 100644 pcs_test/tier0/lib/commands/dr/__init__.py - create mode 100644 pcs_test/tier0/lib/commands/dr/test_destroy.py - create mode 100644 pcs_test/tier0/lib/commands/dr/test_get_config.py - create mode 100644 pcs_test/tier0/lib/commands/dr/test_set_recovery_site.py - create mode 100644 
pcs_test/tier0/lib/commands/dr/test_status.py - create mode 100644 pcs_test/tier0/lib/communication/test_status.py - create mode 100644 pcs_test/tier0/lib/dr/__init__.py - create mode 100644 pcs_test/tier0/lib/dr/test_facade.py - create mode 100644 pcs_test/tools/command_env/config_http_status.py - -diff --git a/CHANGELOG.md b/CHANGELOG.md -index 69e6da44..889436c3 100644 ---- a/CHANGELOG.md -+++ b/CHANGELOG.md -@@ -1,5 +1,14 @@ - # Change Log - -+## [Unreleased] -+ -+### Added -+- It is possible to configure a disaster-recovery site and display its status -+ ([rhbz#1676431]) -+ -+[rhbz#1676431]: https://bugzilla.redhat.com/show_bug.cgi?id=1676431 -+ -+ - ## [0.10.4] - 2019-11-28 - - ### Added -diff --git a/pcs/app.py b/pcs/app.py -index 8df07c1d..defc4055 100644 ---- a/pcs/app.py -+++ b/pcs/app.py -@@ -25,6 +25,7 @@ from pcs.cli.routing import ( - cluster, - config, - constraint, -+ dr, - host, - node, - pcsd, -@@ -245,6 +246,7 @@ def main(argv=None): - "booth": booth.booth_cmd, - "host": host.host_cmd, - "client": client.client_cmd, -+ "dr": dr.dr_cmd, - "help": lambda lib, argv, modifiers: usage.main(), - } - try: -diff --git a/pcs/cli/common/console_report.py b/pcs/cli/common/console_report.py -index 0a730cfa..d349c823 100644 ---- a/pcs/cli/common/console_report.py -+++ b/pcs/cli/common/console_report.py -@@ -2,6 +2,7 @@ - from collections import defaultdict - from collections.abc import Iterable - from functools import partial -+from typing import Mapping - import sys - - from pcs.common import ( -@@ -46,6 +47,7 @@ _file_role_translation = { - file_type_codes.BOOTH_CONFIG: "Booth configuration", - file_type_codes.BOOTH_KEY: "Booth key", - file_type_codes.COROSYNC_AUTHKEY: "Corosync authkey", -+ file_type_codes.PCS_DR_CONFIG: "disaster-recovery configuration", - file_type_codes.PACEMAKER_AUTHKEY: "Pacemaker authkey", - file_type_codes.PCSD_ENVIRONMENT_CONFIG: "pcsd configuration", - file_type_codes.PCSD_SSL_CERT: "pcsd SSL certificate", -@@ -53,7 +55,7 @@ 
_file_role_translation = { - file_type_codes.PCS_KNOWN_HOSTS: "known-hosts", - file_type_codes.PCS_SETTINGS_CONF: "pcs configuration", - } --_file_role_to_option_translation = { -+_file_role_to_option_translation: Mapping[str, str] = { - file_type_codes.BOOTH_CONFIG: "--booth-conf", - file_type_codes.BOOTH_KEY: "--booth-key", - file_type_codes.CIB: "-f", -@@ -2284,4 +2286,16 @@ CODE_TO_MESSAGE_BUILDER_MAP = { - "resources\n\n{crm_simulate_plaintext_output}" - ).format(**info) - , -+ -+ codes.DR_CONFIG_ALREADY_EXIST: lambda info: ( -+ "Disaster-recovery already configured" -+ ).format(**info), -+ -+ codes.DR_CONFIG_DOES_NOT_EXIST: lambda info: ( -+ "Disaster-recovery is not configured" -+ ).format(**info), -+ -+ codes.NODE_IN_LOCAL_CLUSTER: lambda info: ( -+ "Node '{node}' is part of local cluster" -+ ).format(**info), - } -diff --git a/pcs/cli/common/lib_wrapper.py b/pcs/cli/common/lib_wrapper.py -index 27b7d8b1..4ef6bf2f 100644 ---- a/pcs/cli/common/lib_wrapper.py -+++ b/pcs/cli/common/lib_wrapper.py -@@ -9,6 +9,7 @@ from pcs.lib.commands import ( - booth, - cib_options, - cluster, -+ dr, - fencing_topology, - node, - pcsd, -@@ -183,6 +184,18 @@ def load_module(env, middleware_factory, name): - } - ) - -+ if name == "dr": -+ return bind_all( -+ env, -+ middleware.build(middleware_factory.corosync_conf_existing), -+ { -+ "get_config": dr.get_config, -+ "destroy": dr.destroy, -+ "set_recovery_site": dr.set_recovery_site, -+ "status_all_sites_plaintext": dr.status_all_sites_plaintext, -+ } -+ ) -+ - if name == "remote_node": - return bind_all( - env, -diff --git a/pcs/cli/dr.py b/pcs/cli/dr.py -new file mode 100644 -index 00000000..c6830aa0 ---- /dev/null -+++ b/pcs/cli/dr.py -@@ -0,0 +1,138 @@ -+from typing import ( -+ Any, -+ List, -+ Sequence, -+) -+ -+from pcs.cli.common.console_report import error -+from pcs.cli.common.errors import CmdLineInputError -+from pcs.cli.common.parse_args import InputModifiers -+from pcs.common import report_codes -+from pcs.common.dr 
import ( -+ DrConfigDto, -+ DrConfigSiteDto, -+ DrSiteStatusDto, -+) -+from pcs.common.tools import indent -+ -+def config( -+ lib: Any, -+ argv: Sequence[str], -+ modifiers: InputModifiers, -+) -> None: -+ """ -+ Options: None -+ """ -+ modifiers.ensure_only_supported() -+ if argv: -+ raise CmdLineInputError() -+ config_raw = lib.dr.get_config() -+ try: -+ config_dto = DrConfigDto.from_dict(config_raw) -+ except (KeyError, TypeError, ValueError): -+ raise error( -+ "Unable to communicate with pcsd, received response:\n" -+ f"{config_raw}" -+ ) -+ -+ lines = ["Local site:"] -+ lines.extend(indent(_config_site_lines(config_dto.local_site))) -+ for site_dto in config_dto.remote_site_list: -+ lines.append("Remote site:") -+ lines.extend(indent(_config_site_lines(site_dto))) -+ print("\n".join(lines)) -+ -+def _config_site_lines(site_dto: DrConfigSiteDto) -> List[str]: -+ lines = [f"Role: {site_dto.site_role.capitalize()}"] -+ if site_dto.node_list: -+ lines.append("Nodes:") -+ lines.extend(indent(sorted([node.name for node in site_dto.node_list]))) -+ return lines -+ -+ -+def set_recovery_site( -+ lib: Any, -+ argv: Sequence[str], -+ modifiers: InputModifiers, -+) -> None: -+ """ -+ Options: -+ * --request-timeout - HTTP timeout for node authorization check -+ """ -+ modifiers.ensure_only_supported("--request-timeout") -+ if len(argv) != 1: -+ raise CmdLineInputError() -+ lib.dr.set_recovery_site(argv[0]) -+ -+def status( -+ lib: Any, -+ argv: Sequence[str], -+ modifiers: InputModifiers, -+) -> None: -+ """ -+ Options: -+ * --full - show full details, node attributes and failcount -+ * --hide-inactive - hide inactive resources -+ * --request-timeout - HTTP timeout for node authorization check -+ """ -+ modifiers.ensure_only_supported( -+ "--full", "--hide-inactive", "--request-timeout", -+ ) -+ if argv: -+ raise CmdLineInputError() -+ -+ status_list_raw = lib.dr.status_all_sites_plaintext( -+ hide_inactive_resources=modifiers.get("--hide-inactive"), -+ 
verbose=modifiers.get("--full"), -+ ) -+ try: -+ status_list = [ -+ DrSiteStatusDto.from_dict(status_raw) -+ for status_raw in status_list_raw -+ ] -+ except (KeyError, TypeError, ValueError): -+ raise error( -+ "Unable to communicate with pcsd, received response:\n" -+ f"{status_list_raw}" -+ ) -+ -+ has_errors = False -+ plaintext_parts = [] -+ for site_status in status_list: -+ plaintext_parts.append( -+ "--- {local_remote} cluster - {role} site ---".format( -+ local_remote=("Local" if site_status.local_site else "Remote"), -+ role=site_status.site_role.capitalize() -+ ) -+ ) -+ if site_status.status_successfully_obtained: -+ plaintext_parts.append(site_status.status_plaintext.strip()) -+ plaintext_parts.extend(["", ""]) -+ else: -+ has_errors = True -+ plaintext_parts.extend([ -+ "Error: Unable to get status of the cluster from any node", -+ "" -+ ]) -+ print("\n".join(plaintext_parts).strip()) -+ if has_errors: -+ raise error("Unable to get status of all sites") -+ -+ -+def destroy( -+ lib: Any, -+ argv: Sequence[str], -+ modifiers: InputModifiers, -+) -> None: -+ """ -+ Options: -+ * --skip-offline - skip unreachable nodes (including missing auth token) -+ * --request-timeout - HTTP timeout for node authorization check -+ """ -+ modifiers.ensure_only_supported("--skip-offline", "--request-timeout") -+ if argv: -+ raise CmdLineInputError() -+ force_flags = [] -+ if modifiers.get("--skip-offline"): -+ force_flags.append(report_codes.SKIP_OFFLINE_NODES) -+ lib.dr.destroy(force_flags=force_flags) -diff --git a/pcs/cli/routing/dr.py b/pcs/cli/routing/dr.py -new file mode 100644 -index 00000000..dbf44c1c ---- /dev/null -+++ b/pcs/cli/routing/dr.py -@@ -0,0 +1,15 @@ -+from pcs import usage -+from pcs.cli import dr -+from pcs.cli.common.routing import create_router -+ -+dr_cmd = create_router( -+ { -+ "help": lambda lib, argv, modifiers: usage.dr(argv), -+ "config": dr.config, -+ "destroy": dr.destroy, -+ "set-recovery-site": dr.set_recovery_site, -+ "status": 
dr.status, -+ }, -+ ["dr"], -+ default_cmd="help", -+) -diff --git a/pcs/cluster.py b/pcs/cluster.py -index 3a931b60..9473675f 100644 ---- a/pcs/cluster.py -+++ b/pcs/cluster.py -@@ -1209,6 +1209,7 @@ def cluster_destroy(lib, argv, modifiers): - settings.corosync_conf_file, - settings.corosync_authkey_file, - settings.pacemaker_authkey_file, -+ settings.pcsd_dr_config_location, - ]) - state_files = [ - "cib-*", -diff --git a/pcs/common/dr.py b/pcs/common/dr.py -new file mode 100644 -index 00000000..1648d93d ---- /dev/null -+++ b/pcs/common/dr.py -@@ -0,0 +1,109 @@ -+from enum import auto -+from typing import ( -+ Any, -+ Iterable, -+ Mapping, -+) -+ -+from pcs.common.interface.dto import DataTransferObject -+from pcs.common.tools import AutoNameEnum -+ -+ -+class DrRole(AutoNameEnum): -+ PRIMARY = auto() -+ RECOVERY = auto() -+ -+ -+class DrConfigNodeDto(DataTransferObject): -+ def __init__(self, name: str): -+ self.name = name -+ -+ def to_dict(self) -> Mapping[str, Any]: -+ return dict(name=self.name) -+ -+ @classmethod -+ def from_dict(cls, payload: Mapping[str, Any]) -> "DrConfigNodeDto": -+ return cls(payload["name"]) -+ -+ -+class DrConfigSiteDto(DataTransferObject): -+ def __init__( -+ self, -+ site_role: DrRole, -+ node_list: Iterable[DrConfigNodeDto] -+ ): -+ self.site_role = site_role -+ self.node_list = node_list -+ -+ def to_dict(self) -> Mapping[str, Any]: -+ return dict( -+ site_role=self.site_role.value, -+ node_list=[node.to_dict() for node in self.node_list] -+ ) -+ -+ @classmethod -+ def from_dict(cls, payload: Mapping[str, Any]) -> "DrConfigSiteDto": -+ return cls( -+ DrRole(payload["site_role"]), -+ [ -+ DrConfigNodeDto.from_dict(payload_node) -+ for payload_node in payload["node_list"] -+ ], -+ ) -+ -+ -+class DrConfigDto(DataTransferObject): -+ def __init__( -+ self, -+ local_site: DrConfigSiteDto, -+ remote_site_list: Iterable[DrConfigSiteDto] -+ ): -+ self.local_site = local_site -+ self.remote_site_list = remote_site_list -+ -+ def 
to_dict(self) -> Mapping[str, Any]: -+ return dict( -+ local_site=self.local_site.to_dict(), -+ remote_site_list=[site.to_dict() for site in self.remote_site_list], -+ ) -+ -+ @classmethod -+ def from_dict(cls, payload: Mapping[str, Any]) -> "DrConfigDto": -+ return cls( -+ DrConfigSiteDto.from_dict(payload["local_site"]), -+ [ -+ DrConfigSiteDto.from_dict(payload_site) -+ for payload_site in payload["remote_site_list"] -+ ], -+ ) -+ -+ -+class DrSiteStatusDto(DataTransferObject): -+ def __init__( -+ self, -+ local_site: bool, -+ site_role: DrRole, -+ status_plaintext: str, -+ status_successfully_obtained: bool -+ ): -+ self.local_site = local_site -+ self.site_role = site_role -+ self.status_plaintext = status_plaintext -+ self.status_successfully_obtained = status_successfully_obtained -+ -+ def to_dict(self) -> Mapping[str, Any]: -+ return dict( -+ local_site=self.local_site, -+ site_role=self.site_role.value, -+ status_plaintext=self.status_plaintext, -+ status_successfully_obtained=self.status_successfully_obtained, -+ ) -+ -+ @classmethod -+ def from_dict(cls, payload: Mapping[str, Any]) -> "DrSiteStatusDto": -+ return cls( -+ payload["local_site"], -+ DrRole(payload["site_role"]), -+ payload["status_plaintext"], -+ payload["status_successfully_obtained"], -+ ) -diff --git a/pcs/common/file_type_codes.py b/pcs/common/file_type_codes.py -index 9c801180..967aa76b 100644 ---- a/pcs/common/file_type_codes.py -+++ b/pcs/common/file_type_codes.py -@@ -1,11 +1,16 @@ --BOOTH_CONFIG = "BOOTH_CONFIG" --BOOTH_KEY = "BOOTH_KEY" --CIB = "CIB" --COROSYNC_AUTHKEY = "COROSYNC_AUTHKEY" --COROSYNC_CONF = "COROSYNC_CONF" --PACEMAKER_AUTHKEY = "PACEMAKER_AUTHKEY" --PCSD_ENVIRONMENT_CONFIG = "PCSD_ENVIRONMENT_CONFIG" --PCSD_SSL_CERT = "PCSD_SSL_CERT" --PCSD_SSL_KEY = "PCSD_SSL_KEY" --PCS_KNOWN_HOSTS = "PCS_KNOWN_HOSTS" --PCS_SETTINGS_CONF = "PCS_SETTINGS_CONF" -+from typing import NewType -+ -+FileTypeCode = NewType("FileTypeCode", str) -+ -+BOOTH_CONFIG = 
FileTypeCode("BOOTH_CONFIG") -+BOOTH_KEY = FileTypeCode("BOOTH_KEY") -+CIB = FileTypeCode("CIB") -+COROSYNC_AUTHKEY = FileTypeCode("COROSYNC_AUTHKEY") -+COROSYNC_CONF = FileTypeCode("COROSYNC_CONF") -+PACEMAKER_AUTHKEY = FileTypeCode("PACEMAKER_AUTHKEY") -+PCSD_ENVIRONMENT_CONFIG = FileTypeCode("PCSD_ENVIRONMENT_CONFIG") -+PCSD_SSL_CERT = FileTypeCode("PCSD_SSL_CERT") -+PCSD_SSL_KEY = FileTypeCode("PCSD_SSL_KEY") -+PCS_KNOWN_HOSTS = FileTypeCode("PCS_KNOWN_HOSTS") -+PCS_SETTINGS_CONF = FileTypeCode("PCS_SETTINGS_CONF") -+PCS_DR_CONFIG = FileTypeCode("PCS_DR_CONFIG") -diff --git a/pcs/common/report_codes.py b/pcs/common/report_codes.py -index 4e3433a8..514ac079 100644 ---- a/pcs/common/report_codes.py -+++ b/pcs/common/report_codes.py -@@ -141,6 +141,8 @@ COROSYNC_TRANSPORT_UNSUPPORTED_OPTIONS = "COROSYNC_TRANSPORT_UNSUPPORTED_OPTIONS - CRM_MON_ERROR = "CRM_MON_ERROR" - DEFAULTS_CAN_BE_OVERRIDEN = "DEFAULTS_CAN_BE_OVERRIDEN" - DEPRECATED_OPTION = "DEPRECATED_OPTION" -+DR_CONFIG_ALREADY_EXIST = "DR_CONFIG_ALREADY_EXIST" -+DR_CONFIG_DOES_NOT_EXIST = "DR_CONFIG_DOES_NOT_EXIST" - DUPLICATE_CONSTRAINTS_EXIST = "DUPLICATE_CONSTRAINTS_EXIST" - EMPTY_RESOURCE_SET_LIST = "EMPTY_RESOURCE_SET_LIST" - EMPTY_ID = "EMPTY_ID" -@@ -203,6 +205,7 @@ NONE_HOST_FOUND = "NONE_HOST_FOUND" - NODE_USED_AS_TIE_BREAKER = "NODE_USED_AS_TIE_BREAKER" - NODES_TO_REMOVE_UNREACHABLE = "NODES_TO_REMOVE_UNREACHABLE" - NODE_TO_CLEAR_IS_STILL_IN_CLUSTER = "NODE_TO_CLEAR_IS_STILL_IN_CLUSTER" -+NODE_IN_LOCAL_CLUSTER = "NODE_IN_LOCAL_CLUSTER" - OMITTING_NODE = "OMITTING_NODE" - OBJECT_WITH_ID_IN_UNEXPECTED_CONTEXT = "OBJECT_WITH_ID_IN_UNEXPECTED_CONTEXT" - PACEMAKER_LOCAL_NODE_NAME_NOT_FOUND = "PACEMAKER_LOCAL_NODE_NAME_NOT_FOUND" -diff --git a/pcs/lib/commands/cluster.py b/pcs/lib/commands/cluster.py -index 64015864..f30dcb25 100644 ---- a/pcs/lib/commands/cluster.py -+++ b/pcs/lib/commands/cluster.py -@@ -777,7 +777,7 @@ def add_nodes( - skip_wrong_config=force, - ) - -- # distribute corosync and 
pacemaker authkeys -+ # distribute corosync and pacemaker authkeys and other config files - files_action = {} - forceable_io_error_creator = reports.get_problem_creator( - report_codes.SKIP_FILE_DISTRIBUTION_ERRORS, force -@@ -814,6 +814,22 @@ def add_nodes( - file_path=settings.pacemaker_authkey_file, - )) - -+ if os.path.isfile(settings.pcsd_dr_config_location): -+ try: -+ files_action.update( -+ node_communication_format.pcs_dr_config_file( -+ open(settings.pcsd_dr_config_location, "rb").read() -+ ) -+ ) -+ except EnvironmentError as e: -+ report_processor.report(forceable_io_error_creator( -+ reports.file_io_error, -+ file_type_codes.PCS_DR_CONFIG, -+ RawFileError.ACTION_READ, -+ format_environment_error(e), -+ file_path=settings.pcsd_dr_config_location, -+ )) -+ - # pcs_settings.conf was previously synced using pcsdcli send_local_configs. - # This has been changed temporarily until new system for distribution and - # syncronization of configs will be introduced. -diff --git a/pcs/lib/commands/dr.py b/pcs/lib/commands/dr.py -new file mode 100644 -index 00000000..41ddb5cb ---- /dev/null -+++ b/pcs/lib/commands/dr.py -@@ -0,0 +1,316 @@ -+from typing import ( -+ Any, -+ Container, -+ Iterable, -+ List, -+ Mapping, -+ Tuple, -+) -+ -+from pcs.common import file_type_codes, report_codes -+from pcs.common.dr import ( -+ DrConfigDto, -+ DrConfigNodeDto, -+ DrConfigSiteDto, -+ DrSiteStatusDto, -+) -+from pcs.common.file import RawFileError -+from pcs.common.node_communicator import RequestTarget -+from pcs.common.reports import SimpleReportProcessor -+ -+from pcs.lib import node_communication_format, reports -+from pcs.lib.communication.corosync import GetCorosyncConf -+from pcs.lib.communication.nodes import ( -+ DistributeFilesWithoutForces, -+ RemoveFilesWithoutForces, -+) -+from pcs.lib.communication.status import GetFullClusterStatusPlaintext -+from pcs.lib.communication.tools import ( -+ run as run_com_cmd, -+ run_and_raise, -+) -+from 
pcs.lib.corosync.config_facade import ConfigFacade as CorosyncConfigFacade -+from pcs.lib.dr.config.facade import ( -+ DrRole, -+ Facade as DrConfigFacade, -+) -+from pcs.lib.env import LibraryEnvironment -+from pcs.lib.errors import LibraryError, ReportItemList -+from pcs.lib.file.instance import FileInstance -+from pcs.lib.file.raw_file import raw_file_error_report -+from pcs.lib.file.toolbox import for_file_type as get_file_toolbox -+from pcs.lib.interface.config import ParserErrorException -+from pcs.lib.node import get_existing_nodes_names -+ -+ -+def get_config(env: LibraryEnvironment) -> Mapping[str, Any]: -+ """ -+ Return local disaster recovery config -+ -+ env -- LibraryEnvironment -+ """ -+ report_processor = SimpleReportProcessor(env.report_processor) -+ report_list, dr_config = _load_dr_config(env.get_dr_env().config) -+ report_processor.report_list(report_list) -+ if report_processor.has_errors: -+ raise LibraryError() -+ -+ return DrConfigDto( -+ DrConfigSiteDto( -+ dr_config.local_role, -+ [] -+ ), -+ [ -+ DrConfigSiteDto( -+ site.role, -+ [DrConfigNodeDto(name) for name in site.node_name_list] -+ ) -+ for site in dr_config.get_remote_site_list() -+ ] -+ ).to_dict() -+ -+ -+def set_recovery_site(env: LibraryEnvironment, node_name: str) -> None: -+ """ -+ Set up disaster recovery with the local cluster being the primary site -+ -+ env -+ node_name -- a known host from the recovery site -+ """ -+ if env.ghost_file_codes: -+ raise LibraryError( -+ reports.live_environment_required(env.ghost_file_codes) -+ ) -+ report_processor = SimpleReportProcessor(env.report_processor) -+ dr_env = env.get_dr_env() -+ if dr_env.config.raw_file.exists(): -+ report_processor.report(reports.dr_config_already_exist()) -+ target_factory = env.get_node_target_factory() -+ -+ local_nodes, report_list = get_existing_nodes_names( -+ env.get_corosync_conf(), -+ error_on_missing_name=True -+ ) -+ report_processor.report_list(report_list) -+ -+ if node_name in local_nodes: -+ 
report_processor.report(reports.node_in_local_cluster(node_name)) -+ -+ report_list, local_targets = target_factory.get_target_list_with_reports( -+ local_nodes, allow_skip=False, report_none_host_found=False -+ ) -+ report_processor.report_list(report_list) -+ -+ report_list, remote_targets = ( -+ target_factory.get_target_list_with_reports( -+ [node_name], allow_skip=False, report_none_host_found=False -+ ) -+ ) -+ report_processor.report_list(report_list) -+ -+ if report_processor.has_errors: -+ raise LibraryError() -+ -+ com_cmd = GetCorosyncConf(env.report_processor) -+ com_cmd.set_targets(remote_targets) -+ remote_cluster_nodes, report_list = get_existing_nodes_names( -+ CorosyncConfigFacade.from_string( -+ run_and_raise(env.get_node_communicator(), com_cmd) -+ ), -+ error_on_missing_name=True -+ ) -+ if report_processor.report_list(report_list): -+ raise LibraryError() -+ -+ # ensure we have tokens for all nodes of remote cluster -+ report_list, remote_targets = target_factory.get_target_list_with_reports( -+ remote_cluster_nodes, allow_skip=False, report_none_host_found=False -+ ) -+ if report_processor.report_list(report_list): -+ raise LibraryError() -+ dr_config_exporter = ( -+ get_file_toolbox(file_type_codes.PCS_DR_CONFIG).exporter -+ ) -+ # create dr config for remote cluster -+ remote_dr_cfg = dr_env.create_facade(DrRole.RECOVERY) -+ remote_dr_cfg.add_site(DrRole.PRIMARY, local_nodes) -+ # send config to all node of remote cluster -+ distribute_file_cmd = DistributeFilesWithoutForces( -+ env.report_processor, -+ node_communication_format.pcs_dr_config_file( -+ dr_config_exporter.export(remote_dr_cfg.config) -+ ) -+ ) -+ distribute_file_cmd.set_targets(remote_targets) -+ run_and_raise(env.get_node_communicator(), distribute_file_cmd) -+ # create new dr config, with local cluster as primary site -+ local_dr_cfg = dr_env.create_facade(DrRole.PRIMARY) -+ local_dr_cfg.add_site(DrRole.RECOVERY, remote_cluster_nodes) -+ distribute_file_cmd = 
DistributeFilesWithoutForces( -+ env.report_processor, -+ node_communication_format.pcs_dr_config_file( -+ dr_config_exporter.export(local_dr_cfg.config) -+ ) -+ ) -+ distribute_file_cmd.set_targets(local_targets) -+ run_and_raise(env.get_node_communicator(), distribute_file_cmd) -+ # Note: No token sync across multiple clusters. Most probably they are in -+ # different subnetworks. -+ -+ -+def status_all_sites_plaintext( -+ env: LibraryEnvironment, -+ hide_inactive_resources: bool = False, -+ verbose: bool = False, -+) -> List[Mapping[str, Any]]: -+ """ -+ Return local site's and all remote sites' status as plaintext -+ -+ env -- LibraryEnvironment -+ hide_inactive_resources -- if True, do not display non-running resources -+ verbose -- if True, display more info -+ """ -+ # The command does not provide an option to skip offline / unreacheable / -+ # misbehaving nodes. -+ # The point of such skipping is to stop a command if it is unable to make -+ # changes on all nodes. The user can then decide to proceed anyway and -+ # make changes on the skipped nodes later manually. -+ # This command only reads from nodes so it automatically asks other nodes -+ # if one is offline / misbehaving. 
-+ class SiteData(): -+ local: bool -+ role: DrRole -+ target_list: Iterable[RequestTarget] -+ status_loaded: bool -+ status_plaintext: str -+ -+ def __init__(self, local, role, target_list): -+ self.local = local -+ self.role = role -+ self.target_list = target_list -+ self.status_loaded = False -+ self.status_plaintext = "" -+ -+ -+ if env.ghost_file_codes: -+ raise LibraryError( -+ reports.live_environment_required(env.ghost_file_codes) -+ ) -+ -+ report_processor = SimpleReportProcessor(env.report_processor) -+ report_list, dr_config = _load_dr_config(env.get_dr_env().config) -+ report_processor.report_list(report_list) -+ if report_processor.has_errors: -+ raise LibraryError() -+ -+ site_data_list = [] -+ target_factory = env.get_node_target_factory() -+ -+ # get local nodes -+ local_nodes, report_list = get_existing_nodes_names(env.get_corosync_conf()) -+ report_processor.report_list(report_list) -+ report_list, local_targets = target_factory.get_target_list_with_reports( -+ local_nodes, -+ skip_non_existing=True, -+ ) -+ report_processor.report_list(report_list) -+ site_data_list.append(SiteData(True, dr_config.local_role, local_targets)) -+ -+ # get remote sites' nodes -+ for conf_remote_site in dr_config.get_remote_site_list(): -+ report_list, remote_targets = ( -+ target_factory.get_target_list_with_reports( -+ conf_remote_site.node_name_list, -+ skip_non_existing=True, -+ ) -+ ) -+ report_processor.report_list(report_list) -+ site_data_list.append( -+ SiteData(False, conf_remote_site.role, remote_targets) -+ ) -+ if report_processor.has_errors: -+ raise LibraryError() -+ -+ # get all statuses -+ for site_data in site_data_list: -+ com_cmd = GetFullClusterStatusPlaintext( -+ report_processor, -+ hide_inactive_resources=hide_inactive_resources, -+ verbose=verbose, -+ ) -+ com_cmd.set_targets(site_data.target_list) -+ site_data.status_loaded, site_data.status_plaintext = run_com_cmd( -+ env.get_node_communicator(), com_cmd -+ ) -+ -+ return [ -+ 
DrSiteStatusDto( -+ site_data.local, -+ site_data.role, -+ site_data.status_plaintext, -+ site_data.status_loaded, -+ ).to_dict() -+ for site_data in site_data_list -+ ] -+ -+def _load_dr_config( -+ config_file: FileInstance, -+) -> Tuple[ReportItemList, DrConfigFacade]: -+ if not config_file.raw_file.exists(): -+ return [reports.dr_config_does_not_exist()], DrConfigFacade.empty() -+ try: -+ return [], config_file.read_to_facade() -+ except RawFileError as e: -+ return [raw_file_error_report(e)], DrConfigFacade.empty() -+ except ParserErrorException as e: -+ return ( -+ config_file.parser_exception_to_report_list(e), -+ DrConfigFacade.empty() -+ ) -+ -+ -+def destroy(env: LibraryEnvironment, force_flags: Container[str] = ()) -> None: -+ """ -+ Destroy disaster-recovery configuration on all sites -+ """ -+ if env.ghost_file_codes: -+ raise LibraryError( -+ reports.live_environment_required(env.ghost_file_codes) -+ ) -+ -+ report_processor = SimpleReportProcessor(env.report_processor) -+ skip_offline = report_codes.SKIP_OFFLINE_NODES in force_flags -+ -+ report_list, dr_config = _load_dr_config(env.get_dr_env().config) -+ report_processor.report_list(report_list) -+ -+ if report_processor.has_errors: -+ raise LibraryError() -+ -+ local_nodes, report_list = get_existing_nodes_names(env.get_corosync_conf()) -+ report_processor.report_list(report_list) -+ -+ if report_processor.has_errors: -+ raise LibraryError() -+ -+ remote_nodes: List[str] = [] -+ for conf_remote_site in dr_config.get_remote_site_list(): -+ remote_nodes.extend(conf_remote_site.node_name_list) -+ -+ target_factory = env.get_node_target_factory() -+ report_list, targets = target_factory.get_target_list_with_reports( -+ remote_nodes + local_nodes, skip_non_existing=skip_offline, -+ ) -+ report_processor.report_list(report_list) -+ if report_processor.has_errors: -+ raise LibraryError() -+ -+ com_cmd = RemoveFilesWithoutForces( -+ env.report_processor, { -+ "pcs disaster-recovery config": { -+ "type": 
"pcs_disaster_recovery_conf", -+ }, -+ }, -+ ) -+ com_cmd.set_targets(targets) -+ run_and_raise(env.get_node_communicator(), com_cmd) -diff --git a/pcs/lib/communication/corosync.py b/pcs/lib/communication/corosync.py -index 0f3c3787..1a78e0de 100644 ---- a/pcs/lib/communication/corosync.py -+++ b/pcs/lib/communication/corosync.py -@@ -138,3 +138,31 @@ class ReloadCorosyncConf( - def on_complete(self): - if not self.__was_successful and self.__has_failures: - self._report(reports.unable_to_perform_operation_on_any_node()) -+ -+ -+class GetCorosyncConf( -+ AllSameDataMixin, OneByOneStrategyMixin, RunRemotelyBase -+): -+ __was_successful = False -+ __has_failures = False -+ __corosync_conf = None -+ -+ def _get_request_data(self): -+ return RequestData("remote/get_corosync_conf") -+ -+ def _process_response(self, response): -+ report = response_to_report_item( -+ response, severity=ReportItemSeverity.WARNING -+ ) -+ if report is not None: -+ self.__has_failures = True -+ self._report(report) -+ return self._get_next_list() -+ self.__corosync_conf = response.data -+ self.__was_successful = True -+ return [] -+ -+ def on_complete(self): -+ if not self.__was_successful and self.__has_failures: -+ self._report(reports.unable_to_perform_operation_on_any_node()) -+ return self.__corosync_conf -diff --git a/pcs/lib/communication/status.py b/pcs/lib/communication/status.py -new file mode 100644 -index 00000000..3470415a ---- /dev/null -+++ b/pcs/lib/communication/status.py -@@ -0,0 +1,97 @@ -+import json -+from typing import Tuple -+ -+from pcs.common.node_communicator import RequestData -+from pcs.lib import reports -+from pcs.lib.communication.tools import ( -+ AllSameDataMixin, -+ OneByOneStrategyMixin, -+ RunRemotelyBase, -+) -+from pcs.lib.errors import ReportItemSeverity -+from pcs.lib.node_communication import response_to_report_item -+ -+ -+class GetFullClusterStatusPlaintext( -+ AllSameDataMixin, OneByOneStrategyMixin, RunRemotelyBase -+): -+ def __init__( -+ self, 
report_processor, hide_inactive_resources=False, verbose=False -+ ): -+ super().__init__(report_processor) -+ self._hide_inactive_resources = hide_inactive_resources -+ self._verbose = verbose -+ self._cluster_status = "" -+ self._was_successful = False -+ -+ def _get_request_data(self): -+ return RequestData( -+ "remote/cluster_status_plaintext", -+ [ -+ ( -+ "data_json", -+ json.dumps(dict( -+ hide_inactive_resources=self._hide_inactive_resources, -+ verbose=self._verbose, -+ )) -+ ) -+ ], -+ ) -+ -+ def _process_response(self, response): -+ report = response_to_report_item( -+ response, severity=ReportItemSeverity.WARNING -+ ) -+ if report is not None: -+ self._report(report) -+ return self._get_next_list() -+ -+ node = response.request.target.label -+ try: -+ output = json.loads(response.data) -+ if output["status"] == "success": -+ self._was_successful = True -+ self._cluster_status = output["data"] -+ return [] -+ if output["status_msg"]: -+ self._report( -+ reports.node_communication_command_unsuccessful( -+ node, -+ response.request.action, -+ output["status_msg"] -+ ) -+ ) -+ # TODO Node name should be added to each received report item and -+ # those modified report itemss should be reported. That, however, -+ # requires reports overhaul which would add posibility to add a -+ # node name to any report item. Also, infos and warnings should not -+ # be ignored. 
-+ if output["report_list"]: -+ for report_data in output["report_list"]: -+ if ( -+ report_data["severity"] == ReportItemSeverity.ERROR -+ and -+ report_data["report_text"] -+ ): -+ self._report( -+ reports.node_communication_command_unsuccessful( -+ node, -+ response.request.action, -+ report_data["report_text"] -+ ) -+ ) -+ except (ValueError, LookupError, TypeError): -+ self._report(reports.invalid_response_format( -+ node, -+ severity=ReportItemSeverity.WARNING, -+ )) -+ -+ return self._get_next_list() -+ -+ def on_complete(self) -> Tuple[bool, str]: -+ # Usually, reports.unable_to_perform_operation_on_any_node is reported -+ # when the operation was unsuccessful and failed on at least one node. -+ # The only use case this communication command is used does not need -+ # that report and on top of that the report causes confusing ouptut for -+ # the user. The report may be added in a future if needed. -+ return self._was_successful, self._cluster_status -diff --git a/pcs/lib/dr/__init__.py b/pcs/lib/dr/__init__.py -new file mode 100644 -index 00000000..e69de29b -diff --git a/pcs/lib/dr/config/__init__.py b/pcs/lib/dr/config/__init__.py -new file mode 100644 -index 00000000..e69de29b -diff --git a/pcs/lib/dr/config/facade.py b/pcs/lib/dr/config/facade.py -new file mode 100644 -index 00000000..f3187ba5 ---- /dev/null -+++ b/pcs/lib/dr/config/facade.py -@@ -0,0 +1,49 @@ -+from typing import ( -+ Iterable, -+ List, -+ NamedTuple, -+) -+ -+from pcs.common.dr import DrRole -+from pcs.lib.interface.config import FacadeInterface -+ -+ -+class DrSite(NamedTuple): -+ role: DrRole -+ node_name_list: List[str] -+ -+ -+class Facade(FacadeInterface): -+ @classmethod -+ def create(cls, local_role: DrRole) -> "Facade": -+ return cls(dict( -+ local=dict( -+ role=local_role.value, -+ ), -+ remote_sites=[], -+ )) -+ -+ @classmethod -+ def empty(cls) -> "Facade": -+ return cls(dict()) -+ -+ @property -+ def local_role(self) -> DrRole: -+ return 
DrRole(self._config["local"]["role"]) -+ -+ def add_site(self, role: DrRole, node_list: Iterable[str]) -> None: -+ self._config["remote_sites"].append( -+ dict( -+ role=role.value, -+ nodes=[dict(name=node) for node in node_list], -+ ) -+ ) -+ -+ def get_remote_site_list(self) -> List[DrSite]: -+ return [ -+ DrSite( -+ DrRole(conf_site["role"]), -+ [node["name"] for node in conf_site["nodes"]] -+ ) -+ for conf_site in self._config.get("remote_sites", []) -+ ] -diff --git a/pcs/lib/dr/env.py b/pcs/lib/dr/env.py -new file mode 100644 -index 00000000..c73ee622 ---- /dev/null -+++ b/pcs/lib/dr/env.py -@@ -0,0 +1,28 @@ -+from pcs.common import file_type_codes -+ -+from pcs.lib.file.instance import FileInstance -+from pcs.lib.file.toolbox import ( -+ for_file_type as get_file_toolbox, -+ FileToolbox, -+) -+ -+from .config.facade import ( -+ DrRole, -+ Facade, -+) -+ -+class DrEnv: -+ def __init__(self): -+ self._config_file = FileInstance.for_dr_config() -+ -+ @staticmethod -+ def create_facade(role: DrRole) -> Facade: -+ return Facade.create(role) -+ -+ @property -+ def config(self) -> FileInstance: -+ return self._config_file -+ -+ @staticmethod -+ def get_config_toolbox() -> FileToolbox: -+ return get_file_toolbox(file_type_codes.PCS_DR_CONFIG) -diff --git a/pcs/lib/env.py b/pcs/lib/env.py -index 66f7b1a4..0b12103e 100644 ---- a/pcs/lib/env.py -+++ b/pcs/lib/env.py -@@ -3,11 +3,13 @@ from typing import ( - ) - from xml.etree.ElementTree import Element - -+from pcs.common import file_type_codes - from pcs.common.node_communicator import Communicator, NodeCommunicatorFactory - from pcs.common.tools import Version - from pcs.lib import reports - from pcs.lib.booth.env import BoothEnv - from pcs.lib.cib.tools import get_cib_crm_feature_set -+from pcs.lib.dr.env import DrEnv - from pcs.lib.node import get_existing_nodes_names - from pcs.lib.communication import qdevice - from pcs.lib.communication.corosync import ( -@@ -89,6 +91,7 @@ class LibraryEnvironment: - 
self._request_timeout - ) - self.__loaded_booth_env = None -+ self.__loaded_dr_env = None - - self.__timeout_cache = {} - -@@ -108,6 +111,15 @@ class LibraryEnvironment: - def user_groups(self): - return self._user_groups - -+ @property -+ def ghost_file_codes(self): -+ codes = set() -+ if not self.is_cib_live: -+ codes.add(file_type_codes.CIB) -+ if not self.is_corosync_conf_live: -+ codes.add(file_type_codes.COROSYNC_CONF) -+ return codes -+ - def get_cib(self, minimal_version: Optional[Version] = None) -> Element: - if self.__loaded_cib_diff_source is not None: - raise AssertionError("CIB has already been loaded") -@@ -412,3 +424,8 @@ class LibraryEnvironment: - if self.__loaded_booth_env is None: - self.__loaded_booth_env = BoothEnv(name, self._booth_files_data) - return self.__loaded_booth_env -+ -+ def get_dr_env(self) -> DrEnv: -+ if self.__loaded_dr_env is None: -+ self.__loaded_dr_env = DrEnv() -+ return self.__loaded_dr_env -diff --git a/pcs/lib/file/instance.py b/pcs/lib/file/instance.py -index da6b760c..f0812c2d 100644 ---- a/pcs/lib/file/instance.py -+++ b/pcs/lib/file/instance.py -@@ -51,18 +51,27 @@ class FileInstance(): - """ - Factory for known-hosts file - """ -- file_type_code = file_type_codes.PCS_KNOWN_HOSTS -- return cls( -- raw_file.RealFile(metadata.for_file_type(file_type_code)), -- toolbox.for_file_type(file_type_code) -- ) -+ return cls._for_common(file_type_codes.PCS_KNOWN_HOSTS) - - @classmethod - def for_pacemaker_key(cls): - """ - Factory for pacemaker key file - """ -- file_type_code = file_type_codes.PACEMAKER_AUTHKEY -+ return cls._for_common(file_type_codes.PACEMAKER_AUTHKEY) -+ -+ @classmethod -+ def for_dr_config(cls) -> "FileInstance": -+ """ -+ Factory for disaster-recovery config file -+ """ -+ return cls._for_common(file_type_codes.PCS_DR_CONFIG) -+ -+ @classmethod -+ def _for_common( -+ cls, -+ file_type_code: file_type_codes.FileTypeCode, -+ ) -> "FileInstance": - return cls( - 
raw_file.RealFile(metadata.for_file_type(file_type_code)), - toolbox.for_file_type(file_type_code) -diff --git a/pcs/lib/file/metadata.py b/pcs/lib/file/metadata.py -index 175e5ac1..72701aed 100644 ---- a/pcs/lib/file/metadata.py -+++ b/pcs/lib/file/metadata.py -@@ -50,6 +50,14 @@ _metadata = { - permissions=0o600, - is_binary=False, - ), -+ code.PCS_DR_CONFIG: lambda: FileMetadata( -+ file_type_code=code.PCS_DR_CONFIG, -+ path=settings.pcsd_dr_config_location, -+ owner_user_name="root", -+ owner_group_name="root", -+ permissions=0o600, -+ is_binary=False, -+ ) - } - - def for_file_type(file_type_code, *args, **kwargs): -diff --git a/pcs/lib/file/toolbox.py b/pcs/lib/file/toolbox.py -index 5d827887..db852617 100644 ---- a/pcs/lib/file/toolbox.py -+++ b/pcs/lib/file/toolbox.py -@@ -1,4 +1,9 @@ --from collections import namedtuple -+from typing import ( -+ Any, -+ Dict, -+ NamedTuple, -+ Type, -+) - import json - - from pcs.common import file_type_codes as code -@@ -8,6 +13,8 @@ from pcs.lib.booth.config_parser import ( - Exporter as BoothConfigExporter, - Parser as BoothConfigParser, - ) -+from pcs.lib.dr.config.facade import Facade as DrConfigFacade -+from pcs.lib.errors import ReportItemList - from pcs.lib.interface.config import ( - ExporterInterface, - FacadeInterface, -@@ -16,27 +23,23 @@ from pcs.lib.interface.config import ( - ) - - --FileToolbox = namedtuple( -- "FileToolbox", -- [ -- # File type code the toolbox belongs to -- "file_type_code", -- # Provides an easy access for reading and modifying data -- "facade", -- # Turns raw data into a structure which the facade is able to process -- "parser", -- # Turns a structure produced by the parser and the facade to raw data -- "exporter", -- # Checks that the structure is valid -- "validator", -- # Provides means for file syncing based on the file's version -- "version_controller", -- ] --) -+class FileToolbox(NamedTuple): -+ # File type code the toolbox belongs to -+ file_type_code: code.FileTypeCode -+ # 
Provides an easy access for reading and modifying data -+ facade: Type[FacadeInterface] -+ # Turns raw data into a structure which the facade is able to process -+ parser: Type[ParserInterface] -+ # Turns a structure produced by the parser and the facade to raw data -+ exporter: Type[ExporterInterface] -+ # Checks that the structure is valid -+ validator: None # TBI -+ # Provides means for file syncing based on the file's version -+ version_controller: None # TBI - - - class JsonParserException(ParserErrorException): -- def __init__(self, json_exception): -+ def __init__(self, json_exception: json.JSONDecodeError): - super().__init__() - self.json_exception = json_exception - -@@ -45,7 +48,7 @@ class JsonParser(ParserInterface): - Adapts standard json parser to our interfaces - """ - @staticmethod -- def parse(raw_file_data): -+ def parse(raw_file_data: bytes) -> Dict[str, Any]: - try: - # json.loads handles bytes, it expects utf-8, 16 or 32 encoding - return json.loads(raw_file_data) -@@ -54,8 +57,12 @@ class JsonParser(ParserInterface): - - @staticmethod - def exception_to_report_list( -- exception, file_type_code, file_path, force_code, is_forced_or_warning -- ): -+ exception: JsonParserException, -+ file_type_code: code.FileTypeCode, -+ file_path: str, -+ force_code: str, # TODO: fix -+ is_forced_or_warning: bool -+ ) -> ReportItemList: - report_creator = reports.get_problem_creator( - force_code=force_code, is_forced=is_forced_or_warning - ) -@@ -80,7 +87,7 @@ class JsonExporter(ExporterInterface): - Adapts standard json exporter to our interfaces - """ - @staticmethod -- def export(config_structure): -+ def export(config_structure: Dict[str, Any])-> bytes: - return json.dumps( - config_structure, indent=4, sort_keys=True, - ).encode("utf-8") -@@ -88,23 +95,27 @@ class JsonExporter(ExporterInterface): - - class NoopParser(ParserInterface): - @staticmethod -- def parse(raw_file_data): -+ def parse(raw_file_data: bytes) -> bytes: - return raw_file_data - - 
@staticmethod - def exception_to_report_list( -- exception, file_type_code, file_path, force_code, is_forced_or_warning -- ): -+ exception: ParserErrorException, -+ file_type_code: code.FileTypeCode, -+ file_path: str, -+ force_code: str, # TODO: fix -+ is_forced_or_warning: bool -+ ) -> ReportItemList: - return [] - - class NoopExporter(ExporterInterface): - @staticmethod -- def export(config_structure): -+ def export(config_structure: bytes) -> bytes: - return config_structure - - class NoopFacade(FacadeInterface): - @classmethod -- def create(cls): -+ def create(cls) -> "NoopFacade": - return cls(bytes()) - - -@@ -135,7 +146,16 @@ _toolboxes = { - ), - code.PCS_KNOWN_HOSTS: FileToolbox( - file_type_code=code.PCS_KNOWN_HOSTS, -- facade=None, # TODO needed for 'auth' and 'deauth' commands -+ # TODO needed for 'auth' and 'deauth' commands -+ facade=None, # type: ignore -+ parser=JsonParser, -+ exporter=JsonExporter, -+ validator=None, # TODO needed for files syncing -+ version_controller=None, # TODO needed for files syncing -+ ), -+ code.PCS_DR_CONFIG: FileToolbox( -+ file_type_code=code.PCS_DR_CONFIG, -+ facade=DrConfigFacade, - parser=JsonParser, - exporter=JsonExporter, - validator=None, # TODO needed for files syncing -@@ -143,5 +163,5 @@ _toolboxes = { - ), - } - --def for_file_type(file_type_code): -+def for_file_type(file_type_code: code.FileTypeCode) -> FileToolbox: - return _toolboxes[file_type_code] -diff --git a/pcs/lib/node.py b/pcs/lib/node.py -index 1930ffa8..09543c8e 100644 ---- a/pcs/lib/node.py -+++ b/pcs/lib/node.py -@@ -1,5 +1,6 @@ - from typing import ( - Iterable, -+ List, - Optional, - Tuple, - ) -@@ -18,7 +19,7 @@ def get_existing_nodes_names( - corosync_conf: Optional[CorosyncConfigFacade] = None, - cib: Optional[Element] = None, - error_on_missing_name: bool = False --) -> Tuple[Iterable[str], ReportItemList]: -+) -> Tuple[List[str], ReportItemList]: - return __get_nodes_names( - *__get_nodes(corosync_conf, cib), - error_on_missing_name 
-@@ -56,7 +57,7 @@ def __get_nodes_names( - corosync_nodes: Iterable[CorosyncNode], - remote_and_guest_nodes: Iterable[PacemakerNode], - error_on_missing_name: bool = False --) -> Tuple[Iterable[str], ReportItemList]: -+) -> Tuple[List[str], ReportItemList]: - report_list = [] - corosync_names = [] - name_missing_in_corosync = False -diff --git a/pcs/lib/node_communication_format.py b/pcs/lib/node_communication_format.py -index 6134c66d..1cef35b4 100644 ---- a/pcs/lib/node_communication_format.py -+++ b/pcs/lib/node_communication_format.py -@@ -1,5 +1,9 @@ - import base64 - from collections import namedtuple -+from typing import ( -+ Any, -+ Dict, -+) - - from pcs.lib import reports - from pcs.lib.errors import LibraryError -@@ -55,6 +59,18 @@ def corosync_conf_file(corosync_conf_content): - "corosync.conf": corosync_conf_format(corosync_conf_content) - } - -+def pcs_dr_config_format(dr_conf_content: bytes) -> Dict[str, Any]: -+ return { -+ "type": "pcs_disaster_recovery_conf", -+ "data": base64.b64encode(dr_conf_content).decode("utf-8"), -+ "rewrite_existing": True, -+ } -+ -+def pcs_dr_config_file(dr_conf_content: bytes) -> Dict[str, Any]: -+ return { -+ "disaster-recovery config": pcs_dr_config_format(dr_conf_content) -+ } -+ - def pcs_settings_conf_format(content): - return { - "data": content, -diff --git a/pcs/lib/reports.py b/pcs/lib/reports.py -index e83737b0..1f081007 100644 ---- a/pcs/lib/reports.py -+++ b/pcs/lib/reports.py -@@ -4221,3 +4221,34 @@ def resource_disable_affects_other_resources( - "crm_simulate_plaintext_output": crm_simulate_plaintext_output, - } - ) -+ -+ -+def dr_config_already_exist(): -+ """ -+ Disaster recovery config exists when the opposite was expected -+ """ -+ return ReportItem.error( -+ report_codes.DR_CONFIG_ALREADY_EXIST, -+ ) -+ -+def dr_config_does_not_exist(): -+ """ -+ Disaster recovery config does not exist when the opposite was expected -+ """ -+ return ReportItem.error( -+ report_codes.DR_CONFIG_DOES_NOT_EXIST, -+ ) -+ 
-+def node_in_local_cluster(node): -+ """ -+ Node is part of local cluster and it cannot be used for example to set up -+ disaster-recovery site -+ -+ node -- node which is part of local cluster -+ """ -+ return ReportItem.error( -+ report_codes.NODE_IN_LOCAL_CLUSTER, -+ info=dict( -+ node=node, -+ ), -+ ) -diff --git a/pcs/pcs.8 b/pcs/pcs.8 -index 5765c6b5..651fda83 100644 ---- a/pcs/pcs.8 -+++ b/pcs/pcs.8 -@@ -75,6 +75,9 @@ alert - .TP - client - Manage pcsd client configuration. -+.TP -+dr -+ Manage disaster recovery configuration. - .SS "resource" - .TP - [status [\fB\-\-hide\-inactive\fR]] -@@ -887,7 +890,7 @@ stop - Stop booth arbitrator service. - .SS "status" - .TP --[status] [\fB\-\-full\fR | \fB\-\-hide\-inactive\fR] -+[status] [\fB\-\-full\fR] [\fB\-\-hide\-inactive\fR] - View all information about the cluster and resources (\fB\-\-full\fR provides more details, \fB\-\-hide\-inactive\fR hides inactive resources). - .TP - resources [\fB\-\-hide\-inactive\fR] -@@ -1015,6 +1018,19 @@ Remove specified recipients. - .TP - local-auth [] [\-u ] [\-p ] - Authenticate current user to local pcsd. This is required to run some pcs commands which may require permissions of root user such as 'pcs cluster start'. -+.SS "dr" -+.TP -+config -+Display disaster-recovery configuration from the local node. -+.TP -+status [\fB\-\-full\fR] [\fB\-\-hide\-inactive\fR] -+Display status of the local and the remote site cluster (\fB\-\-full\fR provides more details, \fB\-\-hide\-inactive\fR hides inactive resources). -+.TP -+set\-recovery\-site -+Set up disaster\-recovery with the local cluster being the primary site. The recovery site is defined by a name of one of its nodes. -+.TP -+destroy -+Permanently destroy disaster-recovery configuration on all sites. 
- .SH EXAMPLES - .TP - Show all resources -diff --git a/pcs/pcs_internal.py b/pcs/pcs_internal.py -index fecdc8d5..d956d71e 100644 ---- a/pcs/pcs_internal.py -+++ b/pcs/pcs_internal.py -@@ -22,6 +22,7 @@ SUPPORTED_COMMANDS = { - "cluster.setup", - "cluster.add_nodes", - "cluster.remove_nodes", -+ "status.full_cluster_status_plaintext", - } - - -diff --git a/pcs/settings_default.py b/pcs/settings_default.py -index ab61b20b..6d8f33ac 100644 ---- a/pcs/settings_default.py -+++ b/pcs/settings_default.py -@@ -50,6 +50,7 @@ pcsd_users_conf_location = os.path.join(pcsd_var_location, "pcs_users.conf") - pcsd_settings_conf_location = os.path.join( - pcsd_var_location, "pcs_settings.conf" - ) -+pcsd_dr_config_location = os.path.join(pcsd_var_location, "disaster-recovery") - pcsd_exec_location = "/usr/lib/pcsd/" - pcsd_log_location = "/var/log/pcsd/pcsd.log" - pcsd_default_port = 2224 -diff --git a/pcs/usage.py b/pcs/usage.py -index 0b16289e..e4f5af32 100644 ---- a/pcs/usage.py -+++ b/pcs/usage.py -@@ -22,6 +22,7 @@ def full_usage(): - out += strip_extras(host([], False)) - out += strip_extras(alert([], False)) - out += strip_extras(client([], False)) -+ out += strip_extras(dr([], False)) - print(out.strip()) - print("Examples:\n" + examples.replace(r" \ ", "")) - -@@ -124,6 +125,7 @@ def generate_completion_tree_from_usage(): - tree["alert"] = generate_tree(alert([], False)) - tree["booth"] = generate_tree(booth([], False)) - tree["client"] = generate_tree(client([], False)) -+ tree["dr"] = generate_tree(dr([], False)) - return tree - - def generate_tree(usage_txt): -@@ -194,6 +196,7 @@ Commands: - node Manage cluster nodes. - alert Manage pacemaker alerts. - client Manage pcsd client configuration. -+ dr Manage disaster recovery configuration. - """ - # Advanced usage to possibly add later - # --corosync_conf= Specify alternative corosync.conf file -@@ -1517,7 +1520,7 @@ def status(args=(), pout=True): - Usage: pcs status [commands]... 
- View current cluster and resource status - Commands: -- [status] [--full | --hide-inactive] -+ [status] [--full] [--hide-inactive] - View all information about the cluster and resources (--full provides - more details, --hide-inactive hides inactive resources). - -@@ -2019,6 +2022,32 @@ Commands: - return output - - -+def dr(args=(), pout=True): -+ output = """ -+Usage: pcs dr -+Manage disaster recovery configuration. -+ -+Commands: -+ config -+ Display disaster-recovery configuration from the local node. -+ -+ status [--full] [--hide-inactive] -+ Display status of the local and the remote site cluster (--full -+ provides more details, --hide-inactive hides inactive resources). -+ -+ set-recovery-site -+ Set up disaster-recovery with the local cluster being the primary site. -+ The recovery site is defined by a name of one of its nodes. -+ -+ destroy -+ Permanently destroy disaster-recovery configuration on all sites. -+""" -+ if pout: -+ print(sub_usage(args, output)) -+ return None -+ return output -+ -+ - def show(main_usage_name, rest_usage_names): - usage_map = { - "acl": acl, -@@ -2028,6 +2057,7 @@ def show(main_usage_name, rest_usage_names): - "cluster": cluster, - "config": config, - "constraint": constraint, -+ "dr": dr, - "host": host, - "node": node, - "pcsd": pcsd, -diff --git a/pcs_test/tier0/cli/common/test_console_report.py b/pcs_test/tier0/cli/common/test_console_report.py -index 2deb896d..0d0c2457 100644 ---- a/pcs_test/tier0/cli/common/test_console_report.py -+++ b/pcs_test/tier0/cli/common/test_console_report.py -@@ -4489,3 +4489,27 @@ class ResourceDisableAffectsOtherResources(NameBuildTest): - "crm_simulate output", - ) - ) -+ -+ -+class DrConfigAlreadyExist(NameBuildTest): -+ def test_success(self): -+ self.assert_message_from_report( -+ "Disaster-recovery already configured", -+ reports.dr_config_already_exist() -+ ) -+ -+ -+class DrConfigDoesNotExist(NameBuildTest): -+ def test_success(self): -+ self.assert_message_from_report( -+ 
"Disaster-recovery is not configured", -+ reports.dr_config_does_not_exist() -+ ) -+ -+ -+class NodeInLocalCluster(NameBuildTest): -+ def test_success(self): -+ self.assert_message_from_report( -+ "Node 'node-name' is part of local cluster", -+ reports.node_in_local_cluster("node-name") -+ ) -diff --git a/pcs_test/tier0/cli/test_dr.py b/pcs_test/tier0/cli/test_dr.py -new file mode 100644 -index 00000000..4422cdc4 ---- /dev/null -+++ b/pcs_test/tier0/cli/test_dr.py -@@ -0,0 +1,293 @@ -+from textwrap import dedent -+from unittest import mock, TestCase -+ -+from pcs_test.tools.misc import dict_to_modifiers -+ -+from pcs.common import report_codes -+ -+from pcs.cli import dr -+from pcs.cli.common.errors import CmdLineInputError -+ -+ -+@mock.patch("pcs.cli.dr.print") -+class Config(TestCase): -+ def setUp(self): -+ self.lib = mock.Mock(spec_set=["dr"]) -+ self.lib.dr = mock.Mock(spec_set=["get_config"]) -+ -+ def _call_cmd(self, argv=None): -+ dr.config(self.lib, argv or [], dict_to_modifiers({})) -+ -+ def test_argv(self, mock_print): -+ with self.assertRaises(CmdLineInputError) as cm: -+ self._call_cmd(["x"]) -+ self.assertIsNone(cm.exception.message) -+ mock_print.assert_not_called() -+ -+ def test_success(self, mock_print): -+ self.lib.dr.get_config.return_value = { -+ "local_site": { -+ "node_list": [], -+ "site_role": "RECOVERY", -+ }, -+ "remote_site_list": [ -+ { -+ "node_list": [ -+ {"name": "nodeA2"}, -+ {"name": "nodeA1"}, -+ ], -+ "site_role": "PRIMARY", -+ }, -+ { -+ "node_list": [ -+ {"name": "nodeB1"}, -+ ], -+ "site_role": "RECOVERY", -+ } -+ ], -+ } -+ self._call_cmd([]) -+ self.lib.dr.get_config.assert_called_once_with() -+ mock_print.assert_called_once_with(dedent("""\ -+ Local site: -+ Role: Recovery -+ Remote site: -+ Role: Primary -+ Nodes: -+ nodeA1 -+ nodeA2 -+ Remote site: -+ Role: Recovery -+ Nodes: -+ nodeB1""")) -+ -+ @mock.patch("pcs.cli.common.console_report.sys.stderr.write") -+ def test_invalid_response(self, mock_stderr, mock_print): -+ 
self.lib.dr.get_config.return_value = [ -+ "wrong response", -+ {"x": "y"}, -+ ] -+ with self.assertRaises(SystemExit) as cm: -+ self._call_cmd([]) -+ self.assertEqual(cm.exception.code, 1) -+ self.lib.dr.get_config.assert_called_once_with() -+ mock_print.assert_not_called() -+ mock_stderr.assert_called_once_with( -+ "Error: Unable to communicate with pcsd, received response:\n" -+ "['wrong response', {'x': 'y'}]\n" -+ ) -+ -+ -+class SetRecoverySite(TestCase): -+ def setUp(self): -+ self.lib = mock.Mock(spec_set=["dr"]) -+ self.dr = mock.Mock(spec_set=["set_recovery_site"]) -+ self.lib.dr = self.dr -+ -+ def call_cmd(self, argv): -+ dr.set_recovery_site(self.lib, argv, dict_to_modifiers({})) -+ -+ def test_no_node(self): -+ with self.assertRaises(CmdLineInputError) as cm: -+ self.call_cmd([]) -+ self.assertIsNone(cm.exception.message) -+ -+ def test_multiple_nodes(self): -+ with self.assertRaises(CmdLineInputError) as cm: -+ self.call_cmd(["node1", "node2"]) -+ self.assertIsNone(cm.exception.message) -+ -+ def test_success(self): -+ node = "node" -+ self.call_cmd([node]) -+ self.dr.set_recovery_site.assert_called_once_with(node) -+ -+ -+@mock.patch("pcs.cli.dr.print") -+class Status(TestCase): -+ def setUp(self): -+ self.lib = mock.Mock(spec_set=["dr"]) -+ self.lib.dr = mock.Mock(spec_set=["status_all_sites_plaintext"]) -+ -+ def _call_cmd(self, argv, modifiers=None): -+ dr.status(self.lib, argv, dict_to_modifiers(modifiers or {})) -+ -+ def _fixture_response(self, local_success=True, remote_success=True): -+ self.lib.dr.status_all_sites_plaintext.return_value = [ -+ { -+ "local_site": True, -+ "site_role": "PRIMARY", -+ "status_plaintext": ( -+ "local cluster\nstatus" if local_success -+ else "this should never be displayed" -+ ), -+ "status_successfully_obtained": local_success, -+ }, -+ { -+ "local_site": False, -+ "site_role": "RECOVERY", -+ "status_plaintext": ( -+ "remote cluster\nstatus" if remote_success -+ else "this should never be displayed" -+ ), -+ 
"status_successfully_obtained": remote_success, -+ }, -+ ] -+ -+ @staticmethod -+ def _fixture_print(): -+ return dedent("""\ -+ --- Local cluster - Primary site --- -+ local cluster -+ status -+ -+ -+ --- Remote cluster - Recovery site --- -+ remote cluster -+ status""" -+ ) -+ -+ def test_argv(self, mock_print): -+ with self.assertRaises(CmdLineInputError) as cm: -+ self._call_cmd(["x"]) -+ self.assertIsNone(cm.exception.message) -+ mock_print.assert_not_called() -+ -+ def test_success(self, mock_print): -+ self._fixture_response() -+ self._call_cmd([]) -+ self.lib.dr.status_all_sites_plaintext.assert_called_once_with( -+ hide_inactive_resources=False, verbose=False -+ ) -+ mock_print.assert_called_once_with(self._fixture_print()) -+ -+ def test_success_full(self, mock_print): -+ self._fixture_response() -+ self._call_cmd([], {"full": True}) -+ self.lib.dr.status_all_sites_plaintext.assert_called_once_with( -+ hide_inactive_resources=False, verbose=True -+ ) -+ mock_print.assert_called_once_with(self._fixture_print()) -+ -+ def test_success_hide_inactive(self, mock_print): -+ self._fixture_response() -+ self._call_cmd([], {"hide-inactive": True}) -+ self.lib.dr.status_all_sites_plaintext.assert_called_once_with( -+ hide_inactive_resources=True, verbose=False -+ ) -+ mock_print.assert_called_once_with(self._fixture_print()) -+ -+ def test_success_all_flags(self, mock_print): -+ self._fixture_response() -+ self._call_cmd([], {"full": True, "hide-inactive": True}) -+ self.lib.dr.status_all_sites_plaintext.assert_called_once_with( -+ hide_inactive_resources=True, verbose=True -+ ) -+ mock_print.assert_called_once_with(self._fixture_print()) -+ -+ @mock.patch("pcs.cli.common.console_report.sys.stderr.write") -+ def test_error_local(self, mock_stderr, mock_print): -+ self._fixture_response(local_success=False) -+ with self.assertRaises(SystemExit) as cm: -+ self._call_cmd([]) -+ self.assertEqual(cm.exception.code, 1) -+ 
self.lib.dr.status_all_sites_plaintext.assert_called_once_with( -+ hide_inactive_resources=False, verbose=False -+ ) -+ mock_print.assert_called_once_with(dedent("""\ -+ --- Local cluster - Primary site --- -+ Error: Unable to get status of the cluster from any node -+ -+ --- Remote cluster - Recovery site --- -+ remote cluster -+ status""" -+ )) -+ mock_stderr.assert_called_once_with( -+ "Error: Unable to get status of all sites\n" -+ ) -+ -+ @mock.patch("pcs.cli.common.console_report.sys.stderr.write") -+ def test_error_remote(self, mock_stderr, mock_print): -+ self._fixture_response(remote_success=False) -+ with self.assertRaises(SystemExit) as cm: -+ self._call_cmd([]) -+ self.assertEqual(cm.exception.code, 1) -+ self.lib.dr.status_all_sites_plaintext.assert_called_once_with( -+ hide_inactive_resources=False, verbose=False -+ ) -+ mock_print.assert_called_once_with(dedent("""\ -+ --- Local cluster - Primary site --- -+ local cluster -+ status -+ -+ -+ --- Remote cluster - Recovery site --- -+ Error: Unable to get status of the cluster from any node""" -+ )) -+ mock_stderr.assert_called_once_with( -+ "Error: Unable to get status of all sites\n" -+ ) -+ -+ @mock.patch("pcs.cli.common.console_report.sys.stderr.write") -+ def test_error_both(self, mock_stderr, mock_print): -+ self._fixture_response(local_success=False, remote_success=False) -+ with self.assertRaises(SystemExit) as cm: -+ self._call_cmd([]) -+ self.assertEqual(cm.exception.code, 1) -+ self.lib.dr.status_all_sites_plaintext.assert_called_once_with( -+ hide_inactive_resources=False, verbose=False -+ ) -+ mock_print.assert_called_once_with(dedent("""\ -+ --- Local cluster - Primary site --- -+ Error: Unable to get status of the cluster from any node -+ -+ --- Remote cluster - Recovery site --- -+ Error: Unable to get status of the cluster from any node""" -+ )) -+ mock_stderr.assert_called_once_with( -+ "Error: Unable to get status of all sites\n" -+ ) -+ -+ 
@mock.patch("pcs.cli.common.console_report.sys.stderr.write") -+ def test_invalid_response(self, mock_stderr, mock_print): -+ self.lib.dr.status_all_sites_plaintext.return_value = [ -+ "wrong response", -+ {"x": "y"}, -+ ] -+ with self.assertRaises(SystemExit) as cm: -+ self._call_cmd([]) -+ self.assertEqual(cm.exception.code, 1) -+ self.lib.dr.status_all_sites_plaintext.assert_called_once_with( -+ hide_inactive_resources=False, verbose=False -+ ) -+ mock_print.assert_not_called() -+ mock_stderr.assert_called_once_with( -+ "Error: Unable to communicate with pcsd, received response:\n" -+ "['wrong response', {'x': 'y'}]\n" -+ ) -+ -+ -+class Destroy(TestCase): -+ def setUp(self): -+ self.lib = mock.Mock(spec_set=["dr"]) -+ self.dr = mock.Mock(spec_set=["destroy"]) -+ self.lib.dr = self.dr -+ -+ def call_cmd(self, argv, modifiers=None): -+ modifiers = modifiers or {} -+ dr.destroy(self.lib, argv, dict_to_modifiers(modifiers)) -+ -+ def test_some_args(self): -+ with self.assertRaises(CmdLineInputError) as cm: -+ self.call_cmd(["arg"]) -+ self.assertIsNone(cm.exception.message) -+ -+ def test_success(self): -+ self.call_cmd([]) -+ self.dr.destroy.assert_called_once_with(force_flags=[]) -+ -+ def test_skip_offline(self): -+ self.call_cmd([], modifiers={"skip-offline": True}) -+ self.dr.destroy.assert_called_once_with( -+ force_flags=[report_codes.SKIP_OFFLINE_NODES] -+ ) -diff --git a/pcs_test/tier0/common/test_dr.py b/pcs_test/tier0/common/test_dr.py -new file mode 100644 -index 00000000..2ef12855 ---- /dev/null -+++ b/pcs_test/tier0/common/test_dr.py -@@ -0,0 +1,167 @@ -+from unittest import TestCase -+ -+from pcs.common import dr -+ -+ -+class DrConfigNodeDto(TestCase): -+ def setUp(self): -+ self.name = "node-name" -+ -+ def _fixture_dto(self): -+ return dr.DrConfigNodeDto(self.name) -+ -+ def _fixture_dict(self): -+ return dict(name=self.name) -+ -+ def test_to_dict(self): -+ self.assertEqual( -+ self._fixture_dict(), -+ self._fixture_dto().to_dict() -+ ) -+ -+ def 
test_from_dict(self): -+ dto = dr.DrConfigNodeDto.from_dict(self._fixture_dict()) -+ self.assertEqual(dto.name, self.name) -+ -+ -+class DrConfigSiteDto(TestCase): -+ def setUp(self): -+ self.role = dr.DrRole.PRIMARY -+ self.node_name_list = ["node1", "node2"] -+ -+ def _fixture_dto(self): -+ return dr.DrConfigSiteDto( -+ self.role, -+ [dr.DrConfigNodeDto(name) for name in self.node_name_list] -+ ) -+ -+ def _fixture_dict(self): -+ return dict( -+ site_role=self.role, -+ node_list=[dict(name=name) for name in self.node_name_list] -+ ) -+ -+ def test_to_dict(self): -+ self.assertEqual( -+ self._fixture_dict(), -+ self._fixture_dto().to_dict() -+ ) -+ -+ def test_from_dict(self): -+ dto = dr.DrConfigSiteDto.from_dict(self._fixture_dict()) -+ self.assertEqual(dto.site_role, self.role) -+ self.assertEqual(len(dto.node_list), len(self.node_name_list)) -+ for i, dto_node in enumerate(dto.node_list): -+ self.assertEqual( -+ dto_node.name, -+ self.node_name_list[i], -+ f"index: {i}" -+ ) -+ -+ -+class DrConfig(TestCase): -+ @staticmethod -+ def _fixture_site_dto(role, node_name_list): -+ return dr.DrConfigSiteDto( -+ role, -+ [dr.DrConfigNodeDto(name) for name in node_name_list] -+ ) -+ -+ @staticmethod -+ def _fixture_dict(): -+ return { -+ "local_site": { -+ "node_list": [], -+ "site_role": "RECOVERY", -+ }, -+ "remote_site_list": [ -+ { -+ "node_list": [ -+ {"name": "nodeA1"}, -+ {"name": "nodeA2"}, -+ ], -+ "site_role": "PRIMARY", -+ }, -+ { -+ "node_list": [ -+ {"name": "nodeB1"}, -+ ], -+ "site_role": "RECOVERY", -+ } -+ ], -+ } -+ -+ def test_to_dict(self): -+ self.assertEqual( -+ self._fixture_dict(), -+ dr.DrConfigDto( -+ self._fixture_site_dto(dr.DrRole.RECOVERY, []), -+ [ -+ self._fixture_site_dto( -+ dr.DrRole.PRIMARY, -+ ["nodeA1", "nodeA2"] -+ ), -+ self._fixture_site_dto( -+ dr.DrRole.RECOVERY, -+ ["nodeB1"] -+ ), -+ ] -+ ).to_dict() -+ ) -+ -+ def test_from_dict(self): -+ dto = dr.DrConfigDto.from_dict(self._fixture_dict()) -+ self.assertEqual( -+ 
dto.local_site.to_dict(), -+ self._fixture_site_dto(dr.DrRole.RECOVERY, []).to_dict() -+ ) -+ self.assertEqual(len(dto.remote_site_list), 2) -+ self.assertEqual( -+ dto.remote_site_list[0].to_dict(), -+ self._fixture_site_dto( -+ dr.DrRole.PRIMARY, ["nodeA1", "nodeA2"] -+ ).to_dict() -+ ) -+ self.assertEqual( -+ dto.remote_site_list[1].to_dict(), -+ self._fixture_site_dto(dr.DrRole.RECOVERY, ["nodeB1"]).to_dict() -+ ) -+ -+class DrSiteStatusDto(TestCase): -+ def setUp(self): -+ self.local = False -+ self.role = dr.DrRole.PRIMARY -+ self.status_plaintext = "plaintext status" -+ self.status_successfully_obtained = True -+ -+ def dto_fixture(self): -+ return dr.DrSiteStatusDto( -+ self.local, -+ self.role, -+ self.status_plaintext, -+ self.status_successfully_obtained, -+ ) -+ -+ def dict_fixture(self): -+ return dict( -+ local_site=self.local, -+ site_role=self.role.value, -+ status_plaintext=self.status_plaintext, -+ status_successfully_obtained=self.status_successfully_obtained, -+ ) -+ -+ def test_to_dict(self): -+ self.assertEqual( -+ self.dict_fixture(), -+ self.dto_fixture().to_dict() -+ ) -+ -+ def test_from_dict(self): -+ dto = dr.DrSiteStatusDto.from_dict(self.dict_fixture()) -+ self.assertEqual(dto.local_site, self.local) -+ self.assertEqual(dto.site_role, self.role) -+ self.assertEqual(dto.status_plaintext, self.status_plaintext) -+ self.assertEqual( -+ dto.status_successfully_obtained, -+ self.status_successfully_obtained -+ ) -diff --git a/pcs_test/tier0/lib/commands/cluster/test_add_nodes.py b/pcs_test/tier0/lib/commands/cluster/test_add_nodes.py -index a570d67e..295c1e6a 100644 ---- a/pcs_test/tier0/lib/commands/cluster/test_add_nodes.py -+++ b/pcs_test/tier0/lib/commands/cluster/test_add_nodes.py -@@ -470,6 +470,11 @@ class LocalConfig(): - return_value=False, - name=f"{local_prefix}fs.isfile.pacemaker_authkey" - ) -+ .fs.isfile( -+ settings.pcsd_dr_config_location, -+ return_value=False, -+ name=f"{local_prefix}fs.isfile.pcsd_disaster_recovery" -+ ) 
- .fs.isfile( - settings.pcsd_settings_conf_location, - return_value=False, -@@ -480,10 +485,12 @@ class LocalConfig(): - def files_sync(self, node_labels): - corosync_authkey_content = b"corosync authfile" - pcmk_authkey_content = b"pcmk authfile" -- pcs_settings_content = "pcs_settigns.conf data" -+ pcs_disaster_recovery_content = b"disaster recovery config data" -+ pcs_settings_content = "pcs_settings.conf data" - file_list = [ - "corosync authkey", - "pacemaker authkey", -+ "disaster-recovery config", - "pcs_settings.conf", - ] - local_prefix = "local.files_sync." -@@ -512,6 +519,19 @@ class LocalConfig(): - mode="rb", - name=f"{local_prefix}fs.open.pcmk_authkey_read", - ) -+ .fs.isfile( -+ settings.pcsd_dr_config_location, -+ return_value=True, -+ name=f"{local_prefix}fs.isfile.pcsd_disaster_recovery" -+ ) -+ .fs.open( -+ settings.pcsd_dr_config_location, -+ return_value=( -+ mock.mock_open(read_data=pcs_disaster_recovery_content)() -+ ), -+ mode="rb", -+ name=f"{local_prefix}fs.open.pcsd_disaster_recovery_read", -+ ) - .fs.isfile( - settings.pcsd_settings_conf_location, - return_value=True, -@@ -526,6 +546,7 @@ class LocalConfig(): - node_labels=node_labels, - pcmk_authkey=pcmk_authkey_content, - corosync_authkey=corosync_authkey_content, -+ pcs_disaster_recovery_conf=pcs_disaster_recovery_content, - pcs_settings_conf=pcs_settings_content, - name=f"{local_prefix}http.files.put_files", - ) -@@ -2105,13 +2126,16 @@ class FailureFilesDistribution(TestCase): - self.expected_reports = [] - self.pcmk_authkey_content = b"pcmk authkey content" - self.corosync_authkey_content = b"corosync authkey content" -+ self.pcsd_dr_config_content = b"disaster recovery config data" - self.pcmk_authkey_file_id = "pacemaker_remote authkey" - self.corosync_authkey_file_id = "corosync authkey" -+ self.pcsd_dr_config_file_id = "disaster-recovery config" - self.unsuccessful_nodes = self.new_nodes[:1] - self.successful_nodes = self.new_nodes[1:] - self.err_msg = "an error message" - 
self.corosync_key_open_before_position = "fs.isfile.pacemaker_authkey" -- self.pacemaker_key_open_before_position = "fs.isfile.pcsd_settings" -+ self.pacemaker_key_open_before_position = "fs.isfile.pcsd_dr_config" -+ self.pcsd_dr_config_open_before_position = "fs.isfile.pcsd_settings" - patch_getaddrinfo(self, self.new_nodes) - self.existing_corosync_nodes = [ - node_fixture(node, node_id) -@@ -2149,9 +2173,14 @@ class FailureFilesDistribution(TestCase): - ) - # open will be inserted here - .fs.isfile( -- settings.pcsd_settings_conf_location, return_value=False, -+ settings.pcsd_dr_config_location, return_value=True, - name=self.pacemaker_key_open_before_position - ) -+ # open will be inserted here -+ .fs.isfile( -+ settings.pcsd_settings_conf_location, return_value=False, -+ name=self.pcsd_dr_config_open_before_position -+ ) - ) - self.expected_reports.extend( - [ -@@ -2165,7 +2194,11 @@ class FailureFilesDistribution(TestCase): - self.distribution_started_reports = [ - fixture.info( - report_codes.FILES_DISTRIBUTION_STARTED, -- file_list=["corosync authkey", "pacemaker authkey"], -+ file_list=[ -+ self.corosync_authkey_file_id, -+ "pacemaker authkey", -+ self.pcsd_dr_config_file_id, -+ ], - node_list=self.new_nodes, - ) - ] -@@ -2181,6 +2214,12 @@ class FailureFilesDistribution(TestCase): - node=node, - file_description="pacemaker authkey", - ) for node in self.successful_nodes -+ ] + [ -+ fixture.info( -+ report_codes.FILE_DISTRIBUTION_SUCCESS, -+ node=node, -+ file_description=self.pcsd_dr_config_file_id, -+ ) for node in self.successful_nodes - ] - - def _add_nodes_with_lib_error(self): -@@ -2210,6 +2249,15 @@ class FailureFilesDistribution(TestCase): - name="fs.open.pacemaker_authkey", - before=self.pacemaker_key_open_before_position, - ) -+ self.config.fs.open( -+ settings.pcsd_dr_config_location, -+ mode="rb", -+ side_effect=EnvironmentError( -+ 1, self.err_msg, settings.pcsd_dr_config_location -+ ), -+ name="fs.open.pcsd_dr_config", -+ 
before=self.pcsd_dr_config_open_before_position, -+ ) - - self._add_nodes_with_lib_error() - -@@ -2236,7 +2284,17 @@ class FailureFilesDistribution(TestCase): - f"{self.err_msg}: '{settings.pacemaker_authkey_file}'" - ), - operation=RawFileError.ACTION_READ, -- ) -+ ), -+ fixture.error( -+ report_codes.FILE_IO_ERROR, -+ force_code=report_codes.SKIP_FILE_DISTRIBUTION_ERRORS, -+ file_type_code=file_type_codes.PCS_DR_CONFIG, -+ file_path=settings.pcsd_dr_config_location, -+ reason=( -+ f"{self.err_msg}: '{settings.pcsd_dr_config_location}'" -+ ), -+ operation=RawFileError.ACTION_READ, -+ ), - ] - ) - -@@ -2260,6 +2318,15 @@ class FailureFilesDistribution(TestCase): - name="fs.open.pacemaker_authkey", - before=self.pacemaker_key_open_before_position, - ) -+ .fs.open( -+ settings.pcsd_dr_config_location, -+ mode="rb", -+ side_effect=EnvironmentError( -+ 1, self.err_msg, settings.pcsd_dr_config_location -+ ), -+ name="fs.open.pcsd_dr_config", -+ before=self.pcsd_dr_config_open_before_position, -+ ) - .local.distribute_and_reload_corosync_conf( - corosync_conf_fixture( - self.existing_corosync_nodes + [ -@@ -2301,7 +2368,16 @@ class FailureFilesDistribution(TestCase): - f"{self.err_msg}: '{settings.pacemaker_authkey_file}'" - ), - operation=RawFileError.ACTION_READ, -- ) -+ ), -+ fixture.warn( -+ report_codes.FILE_IO_ERROR, -+ file_type_code=file_type_codes.PCS_DR_CONFIG, -+ file_path=settings.pcsd_dr_config_location, -+ reason=( -+ f"{self.err_msg}: '{settings.pcsd_dr_config_location}'" -+ ), -+ operation=RawFileError.ACTION_READ, -+ ), - ] - ) - -@@ -2325,9 +2401,19 @@ class FailureFilesDistribution(TestCase): - name="fs.open.pacemaker_authkey", - before=self.pacemaker_key_open_before_position, - ) -+ .fs.open( -+ settings.pcsd_dr_config_location, -+ return_value=mock.mock_open( -+ read_data=self.pcsd_dr_config_content -+ )(), -+ mode="rb", -+ name="fs.open.pcsd_dr_config", -+ before=self.pcsd_dr_config_open_before_position, -+ ) - .http.files.put_files( - 
pcmk_authkey=self.pcmk_authkey_content, - corosync_authkey=self.corosync_authkey_content, -+ pcs_disaster_recovery_conf=self.pcsd_dr_config_content, - communication_list=[ - dict( - label=node, -@@ -2339,7 +2425,11 @@ class FailureFilesDistribution(TestCase): - self.pcmk_authkey_file_id: dict( - code="unexpected", - message=self.err_msg -- ) -+ ), -+ self.pcsd_dr_config_file_id: dict( -+ code="unexpected", -+ message=self.err_msg -+ ), - })) - ) for node in self.unsuccessful_nodes - ] + [ -@@ -2374,6 +2464,15 @@ class FailureFilesDistribution(TestCase): - reason=self.err_msg, - ) for node in self.unsuccessful_nodes - ] -+ + -+ [ -+ fixture.error( -+ report_codes.FILE_DISTRIBUTION_ERROR, -+ node=node, -+ file_description=self.pcsd_dr_config_file_id, -+ reason=self.err_msg, -+ ) for node in self.unsuccessful_nodes -+ ] - ) - - def test_communication_failure(self): -@@ -2396,9 +2495,19 @@ class FailureFilesDistribution(TestCase): - name="fs.open.pacemaker_authkey", - before=self.pacemaker_key_open_before_position, - ) -+ .fs.open( -+ settings.pcsd_dr_config_location, -+ return_value=mock.mock_open( -+ read_data=self.pcsd_dr_config_content -+ )(), -+ mode="rb", -+ name="fs.open.pcsd_dr_config", -+ before=self.pcsd_dr_config_open_before_position, -+ ) - .http.files.put_files( - pcmk_authkey=self.pcmk_authkey_content, - corosync_authkey=self.corosync_authkey_content, -+ pcs_disaster_recovery_conf=self.pcsd_dr_config_content, - communication_list=[ - dict( - label=node, -@@ -2450,9 +2559,19 @@ class FailureFilesDistribution(TestCase): - name="fs.open.pacemaker_authkey", - before=self.pacemaker_key_open_before_position, - ) -+ .fs.open( -+ settings.pcsd_dr_config_location, -+ return_value=mock.mock_open( -+ read_data=self.pcsd_dr_config_content -+ )(), -+ mode="rb", -+ name="fs.open.pcsd_dr_config", -+ before=self.pcsd_dr_config_open_before_position, -+ ) - .http.files.put_files( - pcmk_authkey=self.pcmk_authkey_content, - corosync_authkey=self.corosync_authkey_content, -+ 
pcs_disaster_recovery_conf=self.pcsd_dr_config_content, - communication_list=[ - dict( - label=node, -@@ -2501,9 +2620,19 @@ class FailureFilesDistribution(TestCase): - name="fs.open.pacemaker_authkey", - before=self.pacemaker_key_open_before_position, - ) -+ .fs.open( -+ settings.pcsd_dr_config_location, -+ return_value=mock.mock_open( -+ read_data=self.pcsd_dr_config_content -+ )(), -+ mode="rb", -+ name="fs.open.pcsd_dr_config", -+ before=self.pcsd_dr_config_open_before_position, -+ ) - .http.files.put_files( - pcmk_authkey=self.pcmk_authkey_content, - corosync_authkey=self.corosync_authkey_content, -+ pcs_disaster_recovery_conf=self.pcsd_dr_config_content, - communication_list=[ - dict( - label=node, -diff --git a/pcs_test/tier0/lib/commands/dr/__init__.py b/pcs_test/tier0/lib/commands/dr/__init__.py -new file mode 100644 -index 00000000..e69de29b -diff --git a/pcs_test/tier0/lib/commands/dr/test_destroy.py b/pcs_test/tier0/lib/commands/dr/test_destroy.py -new file mode 100644 -index 00000000..de50b21c ---- /dev/null -+++ b/pcs_test/tier0/lib/commands/dr/test_destroy.py -@@ -0,0 +1,342 @@ -+import json -+from unittest import TestCase -+ -+from pcs_test.tools import fixture -+from pcs_test.tools.command_env import get_env_tools -+ -+from pcs import settings -+from pcs.common import ( -+ file_type_codes, -+ report_codes, -+) -+from pcs.common.file import RawFileError -+from pcs.lib.commands import dr -+ -+ -+DR_CONF = "pcs disaster-recovery config" -+REASON = "error msg" -+ -+ -+def generate_nodes(nodes_num, prefix=""): -+ return [f"{prefix}node{i}" for i in range(1, nodes_num + 1)] -+ -+ -+class CheckLive(TestCase): -+ def setUp(self): -+ self.env_assist, self.config = get_env_tools(self) -+ -+ def assert_live_required(self, forbidden_options): -+ self.env_assist.assert_raise_library_error( -+ lambda: dr.destroy(self.env_assist.get_env()), -+ [ -+ fixture.error( -+ report_codes.LIVE_ENVIRONMENT_REQUIRED, -+ forbidden_options=forbidden_options -+ ) -+ ], -+ 
expected_in_processor=False -+ ) -+ -+ def test_mock_corosync(self): -+ self.config.env.set_corosync_conf_data("corosync conf data") -+ self.assert_live_required([file_type_codes.COROSYNC_CONF]) -+ -+ def test_mock_cib(self): -+ self.config.env.set_cib_data("") -+ self.assert_live_required([file_type_codes.CIB]) -+ -+ def test_mock(self): -+ self.config.env.set_corosync_conf_data("corosync conf data") -+ self.config.env.set_cib_data("") -+ self.assert_live_required([ -+ file_type_codes.CIB, -+ file_type_codes.COROSYNC_CONF, -+ ]) -+ -+ -+class FixtureMixin: -+ def _fixture_load_configs(self): -+ self.config.raw_file.exists( -+ file_type_codes.PCS_DR_CONFIG, -+ settings.pcsd_dr_config_location, -+ ) -+ self.config.raw_file.read( -+ file_type_codes.PCS_DR_CONFIG, -+ settings.pcsd_dr_config_location, -+ content=""" -+ {{ -+ "local": {{ -+ "role": "PRIMARY" -+ }}, -+ "remote_sites": [ -+ {{ -+ "nodes": [{nodes}], -+ "role": "RECOVERY" -+ }} -+ ] -+ }} -+ """.format( -+ nodes=", ".join([ -+ json.dumps(dict(name=node)) -+ for node in self.remote_nodes -+ ]) -+ ) -+ ) -+ self.config.corosync_conf.load(node_name_list=self.local_nodes) -+ -+ def _success_reports(self): -+ return [ -+ fixture.info( -+ report_codes.FILES_REMOVE_FROM_NODES_STARTED, -+ file_list=[DR_CONF], -+ node_list=self.remote_nodes + self.local_nodes, -+ ) -+ ] + [ -+ fixture.info( -+ report_codes.FILE_REMOVE_FROM_NODE_SUCCESS, -+ file_description=DR_CONF, -+ node=node, -+ ) for node in (self.remote_nodes + self.local_nodes) -+ ] -+ -+ -+class Success(FixtureMixin, TestCase): -+ def setUp(self): -+ self.env_assist, self.config = get_env_tools(self) -+ self.local_nodes = generate_nodes(5) -+ self.remote_nodes = generate_nodes(3, prefix="remote-") -+ self.config.env.set_known_nodes(self.local_nodes + self.remote_nodes) -+ -+ def test_minimal(self): -+ self._fixture_load_configs() -+ self.config.http.files.remove_files( -+ node_labels=self.remote_nodes + self.local_nodes, -+ pcs_disaster_recovery_conf=True, 
-+ ) -+ dr.destroy(self.env_assist.get_env()) -+ self.env_assist.assert_reports(self._success_reports()) -+ -+ -+class FatalConfigIssue(FixtureMixin, TestCase): -+ def setUp(self): -+ self.env_assist, self.config = get_env_tools(self) -+ self.local_nodes = generate_nodes(5) -+ self.remote_nodes = generate_nodes(3, prefix="remote-") -+ -+ def test_config_missing(self): -+ self.config.raw_file.exists( -+ file_type_codes.PCS_DR_CONFIG, -+ settings.pcsd_dr_config_location, -+ exists=False, -+ ) -+ -+ self.env_assist.assert_raise_library_error( -+ lambda: dr.destroy(self.env_assist.get_env()), -+ ) -+ self.env_assist.assert_reports([ -+ fixture.error( -+ report_codes.DR_CONFIG_DOES_NOT_EXIST, -+ ), -+ ]) -+ -+ def test_config_read_error(self): -+ self.config.raw_file.exists( -+ file_type_codes.PCS_DR_CONFIG, -+ settings.pcsd_dr_config_location, -+ ) -+ self.config.raw_file.read( -+ file_type_codes.PCS_DR_CONFIG, -+ settings.pcsd_dr_config_location, -+ exception_msg=REASON, -+ ) -+ -+ self.env_assist.assert_raise_library_error( -+ lambda: dr.destroy(self.env_assist.get_env()), -+ ) -+ self.env_assist.assert_reports([ -+ fixture.error( -+ report_codes.FILE_IO_ERROR, -+ file_type_code=file_type_codes.PCS_DR_CONFIG, -+ file_path=settings.pcsd_dr_config_location, -+ operation=RawFileError.ACTION_READ, -+ reason=REASON, -+ ), -+ ]) -+ -+ def test_config_parse_error(self): -+ self.config.raw_file.exists( -+ file_type_codes.PCS_DR_CONFIG, -+ settings.pcsd_dr_config_location, -+ ) -+ self.config.raw_file.read( -+ file_type_codes.PCS_DR_CONFIG, -+ settings.pcsd_dr_config_location, -+ content="bad content", -+ ) -+ -+ self.env_assist.assert_raise_library_error( -+ lambda: dr.destroy(self.env_assist.get_env()), -+ ) -+ self.env_assist.assert_reports([ -+ fixture.error( -+ report_codes.PARSE_ERROR_JSON_FILE, -+ file_type_code=file_type_codes.PCS_DR_CONFIG, -+ file_path=settings.pcsd_dr_config_location, -+ line_number=1, -+ column_number=1, -+ position=0, -+ reason="Expecting value", 
-+ full_msg="Expecting value: line 1 column 1 (char 0)", -+ ), -+ ]) -+ -+ def test_corosync_conf_read_error(self): -+ self._fixture_load_configs() -+ self.config.corosync_conf.load_content( -+ "", exception_msg=REASON, instead="corosync_conf.load" -+ ) -+ self.env_assist.assert_raise_library_error( -+ lambda: dr.destroy(self.env_assist.get_env()), -+ [ -+ fixture.error( -+ report_codes.UNABLE_TO_READ_COROSYNC_CONFIG, -+ path=settings.corosync_conf_file, -+ reason=REASON, -+ ), -+ ], -+ expected_in_processor=False -+ ) -+ -+ def test_corosync_conf_parse_error(self): -+ self._fixture_load_configs() -+ self.config.corosync_conf.load_content( -+ "wrong {\n corosync", instead="corosync_conf.load" -+ ) -+ self.env_assist.assert_raise_library_error( -+ lambda: dr.destroy(self.env_assist.get_env()), -+ [ -+ fixture.error( -+ report_codes -+ .PARSE_ERROR_COROSYNC_CONF_LINE_IS_NOT_SECTION_NOR_KEY_VALUE -+ ), -+ ], -+ expected_in_processor=False -+ ) -+ -+ -+class CommunicationIssue(FixtureMixin, TestCase): -+ def setUp(self): -+ self.env_assist, self.config = get_env_tools(self) -+ self.local_nodes = generate_nodes(5) -+ self.remote_nodes = generate_nodes(3, prefix="remote-") -+ -+ def test_unknown_node(self): -+ self.config.env.set_known_nodes( -+ self.local_nodes[1:] + self.remote_nodes[1:] -+ ) -+ self._fixture_load_configs() -+ self.env_assist.assert_raise_library_error( -+ lambda: dr.destroy(self.env_assist.get_env()) -+ ) -+ self.env_assist.assert_reports([ -+ fixture.error( -+ report_codes.HOST_NOT_FOUND, -+ host_list=self.local_nodes[:1] + self.remote_nodes[:1], -+ force_code=report_codes.SKIP_OFFLINE_NODES, -+ ), -+ ]) -+ -+ def test_unknown_node_force(self): -+ existing_nodes = self.remote_nodes[1:] + self.local_nodes[1:] -+ self.config.env.set_known_nodes(existing_nodes) -+ self._fixture_load_configs() -+ self.config.http.files.remove_files( -+ node_labels=existing_nodes, -+ pcs_disaster_recovery_conf=True, -+ ) -+ dr.destroy( -+ self.env_assist.get_env(), -+ 
force_flags=[report_codes.SKIP_OFFLINE_NODES], -+ ) -+ self.env_assist.assert_reports([ -+ fixture.warn( -+ report_codes.HOST_NOT_FOUND, -+ host_list=self.local_nodes[:1] + self.remote_nodes[:1], -+ ), -+ ] + [ -+ fixture.info( -+ report_codes.FILES_REMOVE_FROM_NODES_STARTED, -+ file_list=[DR_CONF], -+ node_list=existing_nodes, -+ ) -+ ] + [ -+ fixture.info( -+ report_codes.FILE_REMOVE_FROM_NODE_SUCCESS, -+ file_description=DR_CONF, -+ node=node, -+ ) for node in existing_nodes -+ ]) -+ -+ def test_node_issues(self): -+ self.config.env.set_known_nodes(self.local_nodes + self.remote_nodes) -+ self._fixture_load_configs() -+ self.config.http.files.remove_files( -+ pcs_disaster_recovery_conf=True, -+ communication_list=[ -+ dict(label=node) for node in self.remote_nodes -+ ] + [ -+ dict( -+ label=self.local_nodes[0], -+ was_connected=False, -+ error_msg=REASON, -+ ), -+ dict( -+ label=self.local_nodes[1], -+ output="invalid data", -+ ), -+ dict( -+ label=self.local_nodes[2], -+ output=json.dumps(dict(files={ -+ DR_CONF: dict( -+ code="unexpected", -+ message=REASON, -+ ), -+ })), -+ ), -+ ] + [ -+ dict(label=node) for node in self.local_nodes[3:] -+ ] -+ ) -+ -+ self.env_assist.assert_raise_library_error( -+ lambda: dr.destroy(self.env_assist.get_env()) -+ ) -+ self.env_assist.assert_reports([ -+ fixture.info( -+ report_codes.FILES_REMOVE_FROM_NODES_STARTED, -+ file_list=[DR_CONF], -+ node_list=self.remote_nodes + self.local_nodes, -+ ), -+ fixture.error( -+ report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT, -+ command="remote/remove_file", -+ node=self.local_nodes[0], -+ reason=REASON, -+ ), -+ fixture.error( -+ report_codes.INVALID_RESPONSE_FORMAT, -+ node=self.local_nodes[1], -+ ), -+ fixture.error( -+ report_codes.FILE_REMOVE_FROM_NODE_ERROR, -+ file_description=DR_CONF, -+ reason=REASON, -+ node=self.local_nodes[2], -+ ), -+ ] + [ -+ fixture.info( -+ report_codes.FILE_REMOVE_FROM_NODE_SUCCESS, -+ file_description=DR_CONF, -+ node=node, -+ ) for node in 
self.local_nodes[3:] + self.remote_nodes -+ ]) -diff --git a/pcs_test/tier0/lib/commands/dr/test_get_config.py b/pcs_test/tier0/lib/commands/dr/test_get_config.py -new file mode 100644 -index 00000000..b2297c8a ---- /dev/null -+++ b/pcs_test/tier0/lib/commands/dr/test_get_config.py -@@ -0,0 +1,134 @@ -+from unittest import TestCase -+ -+from pcs import settings -+from pcs.common import ( -+ file_type_codes, -+ report_codes, -+) -+from pcs.common.file import RawFileError -+from pcs.lib.commands import dr -+ -+from pcs_test.tools.command_env import get_env_tools -+from pcs_test.tools import fixture -+ -+REASON = "error msg" -+ -+class Config(TestCase): -+ def setUp(self): -+ self.env_assist, self.config = get_env_tools(self) -+ -+ def test_success(self): -+ (self.config -+ .raw_file.exists( -+ file_type_codes.PCS_DR_CONFIG, -+ settings.pcsd_dr_config_location, -+ ) -+ .raw_file.read( -+ file_type_codes.PCS_DR_CONFIG, -+ settings.pcsd_dr_config_location, -+ content=""" -+ { -+ "local": { -+ "role": "PRIMARY" -+ }, -+ "remote_sites": [ -+ { -+ "nodes": [ -+ { -+ "name": "recovery-node" -+ } -+ ], -+ "role": "RECOVERY" -+ } -+ ] -+ } -+ """, -+ ) -+ ) -+ self.assertEqual( -+ dr.get_config(self.env_assist.get_env()), -+ { -+ "local_site": { -+ "node_list": [], -+ "site_role": "PRIMARY", -+ }, -+ "remote_site_list": [ -+ { -+ "node_list": [ -+ {"name": "recovery-node"}, -+ ], -+ "site_role": "RECOVERY", -+ }, -+ ], -+ } -+ ) -+ -+ def test_config_missing(self): -+ (self.config -+ .raw_file.exists( -+ file_type_codes.PCS_DR_CONFIG, -+ settings.pcsd_dr_config_location, -+ exists=False, -+ ) -+ ) -+ self.env_assist.assert_raise_library_error( -+ lambda: dr.get_config(self.env_assist.get_env()), -+ ) -+ self.env_assist.assert_reports([ -+ fixture.error( -+ report_codes.DR_CONFIG_DOES_NOT_EXIST, -+ ), -+ ]) -+ -+ def test_config_read_error(self): -+ (self.config -+ .raw_file.exists( -+ file_type_codes.PCS_DR_CONFIG, -+ settings.pcsd_dr_config_location, -+ ) -+ .raw_file.read( 
-+ file_type_codes.PCS_DR_CONFIG, -+ settings.pcsd_dr_config_location, -+ exception_msg=REASON, -+ ) -+ ) -+ self.env_assist.assert_raise_library_error( -+ lambda: dr.get_config(self.env_assist.get_env()), -+ ) -+ self.env_assist.assert_reports([ -+ fixture.error( -+ report_codes.FILE_IO_ERROR, -+ file_type_code=file_type_codes.PCS_DR_CONFIG, -+ file_path=settings.pcsd_dr_config_location, -+ operation=RawFileError.ACTION_READ, -+ reason=REASON, -+ ), -+ ]) -+ -+ def test_config_parse_error(self): -+ (self.config -+ .raw_file.exists( -+ file_type_codes.PCS_DR_CONFIG, -+ settings.pcsd_dr_config_location, -+ ) -+ .raw_file.read( -+ file_type_codes.PCS_DR_CONFIG, -+ settings.pcsd_dr_config_location, -+ content="bad content", -+ ) -+ ) -+ self.env_assist.assert_raise_library_error( -+ lambda: dr.get_config(self.env_assist.get_env()), -+ ) -+ self.env_assist.assert_reports([ -+ fixture.error( -+ report_codes.PARSE_ERROR_JSON_FILE, -+ file_type_code=file_type_codes.PCS_DR_CONFIG, -+ file_path=settings.pcsd_dr_config_location, -+ line_number=1, -+ column_number=1, -+ position=0, -+ reason="Expecting value", -+ full_msg="Expecting value: line 1 column 1 (char 0)", -+ ), -+ ]) -diff --git a/pcs_test/tier0/lib/commands/dr/test_set_recovery_site.py b/pcs_test/tier0/lib/commands/dr/test_set_recovery_site.py -new file mode 100644 -index 00000000..06d80df1 ---- /dev/null -+++ b/pcs_test/tier0/lib/commands/dr/test_set_recovery_site.py -@@ -0,0 +1,702 @@ -+import json -+from unittest import TestCase -+ -+from pcs_test.tools import fixture -+from pcs_test.tools.command_env import get_env_tools -+ -+from pcs import settings -+from pcs.common import ( -+ file_type_codes, -+ report_codes, -+) -+from pcs.lib.dr.config.facade import DrRole -+from pcs.lib.commands import dr -+ -+DR_CFG_DESC = "disaster-recovery config" -+ -+COROSYNC_CONF_TEMPLATE = """\ -+totem {{ -+ version: 2 -+ cluster_name: cluster_name -+}} -+ -+nodelist {{ -+{node_list}}} -+""" -+ -+NODE_TEMPLATE_NO_NAME = """\ -+ 
node {{ -+ ring0_addr: {node} -+ nodeid: {id} -+ }} -+""" -+ -+NODE_TEMPLATE = """\ -+ node {{ -+ ring0_addr: {node} -+ name: {node} -+ nodeid: {id} -+ }} -+""" -+ -+ -+def export_cfg(cfg_struct): -+ return json.dumps(cfg_struct, indent=4, sort_keys=True).encode("utf-8") -+ -+def dr_cfg_fixture(local_role, remote_role, nodes): -+ return export_cfg(dict( -+ local=dict( -+ role=local_role.value, -+ ), -+ remote_sites=[ -+ dict( -+ role=remote_role.value, -+ nodes=[dict(name=node) for node in nodes], -+ ), -+ ] -+ )) -+ -+def corosync_conf_fixture(node_list): -+ return COROSYNC_CONF_TEMPLATE.format( -+ node_list="\n".join(node_list_fixture(node_list)), -+ ) -+ -+def node_list_fixture(node_list): -+ return [ -+ NODE_TEMPLATE.format(node=node, id=i) -+ for i, node in enumerate(node_list, start=1) -+ ] -+ -+ -+def generate_nodes(nodes_num, prefix=""): -+ return [f"{prefix}node{i}" for i in range(1, nodes_num + 1)] -+ -+ -+class CheckLive(TestCase): -+ def setUp(self): -+ self.env_assist, self.config = get_env_tools(self) -+ -+ def assert_live_required(self, forbidden_options): -+ self.env_assist.assert_raise_library_error( -+ lambda: dr.set_recovery_site(self.env_assist.get_env(), "node"), -+ [ -+ fixture.error( -+ report_codes.LIVE_ENVIRONMENT_REQUIRED, -+ forbidden_options=forbidden_options -+ ) -+ ], -+ expected_in_processor=False -+ ) -+ -+ def test_mock_corosync(self): -+ self.config.env.set_corosync_conf_data( -+ corosync_conf_fixture(generate_nodes(3)) -+ ) -+ self.assert_live_required([file_type_codes.COROSYNC_CONF]) -+ -+ def test_mock_cib(self): -+ self.config.env.set_cib_data("") -+ self.assert_live_required([file_type_codes.CIB]) -+ -+ def test_mock(self): -+ self.config.env.set_corosync_conf_data( -+ corosync_conf_fixture(generate_nodes(3)) -+ ) -+ self.config.env.set_cib_data("") -+ self.assert_live_required([ -+ file_type_codes.CIB, -+ file_type_codes.COROSYNC_CONF, -+ ]) -+ -+ -+class SetRecoverySiteSuccess(TestCase): -+ def setUp(self): -+ 
self.env_assist, self.config = get_env_tools(self) -+ -+ def _test_minimal(self, local_cluster_size, recovery_cluster_size): -+ local_nodes = generate_nodes(local_cluster_size) -+ remote_nodes = generate_nodes(recovery_cluster_size, prefix="recovery-") -+ orig_node = remote_nodes[-1] -+ cfg = self.config -+ cfg.env.set_known_nodes(local_nodes + remote_nodes) -+ cfg.raw_file.exists( -+ file_type_codes.PCS_DR_CONFIG, -+ settings.pcsd_dr_config_location, -+ exists=False, -+ ) -+ cfg.corosync_conf.load_content(corosync_conf_fixture(local_nodes)) -+ cfg.http.corosync.get_corosync_conf( -+ corosync_conf_fixture(remote_nodes), node_labels=[orig_node] -+ ) -+ cfg.http.files.put_files( -+ node_labels=remote_nodes, -+ pcs_disaster_recovery_conf=dr_cfg_fixture( -+ DrRole.RECOVERY, DrRole.PRIMARY, local_nodes -+ ), -+ name="distribute_remote", -+ ) -+ cfg.http.files.put_files( -+ node_labels=local_nodes, -+ pcs_disaster_recovery_conf=dr_cfg_fixture( -+ DrRole.PRIMARY, DrRole.RECOVERY, remote_nodes -+ ), -+ name="distribute_local", -+ ) -+ dr.set_recovery_site(self.env_assist.get_env(), orig_node) -+ self.env_assist.assert_reports( -+ [ -+ fixture.info( -+ report_codes.FILES_DISTRIBUTION_STARTED, -+ file_list=[DR_CFG_DESC], -+ node_list=remote_nodes, -+ ) -+ ] + [ -+ fixture.info( -+ report_codes.FILE_DISTRIBUTION_SUCCESS, -+ file_description=DR_CFG_DESC, -+ node=node, -+ ) for node in remote_nodes -+ ] + [ -+ fixture.info( -+ report_codes.FILES_DISTRIBUTION_STARTED, -+ file_list=[DR_CFG_DESC], -+ node_list=local_nodes, -+ ) -+ ] + [ -+ fixture.info( -+ report_codes.FILE_DISTRIBUTION_SUCCESS, -+ file_description=DR_CFG_DESC, -+ node=node, -+ ) for node in local_nodes -+ ] -+ ) -+ -+ def test_minimal_local_1_remote_1(self): -+ self._test_minimal(1, 1) -+ -+ def test_minimal_local_1_remote_2(self): -+ self._test_minimal(1, 2) -+ -+ def test_minimal_local_1_remote_3(self): -+ self._test_minimal(1, 3) -+ -+ def test_minimal_local_2_remote_1(self): -+ self._test_minimal(2, 1) -+ -+ 
def test_minimal_local_2_remote_2(self): -+ self._test_minimal(2, 2) -+ -+ def test_minimal_local_2_remote_3(self): -+ self._test_minimal(2, 3) -+ -+ def test_minimal_local_3_remote_1(self): -+ self._test_minimal(3, 1) -+ -+ def test_minimal_local_3_remote_2(self): -+ self._test_minimal(3, 2) -+ -+ def test_minimal_local_3_remote_3(self): -+ self._test_minimal(3, 3) -+ -+ -+class FailureValidations(TestCase): -+ def setUp(self): -+ self.env_assist, self.config = get_env_tools(self) -+ self.local_nodes = generate_nodes(4) -+ -+ def test_dr_cfg_exist(self): -+ orig_node = "node" -+ cfg = self.config -+ cfg.env.set_known_nodes(self.local_nodes + [orig_node]) -+ cfg.raw_file.exists( -+ file_type_codes.PCS_DR_CONFIG, -+ settings.pcsd_dr_config_location, -+ exists=True, -+ ) -+ cfg.corosync_conf.load_content(corosync_conf_fixture(self.local_nodes)) -+ self.env_assist.assert_raise_library_error( -+ lambda: dr.set_recovery_site(self.env_assist.get_env(), orig_node), -+ ) -+ self.env_assist.assert_reports([ -+ fixture.error( -+ report_codes.DR_CONFIG_ALREADY_EXIST, -+ ) -+ ]) -+ -+ def test_local_nodes_name_missing(self): -+ orig_node = "node" -+ cfg = self.config -+ cfg.env.set_known_nodes(self.local_nodes + [orig_node]) -+ cfg.raw_file.exists( -+ file_type_codes.PCS_DR_CONFIG, -+ settings.pcsd_dr_config_location, -+ exists=False, -+ ) -+ cfg.corosync_conf.load_content( -+ COROSYNC_CONF_TEMPLATE.format( -+ node_list="\n".join( -+ [ -+ NODE_TEMPLATE_NO_NAME.format( -+ node=self.local_nodes[0], id=len(self.local_nodes) -+ ) -+ ] + node_list_fixture(self.local_nodes[1:]) -+ ) -+ ) -+ ) -+ self.env_assist.assert_raise_library_error( -+ lambda: dr.set_recovery_site(self.env_assist.get_env(), orig_node), -+ ) -+ self.env_assist.assert_reports([ -+ fixture.error( -+ report_codes.COROSYNC_CONFIG_MISSING_NAMES_OF_NODES, -+ fatal=True, -+ ) -+ ]) -+ -+ def test_node_part_of_local_cluster(self): -+ orig_node = self.local_nodes[-1] -+ cfg = self.config -+ 
cfg.env.set_known_nodes(self.local_nodes + [orig_node]) -+ cfg.raw_file.exists( -+ file_type_codes.PCS_DR_CONFIG, -+ settings.pcsd_dr_config_location, -+ exists=False, -+ ) -+ cfg.corosync_conf.load_content(corosync_conf_fixture(self.local_nodes)) -+ self.env_assist.assert_raise_library_error( -+ lambda: dr.set_recovery_site(self.env_assist.get_env(), orig_node), -+ ) -+ self.env_assist.assert_reports([ -+ fixture.error( -+ report_codes.NODE_IN_LOCAL_CLUSTER, -+ node=orig_node, -+ ) -+ ]) -+ -+ def test_tokens_missing_for_local_nodes(self): -+ orig_node = "node" -+ cfg = self.config -+ cfg.env.set_known_nodes(self.local_nodes[:-1] + [orig_node]) -+ cfg.raw_file.exists( -+ file_type_codes.PCS_DR_CONFIG, -+ settings.pcsd_dr_config_location, -+ exists=False, -+ ) -+ cfg.corosync_conf.load_content(corosync_conf_fixture(self.local_nodes)) -+ self.env_assist.assert_raise_library_error( -+ lambda: dr.set_recovery_site(self.env_assist.get_env(), orig_node), -+ ) -+ self.env_assist.assert_reports([ -+ fixture.error( -+ report_codes.HOST_NOT_FOUND, -+ host_list=self.local_nodes[-1:], -+ ) -+ ]) -+ -+ def test_token_missing_for_node(self): -+ orig_node = "node" -+ cfg = self.config -+ cfg.env.set_known_nodes(self.local_nodes) -+ cfg.raw_file.exists( -+ file_type_codes.PCS_DR_CONFIG, -+ settings.pcsd_dr_config_location, -+ exists=False, -+ ) -+ cfg.corosync_conf.load_content(corosync_conf_fixture(self.local_nodes)) -+ self.env_assist.assert_raise_library_error( -+ lambda: dr.set_recovery_site(self.env_assist.get_env(), orig_node), -+ ) -+ self.env_assist.assert_reports([ -+ fixture.error( -+ report_codes.HOST_NOT_FOUND, -+ host_list=[orig_node], -+ ) -+ ]) -+ -+ def test_tokens_missing_for_remote_cluster(self): -+ remote_nodes = generate_nodes(3, prefix="recovery-") -+ orig_node = remote_nodes[0] -+ cfg = self.config -+ cfg.env.set_known_nodes(self.local_nodes + remote_nodes[:-1]) -+ cfg.raw_file.exists( -+ file_type_codes.PCS_DR_CONFIG, -+ settings.pcsd_dr_config_location, -+ 
exists=False, -+ ) -+ cfg.corosync_conf.load_content(corosync_conf_fixture(self.local_nodes)) -+ cfg.http.corosync.get_corosync_conf( -+ corosync_conf_fixture(remote_nodes), node_labels=[orig_node] -+ ) -+ self.env_assist.assert_raise_library_error( -+ lambda: dr.set_recovery_site(self.env_assist.get_env(), orig_node), -+ ) -+ self.env_assist.assert_reports([ -+ fixture.error( -+ report_codes.HOST_NOT_FOUND, -+ host_list=remote_nodes[-1:], -+ ) -+ ]) -+ -+ -+REASON = "error msg" -+ -+ -+class FailureRemoteCorocyncConf(TestCase): -+ def setUp(self): -+ self.env_assist, self.config = get_env_tools(self) -+ self.local_nodes = generate_nodes(4) -+ self.remote_nodes = generate_nodes(3, prefix="recovery-") -+ self.node = self.remote_nodes[0] -+ -+ self.config.env.set_known_nodes(self.local_nodes + self.remote_nodes) -+ self.config.raw_file.exists( -+ file_type_codes.PCS_DR_CONFIG, -+ settings.pcsd_dr_config_location, -+ exists=False, -+ ) -+ self.config.corosync_conf.load_content( -+ corosync_conf_fixture(self.local_nodes) -+ ) -+ -+ def test_network_issue(self): -+ self.config.http.corosync.get_corosync_conf( -+ communication_list=[ -+ dict( -+ label=self.node, -+ was_connected=False, -+ error_msg=REASON, -+ ) -+ ] -+ ) -+ self.env_assist.assert_raise_library_error( -+ lambda: dr.set_recovery_site(self.env_assist.get_env(), self.node), -+ ) -+ self.env_assist.assert_reports([ -+ fixture.warn( -+ report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT, -+ node=self.node, -+ command="remote/get_corosync_conf", -+ reason=REASON, -+ -+ ), -+ fixture.error(report_codes.UNABLE_TO_PERFORM_OPERATION_ON_ANY_NODE) -+ ]) -+ -+ def test_file_does_not_exist(self): -+ self.config.http.corosync.get_corosync_conf( -+ communication_list=[ -+ dict( -+ label=self.node, -+ response_code=400, -+ output=REASON, -+ ) -+ ] -+ ) -+ self.env_assist.assert_raise_library_error( -+ lambda: dr.set_recovery_site(self.env_assist.get_env(), self.node), -+ ) -+ self.env_assist.assert_reports([ -+ 
fixture.warn( -+ report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL, -+ node=self.node, -+ command="remote/get_corosync_conf", -+ reason=REASON, -+ -+ ), -+ fixture.error(report_codes.UNABLE_TO_PERFORM_OPERATION_ON_ANY_NODE) -+ ]) -+ -+ def test_node_names_missing(self): -+ self.config.http.corosync.get_corosync_conf( -+ COROSYNC_CONF_TEMPLATE.format( -+ node_list="\n".join( -+ [ -+ NODE_TEMPLATE_NO_NAME.format( -+ node=self.remote_nodes[-1], -+ id=len(self.remote_nodes), -+ ) -+ ] + node_list_fixture(self.remote_nodes[:-1]) -+ ) -+ ), -+ node_labels=[self.node], -+ ) -+ self.env_assist.assert_raise_library_error( -+ lambda: dr.set_recovery_site(self.env_assist.get_env(), self.node), -+ ) -+ self.env_assist.assert_reports([ -+ fixture.error( -+ report_codes.COROSYNC_CONFIG_MISSING_NAMES_OF_NODES, -+ fatal=True, -+ ) -+ ]) -+ -+ -+class FailureRemoteDrCfgDistribution(TestCase): -+ # pylint: disable=too-many-instance-attributes -+ def setUp(self): -+ self.env_assist, self.config = get_env_tools(self) -+ self.local_nodes = generate_nodes(4) -+ self.remote_nodes = generate_nodes(3, prefix="recovery-") -+ self.node = self.remote_nodes[0] -+ self.failed_nodes = self.remote_nodes[-1:] -+ successful_nodes = self.remote_nodes[:-1] -+ -+ self.config.env.set_known_nodes(self.local_nodes + self.remote_nodes) -+ self.config.raw_file.exists( -+ file_type_codes.PCS_DR_CONFIG, -+ settings.pcsd_dr_config_location, -+ exists=False, -+ ) -+ self.config.corosync_conf.load_content( -+ corosync_conf_fixture(self.local_nodes) -+ ) -+ self.config.http.corosync.get_corosync_conf( -+ corosync_conf_fixture(self.remote_nodes), node_labels=[self.node] -+ ) -+ -+ self.success_communication = [ -+ dict(label=node) for node in successful_nodes -+ ] -+ self.expected_reports = [ -+ fixture.info( -+ report_codes.FILES_DISTRIBUTION_STARTED, -+ file_list=[DR_CFG_DESC], -+ node_list=self.remote_nodes, -+ ) -+ ] + [ -+ fixture.info( -+ report_codes.FILE_DISTRIBUTION_SUCCESS, -+ 
file_description=DR_CFG_DESC, -+ node=node, -+ ) for node in successful_nodes -+ ] -+ -+ def test_write_failure(self): -+ self.config.http.files.put_files( -+ communication_list=self.success_communication + [ -+ dict( -+ label=node, -+ output=json.dumps(dict(files={ -+ DR_CFG_DESC: dict( -+ code="unexpected", -+ message=REASON -+ ), -+ })) -+ ) for node in self.failed_nodes -+ ], -+ pcs_disaster_recovery_conf=dr_cfg_fixture( -+ DrRole.RECOVERY, DrRole.PRIMARY, self.local_nodes -+ ), -+ ) -+ self.env_assist.assert_raise_library_error( -+ lambda: dr.set_recovery_site(self.env_assist.get_env(), self.node), -+ ) -+ self.env_assist.assert_reports( -+ self.expected_reports + [ -+ fixture.error( -+ report_codes.FILE_DISTRIBUTION_ERROR, -+ file_description=DR_CFG_DESC, -+ reason=REASON, -+ node=node, -+ ) for node in self.failed_nodes -+ ] -+ ) -+ -+ def test_network_failure(self): -+ self.config.http.files.put_files( -+ communication_list=self.success_communication + [ -+ dict( -+ label=node, -+ was_connected=False, -+ error_msg=REASON, -+ ) for node in self.failed_nodes -+ ], -+ pcs_disaster_recovery_conf=dr_cfg_fixture( -+ DrRole.RECOVERY, DrRole.PRIMARY, self.local_nodes -+ ), -+ ) -+ self.env_assist.assert_raise_library_error( -+ lambda: dr.set_recovery_site(self.env_assist.get_env(), self.node), -+ ) -+ self.env_assist.assert_reports( -+ self.expected_reports + [ -+ fixture.error( -+ report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT, -+ command="remote/put_file", -+ reason=REASON, -+ node=node, -+ ) for node in self.failed_nodes -+ ] -+ ) -+ -+ def test_communication_error(self): -+ self.config.http.files.put_files( -+ communication_list=self.success_communication + [ -+ dict( -+ label=node, -+ response_code=400, -+ output=REASON, -+ ) for node in self.failed_nodes -+ ], -+ pcs_disaster_recovery_conf=dr_cfg_fixture( -+ DrRole.RECOVERY, DrRole.PRIMARY, self.local_nodes -+ ), -+ ) -+ self.env_assist.assert_raise_library_error( -+ lambda: 
dr.set_recovery_site(self.env_assist.get_env(), self.node), -+ ) -+ self.env_assist.assert_reports( -+ self.expected_reports + [ -+ fixture.error( -+ report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL, -+ command="remote/put_file", -+ reason=REASON, -+ node=node, -+ ) for node in self.failed_nodes -+ ] -+ ) -+ -+ -+class FailureLocalDrCfgDistribution(TestCase): -+ # pylint: disable=too-many-instance-attributes -+ def setUp(self): -+ self.env_assist, self.config = get_env_tools(self) -+ local_nodes = generate_nodes(4) -+ self.remote_nodes = generate_nodes(3, prefix="recovery-") -+ self.node = self.remote_nodes[0] -+ self.failed_nodes = local_nodes[-1:] -+ successful_nodes = local_nodes[:-1] -+ -+ self.config.env.set_known_nodes(local_nodes + self.remote_nodes) -+ self.config.raw_file.exists( -+ file_type_codes.PCS_DR_CONFIG, -+ settings.pcsd_dr_config_location, -+ exists=False, -+ ) -+ self.config.corosync_conf.load_content( -+ corosync_conf_fixture(local_nodes) -+ ) -+ self.config.http.corosync.get_corosync_conf( -+ corosync_conf_fixture(self.remote_nodes), node_labels=[self.node] -+ ) -+ self.config.http.files.put_files( -+ node_labels=self.remote_nodes, -+ pcs_disaster_recovery_conf=dr_cfg_fixture( -+ DrRole.RECOVERY, DrRole.PRIMARY, local_nodes -+ ), -+ name="distribute_remote", -+ ) -+ -+ self.success_communication = [ -+ dict(label=node) for node in successful_nodes -+ ] -+ self.expected_reports = [ -+ fixture.info( -+ report_codes.FILES_DISTRIBUTION_STARTED, -+ file_list=[DR_CFG_DESC], -+ node_list=self.remote_nodes, -+ ) -+ ] + [ -+ fixture.info( -+ report_codes.FILE_DISTRIBUTION_SUCCESS, -+ file_description=DR_CFG_DESC, -+ node=node, -+ ) for node in self.remote_nodes -+ ] + [ -+ fixture.info( -+ report_codes.FILES_DISTRIBUTION_STARTED, -+ file_list=[DR_CFG_DESC], -+ node_list=local_nodes, -+ ) -+ ] + [ -+ fixture.info( -+ report_codes.FILE_DISTRIBUTION_SUCCESS, -+ file_description=DR_CFG_DESC, -+ node=node, -+ ) for node in successful_nodes -+ ] -+ -+ 
def test_write_failure(self): -+ self.config.http.files.put_files( -+ communication_list=self.success_communication + [ -+ dict( -+ label=node, -+ output=json.dumps(dict(files={ -+ DR_CFG_DESC: dict( -+ code="unexpected", -+ message=REASON -+ ), -+ })) -+ ) for node in self.failed_nodes -+ ], -+ pcs_disaster_recovery_conf=dr_cfg_fixture( -+ DrRole.PRIMARY, DrRole.RECOVERY, self.remote_nodes -+ ), -+ ) -+ self.env_assist.assert_raise_library_error( -+ lambda: dr.set_recovery_site(self.env_assist.get_env(), self.node), -+ ) -+ self.env_assist.assert_reports( -+ self.expected_reports + [ -+ fixture.error( -+ report_codes.FILE_DISTRIBUTION_ERROR, -+ file_description=DR_CFG_DESC, -+ reason=REASON, -+ node=node, -+ ) for node in self.failed_nodes -+ ] -+ ) -+ -+ def test_network_failure(self): -+ self.config.http.files.put_files( -+ communication_list=self.success_communication + [ -+ dict( -+ label=node, -+ was_connected=False, -+ error_msg=REASON, -+ ) for node in self.failed_nodes -+ ], -+ pcs_disaster_recovery_conf=dr_cfg_fixture( -+ DrRole.PRIMARY, DrRole.RECOVERY, self.remote_nodes -+ ), -+ ) -+ self.env_assist.assert_raise_library_error( -+ lambda: dr.set_recovery_site(self.env_assist.get_env(), self.node), -+ ) -+ self.env_assist.assert_reports( -+ self.expected_reports + [ -+ fixture.error( -+ report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT, -+ command="remote/put_file", -+ reason=REASON, -+ node=node, -+ ) for node in self.failed_nodes -+ ] -+ ) -+ -+ def test_communication_error(self): -+ self.config.http.files.put_files( -+ communication_list=self.success_communication + [ -+ dict( -+ label=node, -+ response_code=400, -+ output=REASON, -+ ) for node in self.failed_nodes -+ ], -+ pcs_disaster_recovery_conf=dr_cfg_fixture( -+ DrRole.PRIMARY, DrRole.RECOVERY, self.remote_nodes -+ ), -+ ) -+ self.env_assist.assert_raise_library_error( -+ lambda: dr.set_recovery_site(self.env_assist.get_env(), self.node), -+ ) -+ self.env_assist.assert_reports( -+ 
self.expected_reports + [ -+ fixture.error( -+ report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL, -+ command="remote/put_file", -+ reason=REASON, -+ node=node, -+ ) for node in self.failed_nodes -+ ] -+ ) -diff --git a/pcs_test/tier0/lib/commands/dr/test_status.py b/pcs_test/tier0/lib/commands/dr/test_status.py -new file mode 100644 -index 00000000..b46eb757 ---- /dev/null -+++ b/pcs_test/tier0/lib/commands/dr/test_status.py -@@ -0,0 +1,756 @@ -+import json -+import re -+from unittest import TestCase -+ -+from pcs import settings -+from pcs.common import ( -+ file_type_codes, -+ report_codes, -+) -+from pcs.common.dr import DrRole -+from pcs.common.file import RawFileError -+from pcs.lib.commands import dr -+ -+from pcs_test.tools.command_env import get_env_tools -+from pcs_test.tools import fixture -+ -+ -+REASON = "error msg" -+ -+class CheckLive(TestCase): -+ def setUp(self): -+ self.env_assist, self.config = get_env_tools(self) -+ -+ def assert_live_required(self, forbidden_options): -+ self.env_assist.assert_raise_library_error( -+ lambda: dr.status_all_sites_plaintext(self.env_assist.get_env()), -+ [ -+ fixture.error( -+ report_codes.LIVE_ENVIRONMENT_REQUIRED, -+ forbidden_options=forbidden_options -+ ) -+ ], -+ expected_in_processor=False -+ ) -+ -+ def test_mock_corosync(self): -+ self.config.env.set_corosync_conf_data("corosync conf") -+ self.assert_live_required([file_type_codes.COROSYNC_CONF]) -+ -+ def test_mock_cib(self): -+ self.config.env.set_cib_data("") -+ self.assert_live_required([file_type_codes.CIB]) -+ -+ def test_mock(self): -+ self.config.env.set_corosync_conf_data("corosync conf") -+ self.config.env.set_cib_data("") -+ self.assert_live_required([ -+ file_type_codes.CIB, -+ file_type_codes.COROSYNC_CONF, -+ ]) -+ -+class FixtureMixin(): -+ def _set_up(self, local_node_count=2): -+ self.local_node_name_list = [ -+ f"node{i}" for i in range(1, local_node_count + 1) -+ ] -+ self.remote_node_name_list = ["recovery-node"] -+ 
self.config.env.set_known_nodes( -+ self.local_node_name_list + self.remote_node_name_list -+ ) -+ self.local_status = "local cluster\nstatus\n" -+ self.remote_status = "remote cluster\nstatus\n" -+ -+ def _fixture_load_configs(self): -+ (self.config -+ .raw_file.exists( -+ file_type_codes.PCS_DR_CONFIG, -+ settings.pcsd_dr_config_location, -+ ) -+ .raw_file.read( -+ file_type_codes.PCS_DR_CONFIG, -+ settings.pcsd_dr_config_location, -+ content=""" -+ { -+ "local": { -+ "role": "PRIMARY" -+ }, -+ "remote_sites": [ -+ { -+ "nodes": [ -+ { -+ "name": "recovery-node" -+ } -+ ], -+ "role": "RECOVERY" -+ } -+ ] -+ } -+ """, -+ ) -+ .corosync_conf.load(node_name_list=self.local_node_name_list) -+ ) -+ -+ def _fixture_result(self, local_success=True, remote_success=True): -+ return [ -+ { -+ "local_site": True, -+ "site_role": DrRole.PRIMARY, -+ "status_plaintext": self.local_status if local_success else "", -+ "status_successfully_obtained": local_success, -+ }, -+ { -+ "local_site": False, -+ "site_role": DrRole.RECOVERY, -+ "status_plaintext": ( -+ self.remote_status if remote_success else "" -+ ), -+ "status_successfully_obtained": remote_success, -+ } -+ ] -+ -+class Success(FixtureMixin, TestCase): -+ def setUp(self): -+ self.env_assist, self.config = get_env_tools(self) -+ self._set_up() -+ -+ def _assert_success(self, hide_inactive_resources, verbose): -+ self._fixture_load_configs() -+ (self.config -+ .http.status.get_full_cluster_status_plaintext( -+ name="http.status.get_full_cluster_status_plaintext.local", -+ node_labels=self.local_node_name_list[:1], -+ hide_inactive_resources=hide_inactive_resources, -+ verbose=verbose, -+ cluster_status_plaintext=self.local_status, -+ ) -+ .http.status.get_full_cluster_status_plaintext( -+ name="http.status.get_full_cluster_status_plaintext.remote", -+ node_labels=self.remote_node_name_list[:1], -+ hide_inactive_resources=hide_inactive_resources, -+ verbose=verbose, -+ cluster_status_plaintext=self.remote_status, -+ ) -+ ) 
-+ result = dr.status_all_sites_plaintext( -+ self.env_assist.get_env(), -+ hide_inactive_resources=hide_inactive_resources, -+ verbose=verbose, -+ ) -+ self.assertEqual(result, self._fixture_result()) -+ -+ def test_success_minimal(self): -+ self._assert_success(False, False) -+ -+ def test_success_full(self): -+ self._assert_success(False, True) -+ -+ def test_success_hide_inactive(self): -+ self._assert_success(True, False) -+ -+ def test_success_all_flags(self): -+ self._assert_success(True, True) -+ -+ def test_local_not_running_first_node(self): -+ self._fixture_load_configs() -+ (self.config -+ .http.status.get_full_cluster_status_plaintext( -+ name="http.status.get_full_cluster_status_plaintext.local", -+ cluster_status_plaintext=self.local_status, -+ communication_list=[ -+ [dict( -+ label=self.local_node_name_list[0], -+ output=json.dumps(dict( -+ status="error", -+ status_msg="", -+ data=None, -+ report_list=[ -+ { -+ "severity": "ERROR", -+ "code": "CRM_MON_ERROR", -+ "info": { -+ "reason": REASON, -+ }, -+ "forceable": None, -+ "report_text": "translated report", -+ } -+ ] -+ )), -+ )], -+ [dict( -+ label=self.local_node_name_list[1], -+ )], -+ ] -+ ) -+ .http.status.get_full_cluster_status_plaintext( -+ name="http.status.get_full_cluster_status_plaintext.remote", -+ node_labels=self.remote_node_name_list[:1], -+ cluster_status_plaintext=self.remote_status, -+ ) -+ ) -+ result = dr.status_all_sites_plaintext(self.env_assist.get_env()) -+ self.assertEqual(result, self._fixture_result()) -+ self.env_assist.assert_reports([ -+ fixture.error( -+ report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL, -+ node=self.local_node_name_list[0], -+ command="remote/cluster_status_plaintext", -+ reason="translated report", -+ ), -+ ]) -+ -+ def test_local_not_running(self): -+ self._fixture_load_configs() -+ (self.config -+ .http.status.get_full_cluster_status_plaintext( -+ name="http.status.get_full_cluster_status_plaintext.local", -+ cmd_status="error", -+ 
cmd_status_msg="", -+ cluster_status_plaintext="", -+ report_list=[ -+ { -+ "severity": "ERROR", -+ "code": "CRM_MON_ERROR", -+ "info": { -+ "reason": REASON, -+ }, -+ "forceable": None, -+ "report_text": "translated report", -+ } -+ ], -+ communication_list=[ -+ [dict( -+ label=self.local_node_name_list[0], -+ )], -+ [dict( -+ label=self.local_node_name_list[1], -+ )], -+ ] -+ ) -+ .http.status.get_full_cluster_status_plaintext( -+ name="http.status.get_full_cluster_status_plaintext.remote", -+ node_labels=self.remote_node_name_list[:1], -+ cluster_status_plaintext=self.remote_status, -+ ) -+ ) -+ result = dr.status_all_sites_plaintext(self.env_assist.get_env()) -+ self.assertEqual(result, self._fixture_result(local_success=False)) -+ self.env_assist.assert_reports( -+ [ -+ fixture.error( -+ report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL, -+ node=node, -+ command="remote/cluster_status_plaintext", -+ reason="translated report", -+ ) -+ for node in self.local_node_name_list -+ ] -+ ) -+ -+ def test_remote_not_running(self): -+ self._fixture_load_configs() -+ (self.config -+ .http.status.get_full_cluster_status_plaintext( -+ name="http.status.get_full_cluster_status_plaintext.local", -+ node_labels=self.local_node_name_list[:1], -+ cluster_status_plaintext=self.local_status, -+ ) -+ .http.status.get_full_cluster_status_plaintext( -+ name="http.status.get_full_cluster_status_plaintext.remote", -+ node_labels=self.remote_node_name_list[:1], -+ cmd_status="error", -+ cmd_status_msg="", -+ cluster_status_plaintext="", -+ report_list=[ -+ { -+ "severity": "ERROR", -+ "code": "CRM_MON_ERROR", -+ "info": { -+ "reason": REASON, -+ }, -+ "forceable": None, -+ "report_text": "translated report", -+ } -+ ], -+ ) -+ ) -+ result = dr.status_all_sites_plaintext(self.env_assist.get_env()) -+ self.assertEqual(result, self._fixture_result(remote_success=False)) -+ self.env_assist.assert_reports( -+ [ -+ fixture.error( -+ report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL, -+ 
node=node, -+ command="remote/cluster_status_plaintext", -+ reason="translated report", -+ ) -+ for node in self.remote_node_name_list -+ ] -+ ) -+ -+ def test_both_not_running(self): -+ self._fixture_load_configs() -+ (self.config -+ .http.status.get_full_cluster_status_plaintext( -+ name="http.status.get_full_cluster_status_plaintext.local", -+ cmd_status="error", -+ cmd_status_msg="", -+ cluster_status_plaintext="", -+ report_list=[ -+ { -+ "severity": "ERROR", -+ "code": "CRM_MON_ERROR", -+ "info": { -+ "reason": REASON, -+ }, -+ "forceable": None, -+ "report_text": "translated report", -+ } -+ ], -+ communication_list=[ -+ [dict( -+ label=self.local_node_name_list[0], -+ )], -+ [dict( -+ label=self.local_node_name_list[1], -+ )], -+ ] -+ ) -+ .http.status.get_full_cluster_status_plaintext( -+ name="http.status.get_full_cluster_status_plaintext.remote", -+ node_labels=self.remote_node_name_list[:1], -+ cmd_status="error", -+ cmd_status_msg="", -+ cluster_status_plaintext="", -+ report_list=[ -+ { -+ "severity": "ERROR", -+ "code": "CRM_MON_ERROR", -+ "info": { -+ "reason": REASON, -+ }, -+ "forceable": None, -+ "report_text": "translated report", -+ } -+ ], -+ ) -+ ) -+ result = dr.status_all_sites_plaintext(self.env_assist.get_env()) -+ self.assertEqual(result, self._fixture_result( -+ local_success=False, remote_success=False -+ )) -+ self.env_assist.assert_reports( -+ [ -+ fixture.error( -+ report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL, -+ node=node, -+ command="remote/cluster_status_plaintext", -+ reason="translated report", -+ ) -+ for node in ( -+ self.local_node_name_list + self.remote_node_name_list -+ ) -+ ] -+ ) -+ -+ -+class CommunicationIssue(FixtureMixin, TestCase): -+ def setUp(self): -+ self.env_assist, self.config = get_env_tools(self) -+ self._set_up() -+ -+ def test_unknown_node(self): -+ self.config.env.set_known_nodes( -+ self.local_node_name_list[1:] + self.remote_node_name_list -+ ) -+ self._fixture_load_configs() -+ (self.config -+ 
.http.status.get_full_cluster_status_plaintext( -+ name="http.status.get_full_cluster_status_plaintext.local", -+ node_labels=self.local_node_name_list[1:], -+ cluster_status_plaintext=self.local_status, -+ ) -+ .http.status.get_full_cluster_status_plaintext( -+ name="http.status.get_full_cluster_status_plaintext.remote", -+ node_labels=self.remote_node_name_list[:1], -+ cluster_status_plaintext=self.remote_status, -+ ) -+ ) -+ result = dr.status_all_sites_plaintext(self.env_assist.get_env()) -+ self.assertEqual(result, self._fixture_result()) -+ self.env_assist.assert_reports([ -+ fixture.warn( -+ report_codes.HOST_NOT_FOUND, -+ host_list=["node1"], -+ ), -+ ]) -+ -+ def test_unknown_all_nodes_in_site(self): -+ self.config.env.set_known_nodes( -+ self.local_node_name_list -+ ) -+ self._fixture_load_configs() -+ self.env_assist.assert_raise_library_error( -+ lambda: dr.status_all_sites_plaintext(self.env_assist.get_env()), -+ ) -+ self.env_assist.assert_reports([ -+ fixture.warn( -+ report_codes.HOST_NOT_FOUND, -+ host_list=self.remote_node_name_list, -+ ), -+ fixture.error( -+ report_codes.NONE_HOST_FOUND, -+ ), -+ ]) -+ -+ def test_missing_node_names(self): -+ self._fixture_load_configs() -+ coro_call = self.config.calls.get("corosync_conf.load") -+ (self.config -+ .http.status.get_full_cluster_status_plaintext( -+ name="http.status.get_full_cluster_status_plaintext.local", -+ node_labels=[], -+ ) -+ .http.status.get_full_cluster_status_plaintext( -+ name="http.status.get_full_cluster_status_plaintext.remote", -+ node_labels=self.remote_node_name_list[:1], -+ cluster_status_plaintext=self.remote_status, -+ ) -+ ) -+ coro_call.content = re.sub(r"name: node\d", "", coro_call.content) -+ result = dr.status_all_sites_plaintext(self.env_assist.get_env()) -+ self.assertEqual(result, self._fixture_result(local_success=False)) -+ self.env_assist.assert_reports([ -+ fixture.warn( -+ report_codes.COROSYNC_CONFIG_MISSING_NAMES_OF_NODES, -+ fatal=False, -+ ), -+ ]) -+ -+ def 
test_node_issues(self): -+ self._set_up(local_node_count=7) -+ self._fixture_load_configs() -+ (self.config -+ .http.status.get_full_cluster_status_plaintext( -+ name="http.status.get_full_cluster_status_plaintext.local", -+ cluster_status_plaintext=self.local_status, -+ communication_list=[ -+ [dict( -+ label=self.local_node_name_list[0], -+ was_connected=False, -+ )], -+ [dict( -+ label=self.local_node_name_list[1], -+ response_code=401, -+ )], -+ [dict( -+ label=self.local_node_name_list[2], -+ response_code=500, -+ )], -+ [dict( -+ label=self.local_node_name_list[3], -+ response_code=404, -+ )], -+ [dict( -+ label=self.local_node_name_list[4], -+ output="invalid data", -+ )], -+ [dict( -+ label=self.local_node_name_list[5], -+ output=json.dumps(dict(status="success")) -+ )], -+ [dict( -+ label=self.local_node_name_list[6], -+ )], -+ ] -+ ) -+ .http.status.get_full_cluster_status_plaintext( -+ name="http.status.get_full_cluster_status_plaintext.remote", -+ node_labels=self.remote_node_name_list[:1], -+ cluster_status_plaintext=self.remote_status, -+ ) -+ ) -+ result = dr.status_all_sites_plaintext(self.env_assist.get_env()) -+ self.assertEqual(result, self._fixture_result()) -+ self.env_assist.assert_reports([ -+ fixture.warn( -+ report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT, -+ command="remote/cluster_status_plaintext", -+ node="node1", -+ reason=None, -+ ), -+ fixture.warn( -+ report_codes.NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED, -+ command="remote/cluster_status_plaintext", -+ node="node2", -+ reason="HTTP error: 401", -+ ), -+ fixture.warn( -+ report_codes.NODE_COMMUNICATION_ERROR, -+ command="remote/cluster_status_plaintext", -+ node="node3", -+ reason="HTTP error: 500", -+ ), -+ fixture.warn( -+ report_codes.NODE_COMMUNICATION_ERROR_UNSUPPORTED_COMMAND, -+ command="remote/cluster_status_plaintext", -+ node="node4", -+ reason="HTTP error: 404", -+ ), -+ fixture.warn( -+ report_codes.INVALID_RESPONSE_FORMAT, -+ node="node5", -+ ), -+ fixture.warn( 
-+ report_codes.INVALID_RESPONSE_FORMAT, -+ node="node6", -+ ), -+ ]) -+ -+ def test_local_site_down(self): -+ self._fixture_load_configs() -+ (self.config -+ .http.status.get_full_cluster_status_plaintext( -+ name="http.status.get_full_cluster_status_plaintext.local", -+ cluster_status_plaintext=self.local_status, -+ communication_list=[ -+ [dict( -+ label=self.local_node_name_list[0], -+ was_connected=False, -+ )], -+ [dict( -+ label=self.local_node_name_list[1], -+ was_connected=False, -+ )], -+ ] -+ ) -+ .http.status.get_full_cluster_status_plaintext( -+ name="http.status.get_full_cluster_status_plaintext.remote", -+ node_labels=self.remote_node_name_list[:1], -+ cluster_status_plaintext=self.remote_status, -+ ) -+ ) -+ result = dr.status_all_sites_plaintext(self.env_assist.get_env()) -+ self.assertEqual(result, self._fixture_result(local_success=False)) -+ self.env_assist.assert_reports([ -+ fixture.warn( -+ report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT, -+ command="remote/cluster_status_plaintext", -+ node="node1", -+ reason=None, -+ ), -+ fixture.warn( -+ report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT, -+ command="remote/cluster_status_plaintext", -+ node="node2", -+ reason=None, -+ ), -+ ]) -+ -+ def test_remote_site_down(self): -+ self._fixture_load_configs() -+ (self.config -+ .http.status.get_full_cluster_status_plaintext( -+ name="http.status.get_full_cluster_status_plaintext.local", -+ node_labels=self.local_node_name_list[:1], -+ cluster_status_plaintext=self.local_status, -+ ) -+ .http.status.get_full_cluster_status_plaintext( -+ name="http.status.get_full_cluster_status_plaintext.remote", -+ cluster_status_plaintext=self.remote_status, -+ communication_list=[ -+ [dict( -+ label=self.remote_node_name_list[0], -+ was_connected=False, -+ )], -+ ] -+ ) -+ ) -+ result = dr.status_all_sites_plaintext(self.env_assist.get_env()) -+ self.assertEqual(result, self._fixture_result(remote_success=False)) -+ self.env_assist.assert_reports([ -+ 
fixture.warn( -+ report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT, -+ command="remote/cluster_status_plaintext", -+ node="recovery-node", -+ reason=None, -+ ), -+ ]) -+ -+ def test_both_sites_down(self): -+ self._fixture_load_configs() -+ (self.config -+ .http.status.get_full_cluster_status_plaintext( -+ name="http.status.get_full_cluster_status_plaintext.local", -+ cluster_status_plaintext=self.local_status, -+ communication_list=[ -+ [dict( -+ label=self.local_node_name_list[0], -+ was_connected=False, -+ )], -+ [dict( -+ label=self.local_node_name_list[1], -+ was_connected=False, -+ )], -+ ] -+ ) -+ .http.status.get_full_cluster_status_plaintext( -+ name="http.status.get_full_cluster_status_plaintext.remote", -+ cluster_status_plaintext=self.remote_status, -+ communication_list=[ -+ [dict( -+ label=self.remote_node_name_list[0], -+ was_connected=False, -+ )], -+ ] -+ ) -+ ) -+ result = dr.status_all_sites_plaintext(self.env_assist.get_env()) -+ self.assertEqual( -+ result, -+ self._fixture_result(local_success=False, remote_success=False) -+ ) -+ self.env_assist.assert_reports([ -+ fixture.warn( -+ report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT, -+ command="remote/cluster_status_plaintext", -+ node="node1", -+ reason=None, -+ ), -+ fixture.warn( -+ report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT, -+ command="remote/cluster_status_plaintext", -+ node="node2", -+ reason=None, -+ ), -+ fixture.warn( -+ report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT, -+ command="remote/cluster_status_plaintext", -+ node="recovery-node", -+ reason=None, -+ ), -+ ]) -+ -+ -+class FatalConfigIssue(TestCase): -+ def setUp(self): -+ self.env_assist, self.config = get_env_tools(self) -+ -+ def test_config_missing(self): -+ (self.config -+ .raw_file.exists( -+ file_type_codes.PCS_DR_CONFIG, -+ settings.pcsd_dr_config_location, -+ exists=False, -+ ) -+ ) -+ self.env_assist.assert_raise_library_error( -+ lambda: 
dr.status_all_sites_plaintext(self.env_assist.get_env()), -+ ) -+ self.env_assist.assert_reports([ -+ fixture.error( -+ report_codes.DR_CONFIG_DOES_NOT_EXIST, -+ ), -+ ]) -+ -+ def test_config_read_error(self): -+ (self.config -+ .raw_file.exists( -+ file_type_codes.PCS_DR_CONFIG, -+ settings.pcsd_dr_config_location, -+ ) -+ .raw_file.read( -+ file_type_codes.PCS_DR_CONFIG, -+ settings.pcsd_dr_config_location, -+ exception_msg=REASON, -+ ) -+ ) -+ self.env_assist.assert_raise_library_error( -+ lambda: dr.status_all_sites_plaintext(self.env_assist.get_env()), -+ ) -+ self.env_assist.assert_reports([ -+ fixture.error( -+ report_codes.FILE_IO_ERROR, -+ file_type_code=file_type_codes.PCS_DR_CONFIG, -+ file_path=settings.pcsd_dr_config_location, -+ operation=RawFileError.ACTION_READ, -+ reason=REASON, -+ ), -+ ]) -+ -+ def test_config_parse_error(self): -+ (self.config -+ .raw_file.exists( -+ file_type_codes.PCS_DR_CONFIG, -+ settings.pcsd_dr_config_location, -+ ) -+ .raw_file.read( -+ file_type_codes.PCS_DR_CONFIG, -+ settings.pcsd_dr_config_location, -+ content="bad content", -+ ) -+ ) -+ self.env_assist.assert_raise_library_error( -+ lambda: dr.status_all_sites_plaintext(self.env_assist.get_env()), -+ ) -+ self.env_assist.assert_reports([ -+ fixture.error( -+ report_codes.PARSE_ERROR_JSON_FILE, -+ file_type_code=file_type_codes.PCS_DR_CONFIG, -+ file_path=settings.pcsd_dr_config_location, -+ line_number=1, -+ column_number=1, -+ position=0, -+ reason="Expecting value", -+ full_msg="Expecting value: line 1 column 1 (char 0)", -+ ), -+ ]) -+ -+ def test_corosync_conf_read_error(self): -+ (self.config -+ .raw_file.exists( -+ file_type_codes.PCS_DR_CONFIG, -+ settings.pcsd_dr_config_location, -+ ) -+ .raw_file.read( -+ file_type_codes.PCS_DR_CONFIG, -+ settings.pcsd_dr_config_location, -+ content="{}", -+ ) -+ .corosync_conf.load_content("", exception_msg=REASON) -+ ) -+ self.env_assist.assert_raise_library_error( -+ lambda: 
dr.status_all_sites_plaintext(self.env_assist.get_env()), -+ [ -+ fixture.error( -+ report_codes.UNABLE_TO_READ_COROSYNC_CONFIG, -+ path=settings.corosync_conf_file, -+ reason=REASON, -+ ), -+ ], -+ expected_in_processor=False -+ ) -+ -+ def test_corosync_conf_parse_error(self): -+ (self.config -+ .raw_file.exists( -+ file_type_codes.PCS_DR_CONFIG, -+ settings.pcsd_dr_config_location, -+ ) -+ .raw_file.read( -+ file_type_codes.PCS_DR_CONFIG, -+ settings.pcsd_dr_config_location, -+ content="{}", -+ ) -+ .corosync_conf.load_content("wrong {\n corosync") -+ ) -+ self.env_assist.assert_raise_library_error( -+ lambda: dr.status_all_sites_plaintext(self.env_assist.get_env()), -+ [ -+ fixture.error( -+ report_codes -+ .PARSE_ERROR_COROSYNC_CONF_LINE_IS_NOT_SECTION_NOR_KEY_VALUE -+ ), -+ ], -+ expected_in_processor=False -+ ) -diff --git a/pcs_test/tier0/lib/communication/test_status.py b/pcs_test/tier0/lib/communication/test_status.py -new file mode 100644 -index 00000000..b8db7a73 ---- /dev/null -+++ b/pcs_test/tier0/lib/communication/test_status.py -@@ -0,0 +1,7 @@ -+from unittest import TestCase -+ -+class GetFullClusterStatusPlaintext(TestCase): -+ """ -+ tested in: -+ pcs_test.tier0.lib.commands.dr.test_status -+ """ -diff --git a/pcs_test/tier0/lib/dr/__init__.py b/pcs_test/tier0/lib/dr/__init__.py -new file mode 100644 -index 00000000..e69de29b -diff --git a/pcs_test/tier0/lib/dr/test_facade.py b/pcs_test/tier0/lib/dr/test_facade.py -new file mode 100644 -index 00000000..baa17b1e ---- /dev/null -+++ b/pcs_test/tier0/lib/dr/test_facade.py -@@ -0,0 +1,138 @@ -+from unittest import TestCase -+ -+from pcs.common.dr import DrRole -+from pcs.lib.dr.config import facade -+ -+ -+class Facade(TestCase): -+ def test_create(self): -+ for role in DrRole: -+ with self.subTest(local_role=role.value): -+ self.assertEqual( -+ dict( -+ local=dict( -+ role=role.value, -+ ), -+ remote_sites=[], -+ ), -+ facade.Facade.create(role).config, -+ ) -+ -+ def test_local_role(self): -+ for 
role in DrRole: -+ with self.subTest(local_role=role.value): -+ cfg = facade.Facade({ -+ "local": { -+ "role": role.value, -+ }, -+ "remote_sites": [ -+ ], -+ }) -+ self.assertEqual(cfg.local_role, role) -+ -+ def test_add_site(self): -+ node_list = [f"node{i}" for i in range(4)] -+ cfg = facade.Facade.create(DrRole.PRIMARY) -+ cfg.add_site(DrRole.RECOVERY, node_list) -+ self.assertEqual( -+ dict( -+ local=dict( -+ role=DrRole.PRIMARY.value, -+ ), -+ remote_sites=[ -+ dict( -+ role=DrRole.RECOVERY.value, -+ nodes=[dict(name=node) for node in node_list], -+ ), -+ ] -+ ), -+ cfg.config -+ ) -+ -+class GetRemoteSiteList(TestCase): -+ def test_no_sites(self): -+ cfg = facade.Facade({ -+ "local": { -+ "role": DrRole.PRIMARY.value, -+ }, -+ "remote_sites": [ -+ ], -+ }) -+ self.assertEqual( -+ cfg.get_remote_site_list(), -+ [] -+ ) -+ -+ def test_one_site(self): -+ cfg = facade.Facade({ -+ "local": { -+ "role": DrRole.PRIMARY.value, -+ }, -+ "remote_sites": [ -+ { -+ "role": DrRole.RECOVERY.value, -+ "nodes": [ -+ {"name": "node1"}, -+ ], -+ }, -+ ], -+ }) -+ self.assertEqual( -+ cfg.get_remote_site_list(), -+ [ -+ facade.DrSite(role=DrRole.RECOVERY, node_name_list=["node1"]), -+ ] -+ ) -+ -+ def test_more_sites(self): -+ cfg = facade.Facade({ -+ "local": { -+ "role": DrRole.RECOVERY.value, -+ }, -+ "remote_sites": [ -+ { -+ "role": DrRole.PRIMARY.value, -+ "nodes": [ -+ {"name": "nodeA1"}, -+ {"name": "nodeA2"}, -+ ], -+ }, -+ { -+ "role": DrRole.RECOVERY.value, -+ "nodes": [ -+ {"name": "nodeB1"}, -+ {"name": "nodeB2"}, -+ ], -+ }, -+ ], -+ }) -+ self.assertEqual( -+ cfg.get_remote_site_list(), -+ [ -+ facade.DrSite( -+ role=DrRole.PRIMARY, node_name_list=["nodeA1", "nodeA2"] -+ ), -+ facade.DrSite( -+ role=DrRole.RECOVERY, node_name_list=["nodeB1", "nodeB2"] -+ ), -+ ] -+ ) -+ -+ def test_no_nodes(self): -+ cfg = facade.Facade({ -+ "local": { -+ "role": DrRole.PRIMARY.value, -+ }, -+ "remote_sites": [ -+ { -+ "role": DrRole.RECOVERY.value, -+ "nodes": [], -+ }, -+ ], 
-+ }) -+ self.assertEqual( -+ cfg.get_remote_site_list(), -+ [ -+ facade.DrSite(role=DrRole.RECOVERY, node_name_list=[]), -+ ] -+ ) -diff --git a/pcs_test/tier0/lib/test_env.py b/pcs_test/tier0/lib/test_env.py -index edab9dc6..5c1c6a39 100644 ---- a/pcs_test/tier0/lib/test_env.py -+++ b/pcs_test/tier0/lib/test_env.py -@@ -9,7 +9,7 @@ from pcs_test.tools.misc import ( - get_test_resource as rc, - ) - --from pcs.common import report_codes -+from pcs.common import file_type_codes, report_codes - from pcs.lib.env import LibraryEnvironment - from pcs.lib.errors import ReportItemSeverity as severity - -@@ -57,6 +57,46 @@ class LibraryEnvironmentTest(TestCase): - env = LibraryEnvironment(self.mock_logger, self.mock_reporter) - self.assertEqual([], env.user_groups) - -+class GhostFileCodes(TestCase): -+ def setUp(self): -+ self.mock_logger = mock.MagicMock(logging.Logger) -+ self.mock_reporter = MockLibraryReportProcessor() -+ -+ def _fixture_get_env(self, cib_data=None, corosync_conf_data=None): -+ return LibraryEnvironment( -+ self.mock_logger, -+ self.mock_reporter, -+ cib_data=cib_data, -+ corosync_conf_data=corosync_conf_data -+ ) -+ -+ def test_nothing(self): -+ self.assertEqual( -+ self._fixture_get_env().ghost_file_codes, -+ set() -+ ) -+ -+ def test_corosync(self): -+ self.assertEqual( -+ self._fixture_get_env(corosync_conf_data="x").ghost_file_codes, -+ set([file_type_codes.COROSYNC_CONF]) -+ ) -+ -+ def test_cib(self): -+ self.assertEqual( -+ self._fixture_get_env(cib_data="x").ghost_file_codes, -+ set([file_type_codes.CIB]) -+ ) -+ -+ def test_all(self): -+ self.assertEqual( -+ self._fixture_get_env( -+ cib_data="x", -+ corosync_conf_data="x", -+ ).ghost_file_codes, -+ set([file_type_codes.COROSYNC_CONF, file_type_codes.CIB]) -+ ) -+ - @patch_env("CommandRunner") - class CmdRunner(TestCase): - def setUp(self): -diff --git a/pcs_test/tools/command_env/config_corosync_conf.py b/pcs_test/tools/command_env/config_corosync_conf.py -index 3db57cee..a0bd9f33 100644 
---- a/pcs_test/tools/command_env/config_corosync_conf.py -+++ b/pcs_test/tools/command_env/config_corosync_conf.py -@@ -9,9 +9,14 @@ class CorosyncConf: - self.__calls = call_collection - - def load_content( -- self, content, name="corosync_conf.load_content", instead=None -+ self, content, name="corosync_conf.load_content", instead=None, -+ exception_msg=None - ): -- self.__calls.place(name, Call(content), instead=instead) -+ self.__calls.place( -+ name, -+ Call(content, exception_msg=exception_msg), -+ instead=instead -+ ) - - def load( - self, node_name_list=None, name="corosync_conf.load", -diff --git a/pcs_test/tools/command_env/config_http.py b/pcs_test/tools/command_env/config_http.py -index 6827c2b1..911a82df 100644 ---- a/pcs_test/tools/command_env/config_http.py -+++ b/pcs_test/tools/command_env/config_http.py -@@ -7,6 +7,7 @@ from pcs_test.tools.command_env.config_http_files import FilesShortcuts - from pcs_test.tools.command_env.config_http_host import HostShortcuts - from pcs_test.tools.command_env.config_http_pcmk import PcmkShortcuts - from pcs_test.tools.command_env.config_http_sbd import SbdShortcuts -+from pcs_test.tools.command_env.config_http_status import StatusShortcuts - from pcs_test.tools.command_env.mock_node_communicator import( - place_communication, - place_requests, -@@ -34,6 +35,7 @@ def _mutual_exclusive(param_names, **kwargs): - - - class HttpConfig: -+ # pylint: disable=too-many-instance-attributes - def __init__(self, call_collection, wrap_helper): - self.__calls = call_collection - -@@ -43,6 +45,7 @@ class HttpConfig: - self.host = wrap_helper(HostShortcuts(self.__calls)) - self.pcmk = wrap_helper(PcmkShortcuts(self.__calls)) - self.sbd = wrap_helper(SbdShortcuts(self.__calls)) -+ self.status = wrap_helper(StatusShortcuts(self.__calls)) - - def add_communication(self, name, communication_list, **kwargs): - """ -diff --git a/pcs_test/tools/command_env/config_http_corosync.py b/pcs_test/tools/command_env/config_http_corosync.py 
-index f7df73c1..3d89e649 100644 ---- a/pcs_test/tools/command_env/config_http_corosync.py -+++ b/pcs_test/tools/command_env/config_http_corosync.py -@@ -29,6 +29,30 @@ class CorosyncShortcuts: - output='{"corosync":false}' - ) - -+ def get_corosync_conf( -+ self, -+ corosync_conf="", -+ node_labels=None, -+ communication_list=None, -+ name="http.corosync.get_corosync_conf", -+ ): -+ """ -+ Create a call for loading corosync.conf text from remote nodes -+ -+ string corosync_conf -- corosync.conf text to be loaded -+ list node_labels -- create success responses from these nodes -+ list communication_list -- create custom responses -+ string name -- the key of this call -+ """ -+ place_multinode_call( -+ self.__calls, -+ name, -+ node_labels, -+ communication_list, -+ action="remote/get_corosync_conf", -+ output=corosync_conf, -+ ) -+ - def set_corosync_conf( - self, corosync_conf, node_labels=None, communication_list=None, - name="http.corosync.set_corosync_conf" -diff --git a/pcs_test/tools/command_env/config_http_files.py b/pcs_test/tools/command_env/config_http_files.py -index 8cc9b878..b4e93d64 100644 ---- a/pcs_test/tools/command_env/config_http_files.py -+++ b/pcs_test/tools/command_env/config_http_files.py -@@ -11,9 +11,11 @@ class FilesShortcuts: - - def put_files( - self, node_labels=None, pcmk_authkey=None, corosync_authkey=None, -- corosync_conf=None, pcs_settings_conf=None, communication_list=None, -+ corosync_conf=None, pcs_disaster_recovery_conf=None, -+ pcs_settings_conf=None, communication_list=None, - name="http.files.put_files", - ): -+ # pylint: disable=too-many-arguments - """ - Create a call for the files distribution to the nodes. 
- -@@ -21,6 +23,7 @@ class FilesShortcuts: - pcmk_authkey bytes -- content of pacemaker authkey file - corosync_authkey bytes -- content of corosync authkey file - corosync_conf string -- content of corosync.conf -+ pcs_disaster_recovery_conf string -- content of pcs DR config - pcs_settings_conf string -- content of pcs_settings.conf - communication_list list -- create custom responses - name string -- the key of this call -@@ -58,6 +61,17 @@ class FilesShortcuts: - ) - output_data[file_id] = written_output_dict - -+ if pcs_disaster_recovery_conf: -+ file_id = "disaster-recovery config" -+ input_data[file_id] = dict( -+ data=base64.b64encode( -+ pcs_disaster_recovery_conf -+ ).decode("utf-8"), -+ type="pcs_disaster_recovery_conf", -+ rewrite_existing=True, -+ ) -+ output_data[file_id] = written_output_dict -+ - if pcs_settings_conf: - file_id = "pcs_settings.conf" - input_data[file_id] = dict( -@@ -78,7 +92,8 @@ class FilesShortcuts: - ) - - def remove_files( -- self, node_labels=None, pcsd_settings=False, communication_list=None, -+ self, node_labels=None, pcsd_settings=False, -+ pcs_disaster_recovery_conf=False, communication_list=None, - name="http.files.remove_files" - ): - """ -@@ -86,6 +101,7 @@ class FilesShortcuts: - - node_labels list -- create success responses from these nodes - pcsd_settings bool -- if True, remove file pcsd_settings -+ pcs_disaster_recovery_conf bool -- if True, remove pcs DR config - communication_list list -- create custom responses - name string -- the key of this call - """ -@@ -100,6 +116,14 @@ class FilesShortcuts: - message="", - ) - -+ if pcs_disaster_recovery_conf: -+ file_id = "pcs disaster-recovery config" -+ input_data[file_id] = dict(type="pcs_disaster_recovery_conf") -+ output_data[file_id] = dict( -+ code="deleted", -+ message="", -+ ) -+ - place_multinode_call( - self.__calls, - name, -diff --git a/pcs_test/tools/command_env/config_http_status.py b/pcs_test/tools/command_env/config_http_status.py -new file mode 100644 
-index 00000000..888b27bb ---- /dev/null -+++ b/pcs_test/tools/command_env/config_http_status.py -@@ -0,0 +1,52 @@ -+import json -+ -+from pcs_test.tools.command_env.mock_node_communicator import ( -+ place_multinode_call, -+) -+ -+class StatusShortcuts: -+ def __init__(self, calls): -+ self.__calls = calls -+ -+ def get_full_cluster_status_plaintext( -+ self, node_labels=None, communication_list=None, -+ name="http.status.get_full_cluster_status_plaintext", -+ hide_inactive_resources=False, verbose=False, -+ cmd_status="success", cmd_status_msg="", report_list=None, -+ cluster_status_plaintext="", -+ ): -+ # pylint: disable=too-many-arguments -+ """ -+ Create a call for getting cluster status in plaintext -+ -+ node_labels list -- create success responses from these nodes -+ communication_list list -- create custom responses -+ name string -- the key of this call -+ bool hide_inactive_resources -- input flag -+ bool verbose -- input flag -+ string cmd_status -- did the command succeed? 
-+ string_cmd_status_msg -- details for cmd_status -+ iterable report_list -- reports from a remote node -+ string cluster_status_plaintext -- resulting cluster status -+ """ -+ report_list = report_list or [] -+ place_multinode_call( -+ self.__calls, -+ name, -+ node_labels, -+ communication_list, -+ action="remote/cluster_status_plaintext", -+ param_list=[( -+ "data_json", -+ json.dumps(dict( -+ hide_inactive_resources=hide_inactive_resources, -+ verbose=verbose, -+ )) -+ )], -+ output=json.dumps(dict( -+ status=cmd_status, -+ status_msg=cmd_status_msg, -+ data=cluster_status_plaintext, -+ report_list=report_list, -+ )), -+ ) -diff --git a/pcs_test/tools/command_env/mock_get_local_corosync_conf.py b/pcs_test/tools/command_env/mock_get_local_corosync_conf.py -index 854cb8f0..01eca5f1 100644 ---- a/pcs_test/tools/command_env/mock_get_local_corosync_conf.py -+++ b/pcs_test/tools/command_env/mock_get_local_corosync_conf.py -@@ -1,10 +1,15 @@ -+from pcs import settings -+from pcs.lib import reports -+from pcs.lib.errors import LibraryError -+ - CALL_TYPE_GET_LOCAL_COROSYNC_CONF = "CALL_TYPE_GET_LOCAL_COROSYNC_CONF" - - class Call: - type = CALL_TYPE_GET_LOCAL_COROSYNC_CONF - -- def __init__(self, content): -+ def __init__(self, content, exception_msg=None): - self.content = content -+ self.exception_msg = exception_msg - - def __repr__(self): - return str("") -@@ -13,5 +18,10 @@ class Call: - def get_get_local_corosync_conf(call_queue): - def get_local_corosync_conf(): - _, expected_call = call_queue.take(CALL_TYPE_GET_LOCAL_COROSYNC_CONF) -+ if expected_call.exception_msg: -+ raise LibraryError(reports.corosync_config_read_error( -+ settings.corosync_conf_file, -+ expected_call.exception_msg, -+ )) - return expected_call.content - return get_local_corosync_conf -diff --git a/pcsd/capabilities.xml b/pcsd/capabilities.xml -index f9a76a22..1adb57ce 100644 ---- a/pcsd/capabilities.xml -+++ b/pcsd/capabilities.xml -@@ -1696,6 +1696,18 @@ - - - -+ -+ -+ Configure 
disaster-recovery with the local cluster as the primary site -+ and one recovery site. Display local disaster-recovery config. Display -+ status of all sites. Remove disaster-recovery config. -+ -+ pcs commands: dr config, dr destroy, dr set-recovery-site, dr status -+ -+ -+ -+ -+ - - - Describe a resource agent - present its metadata. -diff --git a/pcsd/pcsd_file.rb b/pcsd/pcsd_file.rb -index 486b764d..d82b55d2 100644 ---- a/pcsd/pcsd_file.rb -+++ b/pcsd/pcsd_file.rb -@@ -198,6 +198,20 @@ module PcsdFile - end - end - -+ class PutPcsDrConf < PutFile -+ def full_file_name -+ @full_file_name ||= PCSD_DR_CONFIG_LOCATION -+ end -+ -+ def binary?() -+ return true -+ end -+ -+ def permissions() -+ return 0600 -+ end -+ end -+ - TYPES = { - "booth_authfile" => PutFileBoothAuthfile, - "booth_config" => PutFileBoothConfig, -@@ -205,6 +219,7 @@ module PcsdFile - "corosync_authkey" => PutFileCorosyncAuthkey, - "corosync_conf" => PutFileCorosyncConf, - "pcs_settings_conf" => PutPcsSettingsConf, -+ "pcs_disaster_recovery_conf" => PutPcsDrConf, - } - end - -diff --git a/pcsd/pcsd_remove_file.rb b/pcsd/pcsd_remove_file.rb -index 1038402d..ffaed8e3 100644 ---- a/pcsd/pcsd_remove_file.rb -+++ b/pcsd/pcsd_remove_file.rb -@@ -41,8 +41,15 @@ module PcsdRemoveFile - end - end - -+ class RemovePcsDrConf < RemoveFile -+ def full_file_name -+ @full_file_name ||= PCSD_DR_CONFIG_LOCATION -+ end -+ end -+ - TYPES = { - "pcmk_remote_authkey" => RemovePcmkRemoteAuthkey, - "pcsd_settings" => RemovePcsdSettings, -+ "pcs_disaster_recovery_conf" => RemovePcsDrConf, - } - end -diff --git a/pcsd/remote.rb b/pcsd/remote.rb -index 6f454681..28b91382 100644 ---- a/pcsd/remote.rb -+++ b/pcsd/remote.rb -@@ -27,6 +27,7 @@ def remote(params, request, auth_user) - :status => method(:node_status), - :status_all => method(:status_all), - :cluster_status => method(:cluster_status_remote), -+ :cluster_status_plaintext => method(:cluster_status_plaintext), - :auth => method(:auth), - :check_auth => 
method(:check_auth), - :cluster_setup => method(:cluster_setup), -@@ -219,6 +220,18 @@ def cluster_status_remote(params, request, auth_user) - return JSON.generate(status) - end - -+# get cluster status in plaintext (over-the-network version of 'pcs status') -+def cluster_status_plaintext(params, request, auth_user) -+ if not allowed_for_local_cluster(auth_user, Permissions::READ) -+ return 403, 'Permission denied' -+ end -+ return pcs_internal_proxy( -+ auth_user, -+ params.fetch(:data_json, ""), -+ "status.full_cluster_status_plaintext" -+ ) -+end -+ - def cluster_start(params, request, auth_user) - if params[:name] - code, response = send_request_with_token( -@@ -444,7 +457,11 @@ def get_corosync_conf_remote(params, request, auth_user) - if not allowed_for_local_cluster(auth_user, Permissions::READ) - return 403, 'Permission denied' - end -- return get_corosync_conf() -+ begin -+ return get_corosync_conf() -+ rescue -+ return 400, 'Unable to read corosync.conf' -+ end - end - - # deprecated, use /remote/put_file (note that put_file doesn't support backup -diff --git a/pcsd/settings.rb b/pcsd/settings.rb -index a6fd0a26..e8dc0c96 100644 ---- a/pcsd/settings.rb -+++ b/pcsd/settings.rb -@@ -9,6 +9,7 @@ KEY_FILE = PCSD_VAR_LOCATION + 'pcsd.key' - KNOWN_HOSTS_FILE_NAME = 'known-hosts' - PCSD_SETTINGS_CONF_LOCATION = PCSD_VAR_LOCATION + "pcs_settings.conf" - PCSD_USERS_CONF_LOCATION = PCSD_VAR_LOCATION + "pcs_users.conf" -+PCSD_DR_CONFIG_LOCATION = PCSD_VAR_LOCATION + "disaster-recovery" - - CRM_MON = "/usr/sbin/crm_mon" - CRM_NODE = "/usr/sbin/crm_node" -diff --git a/pcsd/settings.rb.debian b/pcsd/settings.rb.debian -index 5d830af9..daaae37b 100644 ---- a/pcsd/settings.rb.debian -+++ b/pcsd/settings.rb.debian -@@ -9,6 +9,7 @@ KEY_FILE = PCSD_VAR_LOCATION + 'pcsd.key' - KNOWN_HOSTS_FILE_NAME = 'known-hosts' - PCSD_SETTINGS_CONF_LOCATION = PCSD_VAR_LOCATION + "pcs_settings.conf" - PCSD_USERS_CONF_LOCATION = PCSD_VAR_LOCATION + "pcs_users.conf" -+PCSD_DR_CONFIG_LOCATION 
= PCSD_VAR_LOCATION + "disaster-recovery" - - CRM_MON = "/usr/sbin/crm_mon" - CRM_NODE = "/usr/sbin/crm_node" -diff --git a/pylintrc b/pylintrc -index 5fc4c200..9255a804 100644 ---- a/pylintrc -+++ b/pylintrc -@@ -19,7 +19,7 @@ max-parents=10 - min-public-methods=0 - - [BASIC] --good-names=e, i, op, ip, el, maxDiff, cm, ok, T -+good-names=e, i, op, ip, el, maxDiff, cm, ok, T, dr - - [VARIABLES] - # A regular expression matching the name of dummy variables (i.e. expectedly --- -2.21.0 - diff --git a/SOURCES/bz1743731-01-fix-error-msg-when-cluster-is-not-set-up.patch b/SOURCES/bz1743731-01-fix-error-msg-when-cluster-is-not-set-up.patch deleted file mode 100644 index 06f551e..0000000 --- a/SOURCES/bz1743731-01-fix-error-msg-when-cluster-is-not-set-up.patch +++ /dev/null @@ -1,130 +0,0 @@ -From 8058591d0d79942bf6c61f105a180592bac7cf69 Mon Sep 17 00:00:00 2001 -From: Ondrej Mular -Date: Thu, 28 Nov 2019 16:57:24 +0100 -Subject: [PATCH 2/3] fix error msg when cluster is not set up - ---- - CHANGELOG.md | 4 +++ - pcs/cluster.py | 3 +++ - pcs/lib/commands/qdevice.py | 2 ++ - pcs_test/tier0/lib/commands/test_qdevice.py | 27 +++++++++++++++++++-- - 4 files changed, 34 insertions(+), 2 deletions(-) - -diff --git a/CHANGELOG.md b/CHANGELOG.md -index 889436c3..5a7ec377 100644 ---- a/CHANGELOG.md -+++ b/CHANGELOG.md -@@ -6,7 +6,11 @@ - - It is possible to configure a disaster-recovery site and display its status - ([rhbz#1676431]) - -+### Fixed -+- Error messages in cases when cluster is not set up ([rhbz#1743731]) -+ - [rhbz#1676431]: https://bugzilla.redhat.com/show_bug.cgi?id=1676431 -+[rhbz#1743731]: https://bugzilla.redhat.com/show_bug.cgi?id=1743731 - - - ## [0.10.4] - 2019-11-28 -diff --git a/pcs/cluster.py b/pcs/cluster.py -index 9473675f..0e9b3365 100644 ---- a/pcs/cluster.py -+++ b/pcs/cluster.py -@@ -190,6 +190,9 @@ def start_cluster(argv): - wait_for_nodes_started(nodes, wait_timeout) - return - -+ if not utils.hasCorosyncConf(): -+ utils.err("cluster is not 
currently configured on this node") -+ - print("Starting Cluster...") - service_list = ["corosync"] - if utils.need_to_handle_qdevice_service(): -diff --git a/pcs/lib/commands/qdevice.py b/pcs/lib/commands/qdevice.py -index 3d7af234..41f7c296 100644 ---- a/pcs/lib/commands/qdevice.py -+++ b/pcs/lib/commands/qdevice.py -@@ -81,6 +81,8 @@ def qdevice_start(lib_env, model): - start qdevice now on local host - """ - _check_model(model) -+ if not qdevice_net.qdevice_initialized(): -+ raise LibraryError(reports.qdevice_not_initialized(model)) - _service_start(lib_env, qdevice_net.qdevice_start) - - def qdevice_stop(lib_env, model, proceed_if_used=False): -diff --git a/pcs_test/tier0/lib/commands/test_qdevice.py b/pcs_test/tier0/lib/commands/test_qdevice.py -index b2c83ca4..af23db61 100644 ---- a/pcs_test/tier0/lib/commands/test_qdevice.py -+++ b/pcs_test/tier0/lib/commands/test_qdevice.py -@@ -689,6 +689,7 @@ class QdeviceNetDisableTest(QdeviceTestCase): - ) - - -+@mock.patch("pcs.lib.corosync.qdevice_net.qdevice_initialized") - @mock.patch("pcs.lib.external.start_service") - @mock.patch.object( - LibraryEnvironment, -@@ -696,9 +697,11 @@ class QdeviceNetDisableTest(QdeviceTestCase): - lambda self: "mock_runner" - ) - class QdeviceNetStartTest(QdeviceTestCase): -- def test_success(self, mock_net_start): -+ def test_success(self, mock_net_start, mock_qdevice_initialized): -+ mock_qdevice_initialized.return_value = True - lib.qdevice_start(self.lib_env, "net") - mock_net_start.assert_called_once_with("mock_runner", "corosync-qnetd") -+ mock_qdevice_initialized.assert_called_once_with() - assert_report_item_list_equal( - self.mock_reporter.report_item_list, - [ -@@ -719,11 +722,12 @@ class QdeviceNetStartTest(QdeviceTestCase): - ] - ) - -- def test_failed(self, mock_net_start): -+ def test_failed(self, mock_net_start, mock_qdevice_initialized): - mock_net_start.side_effect = StartServiceError( - "test service", - "test error" - ) -+ mock_qdevice_initialized.return_value = 
True - - assert_raise_library_error( - lambda: lib.qdevice_start(self.lib_env, "net"), -@@ -737,6 +741,7 @@ class QdeviceNetStartTest(QdeviceTestCase): - ) - ) - mock_net_start.assert_called_once_with("mock_runner", "corosync-qnetd") -+ mock_qdevice_initialized.assert_called_once_with() - assert_report_item_list_equal( - self.mock_reporter.report_item_list, - [ -@@ -750,6 +755,24 @@ class QdeviceNetStartTest(QdeviceTestCase): - ] - ) - -+ def test_qdevice_not_initialized( -+ self, mock_net_start, mock_qdevice_initialized -+ ): -+ mock_qdevice_initialized.return_value = False -+ -+ assert_raise_library_error( -+ lambda: lib.qdevice_start(self.lib_env, "net"), -+ ( -+ severity.ERROR, -+ report_codes.QDEVICE_NOT_INITIALIZED, -+ { -+ "model": "net", -+ } -+ ) -+ ) -+ mock_net_start.assert_not_called() -+ mock_qdevice_initialized.assert_called_once_with() -+ - - @mock.patch("pcs.lib.corosync.qdevice_net.qdevice_status_cluster_text") - @mock.patch("pcs.lib.external.stop_service") --- -2.21.0 - diff --git a/SOURCES/bz1805082-01-fix-resource-stonith-refresh-documentation.patch b/SOURCES/bz1805082-01-fix-resource-stonith-refresh-documentation.patch new file mode 100644 index 0000000..7703e96 --- /dev/null +++ b/SOURCES/bz1805082-01-fix-resource-stonith-refresh-documentation.patch @@ -0,0 +1,57 @@ +From be40fe494ddeb4f7132389ca0f3c1193de0e425d Mon Sep 17 00:00:00 2001 +From: Tomas Jelinek +Date: Tue, 23 Jun 2020 12:57:05 +0200 +Subject: [PATCH 2/3] fix 'resource | stonith refresh' documentation + +--- + pcs/pcs.8 | 4 ++-- + pcs/usage.py | 4 ++-- + 2 files changed, 4 insertions(+), 4 deletions(-) + +diff --git a/pcs/pcs.8 b/pcs/pcs.8 +index c887d332..3efc5bb2 100644 +--- a/pcs/pcs.8 ++++ b/pcs/pcs.8 +@@ -325,7 +325,7 @@ If a node is not specified then resources / stonith devices on all nodes will be + refresh [] [node=] [\fB\-\-strict\fR] + Make the cluster forget the complete operation history (including failures) of the resource and re\-detect its current state. 
If you are interested in forgetting failed operations only, use the 'pcs resource cleanup' command. + .br +-If the named resource is part of a group, or one numbered instance of a clone or bundled resource, the clean\-up applies to the whole collective resource unless \fB\-\-strict\fR is given. ++If the named resource is part of a group, or one numbered instance of a clone or bundled resource, the refresh applies to the whole collective resource unless \fB\-\-strict\fR is given. + .br + If a resource id is not specified then all resources / stonith devices will be refreshed. + .br +@@ -613,7 +613,7 @@ If a node is not specified then resources / stonith devices on all nodes will be + refresh [] [\fB\-\-node\fR ] [\fB\-\-strict\fR] + Make the cluster forget the complete operation history (including failures) of the stonith device and re\-detect its current state. If you are interested in forgetting failed operations only, use the 'pcs stonith cleanup' command. + .br +-If the named stonith device is part of a group, or one numbered instance of a clone or bundled resource, the clean\-up applies to the whole collective resource unless \fB\-\-strict\fR is given. ++If the named stonith device is part of a group, or one numbered instance of a clone or bundled resource, the refresh applies to the whole collective resource unless \fB\-\-strict\fR is given. + .br + If a stonith id is not specified then all resources / stonith devices will be refreshed. + .br +diff --git a/pcs/usage.py b/pcs/usage.py +index 8722bd7b..0f3c95a3 100644 +--- a/pcs/usage.py ++++ b/pcs/usage.py +@@ -663,7 +663,7 @@ Commands: + interested in forgetting failed operations only, use the 'pcs resource + cleanup' command. + If the named resource is part of a group, or one numbered instance of a +- clone or bundled resource, the clean-up applies to the whole collective ++ clone or bundled resource, the refresh applies to the whole collective + resource unless --strict is given. 
+ If a resource id is not specified then all resources / stonith devices + will be refreshed. +@@ -1214,7 +1214,7 @@ Commands: + are interested in forgetting failed operations only, use the 'pcs + stonith cleanup' command. + If the named stonith device is part of a group, or one numbered +- instance of a clone or bundled resource, the clean-up applies to the ++ instance of a clone or bundled resource, the refresh applies to the + whole collective resource unless --strict is given. + If a stonith id is not specified then all resources / stonith devices + will be refreshed. +-- +2.25.4 + diff --git a/SOURCES/bz1817547-01-resource-and-operation-defaults.patch b/SOURCES/bz1817547-01-resource-and-operation-defaults.patch new file mode 100644 index 0000000..34d1795 --- /dev/null +++ b/SOURCES/bz1817547-01-resource-and-operation-defaults.patch @@ -0,0 +1,7605 @@ +From ec4f8fc199891ad13235729272c0f115918cade9 Mon Sep 17 00:00:00 2001 +From: Tomas Jelinek +Date: Thu, 21 May 2020 16:51:25 +0200 +Subject: [PATCH 1/3] squash bz1817547 Resource and operation defaults that + apply to specific resource/operation types + +add rule parser for rsc and op expressions + +improvements to rule parser + +make rule parts independent of the parser + +export parsed rules into cib + +add a command for adding new rsc and op defaults + +display rsc and op defaults with multiple nvsets + +fix parsing and processing of rsc_expression in rules + +improve syntax for creating a new nvset + +make the rule parser produce dataclasses + +fix for pyparsing-2.4.0 + +add commands for removing rsc and op defaults sets + +add commands for updating rsc and op defaults sets + +update chagelog, capabilities + +add tier1 tests for rules + +various minor fixes + +fix routing, create 'defaults update' command + +better error messages for unallowed rule expressions +--- + .gitlab-ci.yml | 3 + + README.md | 1 + + mypy.ini | 9 + + pcs.spec.in | 3 + + pcs/cli/common/lib_wrapper.py | 10 +- + pcs/cli/nvset.py | 53 ++ + 
pcs/cli/reports/messages.py | 39 + + pcs/cli/routing/resource.py | 77 +- + pcs/cli/rule.py | 89 +++ + pcs/common/interface/dto.py | 9 +- + pcs/common/pacemaker/nvset.py | 26 + + pcs/common/pacemaker/rule.py | 28 + + pcs/common/reports/codes.py | 3 + + pcs/common/reports/const.py | 6 + + pcs/common/reports/messages.py | 73 ++ + pcs/common/reports/types.py | 1 + + pcs/common/str_tools.py | 32 + + pcs/common/types.py | 13 + + pcs/config.py | 20 +- + pcs/lib/cib/nvpair_multi.py | 323 +++++++++ + pcs/lib/cib/rule/__init__.py | 8 + + pcs/lib/cib/rule/cib_to_dto.py | 185 +++++ + pcs/lib/cib/rule/expression_part.py | 49 ++ + pcs/lib/cib/rule/parsed_to_cib.py | 103 +++ + pcs/lib/cib/rule/parser.py | 232 ++++++ + pcs/lib/cib/rule/validator.py | 62 ++ + pcs/lib/cib/tools.py | 8 +- + pcs/lib/commands/cib_options.py | 322 ++++++++- + pcs/lib/validate.py | 15 + + pcs/lib/xml_tools.py | 9 +- + pcs/pcs.8 | 86 ++- + pcs/resource.py | 258 ++++++- + pcs/usage.py | 94 ++- + pcs_test/resources/cib-empty-3.1.xml | 2 +- + pcs_test/resources/cib-empty-3.2.xml | 2 +- + pcs_test/resources/cib-empty-3.3.xml | 10 + + pcs_test/resources/cib-empty-3.4.xml | 10 + + pcs_test/resources/cib-empty.xml | 2 +- + pcs_test/tier0/cli/reports/test_messages.py | 29 + + pcs_test/tier0/cli/resource/test_defaults.py | 324 +++++++++ + pcs_test/tier0/cli/test_nvset.py | 92 +++ + pcs_test/tier0/cli/test_rule.py | 477 +++++++++++++ + .../tier0/common/reports/test_messages.py | 55 +- + pcs_test/tier0/common/test_str_tools.py | 33 + + .../cib_options => cib/rule}/__init__.py | 0 + .../tier0/lib/cib/rule/test_cib_to_dto.py | 593 ++++++++++++++++ + .../tier0/lib/cib/rule/test_parsed_to_cib.py | 214 ++++++ + pcs_test/tier0/lib/cib/rule/test_parser.py | 270 +++++++ + pcs_test/tier0/lib/cib/rule/test_validator.py | 68 ++ + pcs_test/tier0/lib/cib/test_nvpair_multi.py | 513 ++++++++++++++ + pcs_test/tier0/lib/cib/test_tools.py | 13 +- + .../cib_options/test_operations_defaults.py | 120 ---- + 
.../cib_options/test_resources_defaults.py | 120 ---- + .../tier0/lib/commands/test_cib_options.py | 669 ++++++++++++++++++ + pcs_test/tier0/lib/test_validate.py | 27 + + pcs_test/tier1/legacy/test_resource.py | 8 +- + pcs_test/tier1/legacy/test_stonith.py | 8 +- + pcs_test/tier1/test_cib_options.py | 571 +++++++++++++++ + pcs_test/tier1/test_tag.py | 4 +- + pcs_test/tools/fixture.py | 4 +- + pcs_test/tools/misc.py | 61 +- + pcsd/capabilities.xml | 30 + + test/centos8/Dockerfile | 1 + + test/fedora30/Dockerfile | 1 + + test/fedora31/Dockerfile | 1 + + test/fedora32/Dockerfile | 1 + + 66 files changed, 6216 insertions(+), 366 deletions(-) + create mode 100644 pcs/cli/nvset.py + create mode 100644 pcs/cli/rule.py + create mode 100644 pcs/common/pacemaker/nvset.py + create mode 100644 pcs/common/pacemaker/rule.py + create mode 100644 pcs/lib/cib/nvpair_multi.py + create mode 100644 pcs/lib/cib/rule/__init__.py + create mode 100644 pcs/lib/cib/rule/cib_to_dto.py + create mode 100644 pcs/lib/cib/rule/expression_part.py + create mode 100644 pcs/lib/cib/rule/parsed_to_cib.py + create mode 100644 pcs/lib/cib/rule/parser.py + create mode 100644 pcs/lib/cib/rule/validator.py + create mode 100644 pcs_test/resources/cib-empty-3.3.xml + create mode 100644 pcs_test/resources/cib-empty-3.4.xml + create mode 100644 pcs_test/tier0/cli/resource/test_defaults.py + create mode 100644 pcs_test/tier0/cli/test_nvset.py + create mode 100644 pcs_test/tier0/cli/test_rule.py + rename pcs_test/tier0/lib/{commands/cib_options => cib/rule}/__init__.py (100%) + create mode 100644 pcs_test/tier0/lib/cib/rule/test_cib_to_dto.py + create mode 100644 pcs_test/tier0/lib/cib/rule/test_parsed_to_cib.py + create mode 100644 pcs_test/tier0/lib/cib/rule/test_parser.py + create mode 100644 pcs_test/tier0/lib/cib/rule/test_validator.py + create mode 100644 pcs_test/tier0/lib/cib/test_nvpair_multi.py + delete mode 100644 pcs_test/tier0/lib/commands/cib_options/test_operations_defaults.py + delete mode 100644 
pcs_test/tier0/lib/commands/cib_options/test_resources_defaults.py + create mode 100644 pcs_test/tier0/lib/commands/test_cib_options.py + create mode 100644 pcs_test/tier1/test_cib_options.py + +diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml +index 83eba12d..24444b72 100644 +--- a/.gitlab-ci.yml ++++ b/.gitlab-ci.yml +@@ -51,6 +51,7 @@ pylint: + python3-pip + python3-pycurl + python3-pyOpenSSL ++ python3-pyparsing + findutils + make + time +@@ -69,6 +70,7 @@ mypy: + python3-pip + python3-pycurl + python3-pyOpenSSL ++ python3-pyparsing + git + make + tar +@@ -112,6 +114,7 @@ python_tier0_tests: + python3-pip + python3-pycurl + python3-pyOpenSSL ++ python3-pyparsing + which + " + - make install_pip +diff --git a/README.md b/README.md +index f888da68..efb4d0d5 100644 +--- a/README.md ++++ b/README.md +@@ -30,6 +30,7 @@ These are the runtime dependencies of pcs and pcsd: + * python3-pycurl + * python3-setuptools + * python3-pyOpenSSL (python3-openssl) ++* python3-pyparsing + * python3-tornado 6.x + * python dataclasses (`pip install dataclasses`; required only for python 3.6, + already included in 3.7+) +diff --git a/mypy.ini b/mypy.ini +index ad3d1f18..ac6789a9 100644 +--- a/mypy.ini ++++ b/mypy.ini +@@ -8,12 +8,18 @@ disallow_untyped_defs = True + [mypy-pcs.lib.cib.resource.relations] + disallow_untyped_defs = True + ++[mypy-pcs.lib.cib.rule] ++disallow_untyped_defs = True ++ + [mypy-pcs.lib.cib.tag] + disallow_untyped_defs = True + + [mypy-pcs.lib.commands.tag] + disallow_untyped_defs = True + ++[mypy-pcs.lib.commands.cib_options] ++disallow_untyped_defs = True ++ + [mypy-pcs.lib.dr.*] + disallow_untyped_defs = True + disallow_untyped_calls = True +@@ -84,3 +90,6 @@ ignore_missing_imports = True + + [mypy-distro] + ignore_missing_imports = True ++ ++[mypy-pyparsing] ++ignore_missing_imports = True +diff --git a/pcs.spec.in b/pcs.spec.in +index c52c2fe4..e292a708 100644 +--- a/pcs.spec.in ++++ b/pcs.spec.in +@@ -122,6 +122,8 @@ BuildRequires: platform-python-setuptools 
+ %endif + + BuildRequires: python3-devel ++# for tier0 tests ++BuildRequires: python3-pyparsing + + # gcc for compiling custom rubygems + BuildRequires: gcc +@@ -155,6 +157,7 @@ Requires: platform-python-setuptools + + Requires: python3-lxml + Requires: python3-pycurl ++Requires: python3-pyparsing + # clufter and its dependencies + Requires: python3-clufter => 0.70.0 + %if "%{python3_version}" != "3.6" && "%{python3_version}" != "3.7" +diff --git a/pcs/cli/common/lib_wrapper.py b/pcs/cli/common/lib_wrapper.py +index 9fd05ac0..192a3dac 100644 +--- a/pcs/cli/common/lib_wrapper.py ++++ b/pcs/cli/common/lib_wrapper.py +@@ -388,8 +388,14 @@ def load_module(env, middleware_factory, name): + env, + middleware.build(middleware_factory.cib,), + { +- "set_operations_defaults": cib_options.set_operations_defaults, +- "set_resources_defaults": cib_options.set_resources_defaults, ++ "operation_defaults_config": cib_options.operation_defaults_config, ++ "operation_defaults_create": cib_options.operation_defaults_create, ++ "operation_defaults_remove": cib_options.operation_defaults_remove, ++ "operation_defaults_update": cib_options.operation_defaults_update, ++ "resource_defaults_config": cib_options.resource_defaults_config, ++ "resource_defaults_create": cib_options.resource_defaults_create, ++ "resource_defaults_remove": cib_options.resource_defaults_remove, ++ "resource_defaults_update": cib_options.resource_defaults_update, + }, + ) + +diff --git a/pcs/cli/nvset.py b/pcs/cli/nvset.py +new file mode 100644 +index 00000000..69442df3 +--- /dev/null ++++ b/pcs/cli/nvset.py +@@ -0,0 +1,53 @@ ++from typing import ( ++ cast, ++ Iterable, ++ List, ++ Optional, ++) ++ ++from pcs.cli.rule import rule_expression_dto_to_lines ++from pcs.common.pacemaker.nvset import CibNvsetDto ++from pcs.common.str_tools import ( ++ format_name_value_list, ++ indent, ++) ++from pcs.common.types import CibNvsetType ++ ++ ++def nvset_dto_list_to_lines( ++ nvset_dto_list: Iterable[CibNvsetDto], ++ 
with_ids: bool = False, ++ text_if_empty: Optional[str] = None, ++) -> List[str]: ++ if not nvset_dto_list: ++ return [text_if_empty] if text_if_empty else [] ++ return [ ++ line ++ for nvset_dto in nvset_dto_list ++ for line in nvset_dto_to_lines(nvset_dto, with_ids=with_ids) ++ ] ++ ++ ++def nvset_dto_to_lines(nvset: CibNvsetDto, with_ids: bool = False) -> List[str]: ++ nvset_label = _nvset_type_to_label.get(nvset.type, "Options Set") ++ heading_parts = [f"{nvset_label}: {nvset.id}"] ++ if nvset.options: ++ heading_parts.append( ++ " ".join(format_name_value_list(sorted(nvset.options.items()))) ++ ) ++ ++ lines = format_name_value_list( ++ sorted([(nvpair.name, nvpair.value) for nvpair in nvset.nvpairs]) ++ ) ++ if nvset.rule: ++ lines.extend( ++ rule_expression_dto_to_lines(nvset.rule, with_ids=with_ids) ++ ) ++ ++ return [" ".join(heading_parts)] + indent(lines) ++ ++ ++_nvset_type_to_label = { ++ cast(str, CibNvsetType.INSTANCE): "Attributes", ++ cast(str, CibNvsetType.META): "Meta Attrs", ++} +diff --git a/pcs/cli/reports/messages.py b/pcs/cli/reports/messages.py +index 36f00a9e..7ccc8ab0 100644 +--- a/pcs/cli/reports/messages.py ++++ b/pcs/cli/reports/messages.py +@@ -402,6 +402,45 @@ class TagCannotRemoveReferencesWithoutRemovingTag(CliReportMessageCustom): + ) + + ++class RuleExpressionParseError(CliReportMessageCustom): ++ _obj: messages.RuleExpressionParseError ++ ++ @property ++ def message(self) -> str: ++ # Messages coming from the parser are not very useful and readable, ++ # they mostly contain one line grammar expression covering the whole ++ # rule. No user would be able to parse that. Therefore we omit the ++ # messages. 
++ marker = "-" * (self._obj.column_number - 1) + "^" ++ return ( ++ f"'{self._obj.rule_string}' is not a valid rule expression, parse " ++ f"error near or after line {self._obj.line_number} column " ++ f"{self._obj.column_number}\n" ++ f" {self._obj.rule_line}\n" ++ f" {marker}" ++ ) ++ ++ ++class CibNvsetAmbiguousProvideNvsetId(CliReportMessageCustom): ++ _obj: messages.CibNvsetAmbiguousProvideNvsetId ++ ++ @property ++ def message(self) -> str: ++ command_map = { ++ const.PCS_COMMAND_RESOURCE_DEFAULTS_UPDATE: ( ++ "pcs resource defaults set update" ++ ), ++ const.PCS_COMMAND_OPERATION_DEFAULTS_UPDATE: ( ++ "pcs resource op defaults set update" ++ ), ++ } ++ command = command_map.get(self._obj.pcs_command, "") ++ return ( ++ f"Several options sets exist, please use the '{command}' command " ++ "and specify an option set ID" ++ ) ++ ++ + def _create_report_msg_map() -> Dict[str, type]: + result: Dict[str, type] = {} + for report_msg_cls in get_all_subclasses(CliReportMessageCustom): +diff --git a/pcs/cli/routing/resource.py b/pcs/cli/routing/resource.py +index 28bb3d5e..0706f43b 100644 +--- a/pcs/cli/routing/resource.py ++++ b/pcs/cli/routing/resource.py +@@ -1,15 +1,88 @@ + from functools import partial ++from typing import ( ++ Any, ++ List, ++) + + from pcs import ( + resource, + usage, + ) + from pcs.cli.common.errors import raise_command_replaced ++from pcs.cli.common.parse_args import InputModifiers + from pcs.cli.common.routing import create_router + + from pcs.cli.resource.relations import show_resource_relations_cmd + + ++def resource_defaults_cmd( ++ lib: Any, argv: List[str], modifiers: InputModifiers ++) -> None: ++ """ ++ Options: ++ * -f - CIB file ++ * --force - allow unknown options ++ """ ++ if argv and "=" in argv[0]: ++ # DEPRECATED legacy command ++ return resource.resource_defaults_legacy_cmd( ++ lib, argv, modifiers, deprecated_syntax_used=True ++ ) ++ ++ router = create_router( ++ { ++ "config": resource.resource_defaults_config_cmd, ++ 
"set": create_router( ++ { ++ "create": resource.resource_defaults_set_create_cmd, ++ "delete": resource.resource_defaults_set_remove_cmd, ++ "remove": resource.resource_defaults_set_remove_cmd, ++ "update": resource.resource_defaults_set_update_cmd, ++ }, ++ ["resource", "defaults", "set"], ++ ), ++ "update": resource.resource_defaults_legacy_cmd, ++ }, ++ ["resource", "defaults"], ++ default_cmd="config", ++ ) ++ return router(lib, argv, modifiers) ++ ++ ++def resource_op_defaults_cmd( ++ lib: Any, argv: List[str], modifiers: InputModifiers ++) -> None: ++ """ ++ Options: ++ * -f - CIB file ++ * --force - allow unknown options ++ """ ++ if argv and "=" in argv[0]: ++ # DEPRECATED legacy command ++ return resource.resource_op_defaults_legacy_cmd( ++ lib, argv, modifiers, deprecated_syntax_used=True ++ ) ++ ++ router = create_router( ++ { ++ "config": resource.resource_op_defaults_config_cmd, ++ "set": create_router( ++ { ++ "create": resource.resource_op_defaults_set_create_cmd, ++ "delete": resource.resource_op_defaults_set_remove_cmd, ++ "remove": resource.resource_op_defaults_set_remove_cmd, ++ "update": resource.resource_op_defaults_set_update_cmd, ++ }, ++ ["resource", "op", "defaults", "set"], ++ ), ++ "update": resource.resource_op_defaults_legacy_cmd, ++ }, ++ ["resource", "op", "defaults"], ++ default_cmd="config", ++ ) ++ return router(lib, argv, modifiers) ++ ++ + resource_cmd = create_router( + { + "help": lambda lib, argv, modifiers: usage.resource(argv), +@@ -68,14 +141,14 @@ resource_cmd = create_router( + "failcount": resource.resource_failcount, + "op": create_router( + { +- "defaults": resource.resource_op_defaults_cmd, ++ "defaults": resource_op_defaults_cmd, + "add": resource.resource_op_add_cmd, + "remove": resource.resource_op_delete_cmd, + "delete": resource.resource_op_delete_cmd, + }, + ["resource", "op"], + ), +- "defaults": resource.resource_defaults_cmd, ++ "defaults": resource_defaults_cmd, + "cleanup": resource.resource_cleanup, + 
"refresh": resource.resource_refresh, + "relocate": create_router( +diff --git a/pcs/cli/rule.py b/pcs/cli/rule.py +new file mode 100644 +index 00000000..c1149fff +--- /dev/null ++++ b/pcs/cli/rule.py +@@ -0,0 +1,89 @@ ++from typing import List ++ ++from pcs.common.pacemaker.rule import CibRuleExpressionDto ++from pcs.common.str_tools import ( ++ format_name_value_list, ++ indent, ++) ++from pcs.common.types import CibRuleExpressionType ++ ++ ++def rule_expression_dto_to_lines( ++ rule_expr: CibRuleExpressionDto, with_ids: bool = False ++) -> List[str]: ++ if rule_expr.type == CibRuleExpressionType.RULE: ++ return _rule_dto_to_lines(rule_expr, with_ids) ++ if rule_expr.type == CibRuleExpressionType.DATE_EXPRESSION: ++ return _date_dto_to_lines(rule_expr, with_ids) ++ return _simple_expr_to_lines(rule_expr, with_ids) ++ ++ ++def _rule_dto_to_lines( ++ rule_expr: CibRuleExpressionDto, with_ids: bool = False ++) -> List[str]: ++ heading_parts = [ ++ "Rule{0}:".format(" (expired)" if rule_expr.is_expired else "") ++ ] ++ heading_parts.extend( ++ format_name_value_list(sorted(rule_expr.options.items())) ++ ) ++ if with_ids: ++ heading_parts.append(f"(id:{rule_expr.id})") ++ ++ lines = [] ++ for child in rule_expr.expressions: ++ lines.extend(rule_expression_dto_to_lines(child, with_ids)) ++ ++ return [" ".join(heading_parts)] + indent(lines) ++ ++ ++def _date_dto_to_lines( ++ rule_expr: CibRuleExpressionDto, with_ids: bool = False ++) -> List[str]: ++ # pylint: disable=too-many-branches ++ operation = rule_expr.options.get("operation", None) ++ ++ if operation == "date_spec": ++ heading_parts = ["Expression:"] ++ if with_ids: ++ heading_parts.append(f"(id:{rule_expr.id})") ++ line_parts = ["Date Spec:"] ++ if rule_expr.date_spec: ++ line_parts.extend( ++ format_name_value_list( ++ sorted(rule_expr.date_spec.options.items()) ++ ) ++ ) ++ if with_ids: ++ line_parts.append(f"(id:{rule_expr.date_spec.id})") ++ return [" ".join(heading_parts)] + indent([" 
".join(line_parts)]) ++ ++ if operation == "in_range" and rule_expr.duration: ++ heading_parts = ["Expression:", "date", "in_range"] ++ if "start" in rule_expr.options: ++ heading_parts.append(rule_expr.options["start"]) ++ heading_parts.extend(["to", "duration"]) ++ if with_ids: ++ heading_parts.append(f"(id:{rule_expr.id})") ++ lines = [" ".join(heading_parts)] ++ ++ line_parts = ["Duration:"] ++ line_parts.extend( ++ format_name_value_list(sorted(rule_expr.duration.options.items())) ++ ) ++ if with_ids: ++ line_parts.append(f"(id:{rule_expr.duration.id})") ++ lines.extend(indent([" ".join(line_parts)])) ++ ++ return lines ++ ++ return _simple_expr_to_lines(rule_expr, with_ids=with_ids) ++ ++ ++def _simple_expr_to_lines( ++ rule_expr: CibRuleExpressionDto, with_ids: bool = False ++) -> List[str]: ++ parts = ["Expression:", rule_expr.as_string] ++ if with_ids: ++ parts.append(f"(id:{rule_expr.id})") ++ return [" ".join(parts)] +diff --git a/pcs/common/interface/dto.py b/pcs/common/interface/dto.py +index fb40fc5e..768156d6 100644 +--- a/pcs/common/interface/dto.py ++++ b/pcs/common/interface/dto.py +@@ -42,7 +42,14 @@ def from_dict(cls: Type[DtoType], data: DtoPayload) -> DtoType: + data=data, + # NOTE: all enum types has to be listed here in key cast + # see: https://github.com/konradhalas/dacite#casting +- config=dacite.Config(cast=[types.DrRole, types.ResourceRelationType,],), ++ config=dacite.Config( ++ cast=[ ++ types.CibNvsetType, ++ types.CibRuleExpressionType, ++ types.DrRole, ++ types.ResourceRelationType, ++ ] ++ ), + ) + + +diff --git a/pcs/common/pacemaker/nvset.py b/pcs/common/pacemaker/nvset.py +new file mode 100644 +index 00000000..6d72c787 +--- /dev/null ++++ b/pcs/common/pacemaker/nvset.py +@@ -0,0 +1,26 @@ ++from dataclasses import dataclass ++from typing import ( ++ Mapping, ++ Optional, ++ Sequence, ++) ++ ++from pcs.common.interface.dto import DataTransferObject ++from pcs.common.pacemaker.rule import CibRuleExpressionDto ++from 
pcs.common.types import CibNvsetType ++ ++ ++@dataclass(frozen=True) ++class CibNvpairDto(DataTransferObject): ++ id: str # pylint: disable=invalid-name ++ name: str ++ value: str ++ ++ ++@dataclass(frozen=True) ++class CibNvsetDto(DataTransferObject): ++ id: str # pylint: disable=invalid-name ++ type: CibNvsetType ++ options: Mapping[str, str] ++ rule: Optional[CibRuleExpressionDto] ++ nvpairs: Sequence[CibNvpairDto] +diff --git a/pcs/common/pacemaker/rule.py b/pcs/common/pacemaker/rule.py +new file mode 100644 +index 00000000..306e65e6 +--- /dev/null ++++ b/pcs/common/pacemaker/rule.py +@@ -0,0 +1,28 @@ ++from dataclasses import dataclass ++from typing import ( ++ Mapping, ++ Optional, ++ Sequence, ++) ++ ++from pcs.common.interface.dto import DataTransferObject ++from pcs.common.types import CibRuleExpressionType ++ ++ ++@dataclass(frozen=True) ++class CibRuleDateCommonDto(DataTransferObject): ++ id: str # pylint: disable=invalid-name ++ options: Mapping[str, str] ++ ++ ++@dataclass(frozen=True) ++class CibRuleExpressionDto(DataTransferObject): ++ # pylint: disable=too-many-instance-attributes ++ id: str # pylint: disable=invalid-name ++ type: CibRuleExpressionType ++ is_expired: bool # only valid for type==rule ++ options: Mapping[str, str] ++ date_spec: Optional[CibRuleDateCommonDto] ++ duration: Optional[CibRuleDateCommonDto] ++ expressions: Sequence["CibRuleExpressionDto"] ++ as_string: str +diff --git a/pcs/common/reports/codes.py b/pcs/common/reports/codes.py +index 26eb8b51..8bcabfab 100644 +--- a/pcs/common/reports/codes.py ++++ b/pcs/common/reports/codes.py +@@ -123,6 +123,7 @@ CIB_LOAD_ERROR = M("CIB_LOAD_ERROR") + CIB_LOAD_ERROR_GET_NODES_FOR_VALIDATION = M( + "CIB_LOAD_ERROR_GET_NODES_FOR_VALIDATION" + ) ++CIB_NVSET_AMBIGUOUS_PROVIDE_NVSET_ID = M("CIB_NVSET_AMBIGUOUS_PROVIDE_NVSET_ID") + CIB_LOAD_ERROR_SCOPE_MISSING = M("CIB_LOAD_ERROR_SCOPE_MISSING") + CIB_PUSH_FORCED_FULL_DUE_TO_CRM_FEATURE_SET = M( + "CIB_PUSH_FORCED_FULL_DUE_TO_CRM_FEATURE_SET" 
+@@ -405,6 +406,8 @@ RESOURCE_UNMOVE_UNBAN_PCMK_SUCCESS = M("RESOURCE_UNMOVE_UNBAN_PCMK_SUCCESS") + RESOURCE_UNMOVE_UNBAN_PCMK_EXPIRED_NOT_SUPPORTED = M( + "RESOURCE_UNMOVE_UNBAN_PCMK_EXPIRED_NOT_SUPPORTED" + ) ++RULE_EXPRESSION_PARSE_ERROR = M("RULE_EXPRESSION_PARSE_ERROR") ++RULE_EXPRESSION_NOT_ALLOWED = M("RULE_EXPRESSION_NOT_ALLOWED") + RUN_EXTERNAL_PROCESS_ERROR = M("RUN_EXTERNAL_PROCESS_ERROR") + RUN_EXTERNAL_PROCESS_FINISHED = M("RUN_EXTERNAL_PROCESS_FINISHED") + RUN_EXTERNAL_PROCESS_STARTED = M("RUN_EXTERNAL_PROCESS_STARTED") +diff --git a/pcs/common/reports/const.py b/pcs/common/reports/const.py +index aeb593ee..fa2122d0 100644 +--- a/pcs/common/reports/const.py ++++ b/pcs/common/reports/const.py +@@ -1,9 +1,15 @@ + from .types import ( + DefaultAddressSource, ++ PcsCommand, + ReasonType, + ServiceAction, + ) + ++PCS_COMMAND_OPERATION_DEFAULTS_UPDATE = PcsCommand( ++ "resource op defaults update" ++) ++PCS_COMMAND_RESOURCE_DEFAULTS_UPDATE = PcsCommand("resource defaults update") ++ + SERVICE_ACTION_START = ServiceAction("START") + SERVICE_ACTION_STOP = ServiceAction("STOP") + SERVICE_ACTION_ENABLE = ServiceAction("ENABLE") +diff --git a/pcs/common/reports/messages.py b/pcs/common/reports/messages.py +index 540e8c69..f04d8632 100644 +--- a/pcs/common/reports/messages.py ++++ b/pcs/common/reports/messages.py +@@ -27,6 +27,7 @@ from pcs.common.str_tools import ( + indent, + is_iterable_not_str, + ) ++from pcs.common.types import CibRuleExpressionType + + from . 
import ( + codes, +@@ -120,6 +121,7 @@ _type_articles = { + "ACL user": "an", + "ACL role": "an", + "ACL permission": "an", ++ "options set": "an", + } + + +@@ -6399,3 +6401,74 @@ class TagIdsNotInTheTag(ReportItemMessage): + ids=format_plural(self.id_list, "id"), + id_list=format_list(self.id_list), + ) ++ ++ ++@dataclass(frozen=True) ++class RuleExpressionParseError(ReportItemMessage): ++ """ ++ Unable to parse pacemaker cib rule expression string ++ ++ rule_string -- the whole rule expression string ++ reason -- error message from rule parser ++ rule_line -- part of rule_string - the line where the error occurred ++ line_number -- the line where parsing failed ++ column_number -- the column where parsing failed ++ position -- the start index where parsing failed ++ """ ++ ++ rule_string: str ++ reason: str ++ rule_line: str ++ line_number: int ++ column_number: int ++ position: int ++ _code = codes.RULE_EXPRESSION_PARSE_ERROR ++ ++ @property ++ def message(self) -> str: ++ # Messages coming from the parser are not very useful and readable, ++ # they mostly contain one line grammar expression covering the whole ++ # rule. No user would be able to parse that. Therefore we omit the ++ # messages. 
++ return ( ++ f"'{self.rule_string}' is not a valid rule expression, parse error " ++ f"near or after line {self.line_number} column {self.column_number}" ++ ) ++ ++ ++@dataclass(frozen=True) ++class RuleExpressionNotAllowed(ReportItemMessage): ++ """ ++ Used rule expression is not allowed in current context ++ ++ expression_type -- disallowed expression type ++ """ ++ ++ expression_type: CibRuleExpressionType ++ _code = codes.RULE_EXPRESSION_NOT_ALLOWED ++ ++ @property ++ def message(self) -> str: ++ type_map = { ++ CibRuleExpressionType.OP_EXPRESSION: "op", ++ CibRuleExpressionType.RSC_EXPRESSION: "resource", ++ } ++ return ( ++ f"Keyword '{type_map[self.expression_type]}' cannot be used " ++ "in a rule in this command" ++ ) ++ ++ ++@dataclass(frozen=True) ++class CibNvsetAmbiguousProvideNvsetId(ReportItemMessage): ++ """ ++ An old command supporting only one nvset have been used when several nvsets ++ exist. We require an nvset ID the command should work with to be specified. ++ """ ++ ++ pcs_command: types.PcsCommand ++ _code = codes.CIB_NVSET_AMBIGUOUS_PROVIDE_NVSET_ID ++ ++ @property ++ def message(self) -> str: ++ return "Several options sets exist, please specify an option set ID" +diff --git a/pcs/common/reports/types.py b/pcs/common/reports/types.py +index 5973279e..541046ea 100644 +--- a/pcs/common/reports/types.py ++++ b/pcs/common/reports/types.py +@@ -3,6 +3,7 @@ from typing import NewType + DefaultAddressSource = NewType("DefaultAddressSource", str) + ForceCode = NewType("ForceCode", str) + MessageCode = NewType("MessageCode", str) ++PcsCommand = NewType("PcsCommand", str) + ReasonType = NewType("ReasonType", str) + ServiceAction = NewType("ServiceAction", str) + SeverityLevel = NewType("SeverityLevel", str) +diff --git a/pcs/common/str_tools.py b/pcs/common/str_tools.py +index deb38799..80864b50 100644 +--- a/pcs/common/str_tools.py ++++ b/pcs/common/str_tools.py +@@ -3,6 +3,8 @@ from typing import ( + Any, + List, + Mapping, ++ Sequence, ++ Tuple, 
+ TypeVar, + ) + +@@ -49,6 +51,36 @@ def format_list_custom_last_separator( + ) + + ++# For now, Tuple[str, str] is sufficient. Feel free to change it if needed, ++# e.g. when values can be integers. ++def format_name_value_list(item_list: Sequence[Tuple[str, str]]) -> List[str]: ++ """ ++ Turn 2-tuples to 'name=value' strings with standard quoting ++ """ ++ output = [] ++ for name, value in item_list: ++ name = quote(name, "= ") ++ value = quote(value, "= ") ++ output.append(f"{name}={value}") ++ return output ++ ++ ++def quote(string: str, chars_to_quote: str) -> str: ++ """ ++ Quote a string if it contains specified characters ++ ++ string -- the string to be processed ++ chars_to_quote -- the characters causing quoting ++ """ ++ if not frozenset(chars_to_quote) & frozenset(string): ++ return string ++ if '"' not in string: ++ return f'"{string}"' ++ if "'" not in string: ++ return f"'{string}'" ++ return '"{string}"'.format(string=string.replace('"', '\\"')) ++ ++ + def join_multilines(strings): + return "\n".join([a.strip() for a in strings if a.strip()]) + +diff --git a/pcs/common/types.py b/pcs/common/types.py +index dace6f6d..0b656cc0 100644 +--- a/pcs/common/types.py ++++ b/pcs/common/types.py +@@ -3,6 +3,19 @@ from enum import auto + from pcs.common.tools import AutoNameEnum + + ++class CibNvsetType(AutoNameEnum): ++ INSTANCE = auto() ++ META = auto() ++ ++ ++class CibRuleExpressionType(AutoNameEnum): ++ RULE = auto() ++ EXPRESSION = auto() ++ DATE_EXPRESSION = auto() ++ OP_EXPRESSION = auto() ++ RSC_EXPRESSION = auto() ++ ++ + class ResourceRelationType(AutoNameEnum): + ORDER = auto() + ORDER_SET = auto() +diff --git a/pcs/config.py b/pcs/config.py +index 058ec55a..67aa6e0e 100644 +--- a/pcs/config.py ++++ b/pcs/config.py +@@ -48,6 +48,7 @@ from pcs import ( + from pcs.cli.common import middleware + from pcs.cli.common.errors import CmdLineInputError + from pcs.cli.constraint import command as constraint_command ++from pcs.cli.nvset import 
nvset_dto_list_to_lines + from pcs.cli.reports import process_library_reports + from pcs.common.reports import constraints as constraints_reports + from pcs.common.str_tools import indent +@@ -96,7 +97,8 @@ def _config_show_cib_lines(lib): + Commandline options: + * -f - CIB file + """ +- # update of pcs_options will change output of constraint show ++ # update of pcs_options will change output of constraint show and ++ # displaying resources and operations defaults + utils.pcs_options["--full"] = 1 + # get latest modifiers object after updating pcs_options + modifiers = utils.get_input_modifiers() +@@ -172,11 +174,23 @@ def _config_show_cib_lines(lib): + all_lines.append("") + all_lines.append("Resources Defaults:") + all_lines.extend( +- indent(resource.show_defaults(cib_dom, "rsc_defaults"), indent_step=1) ++ indent( ++ nvset_dto_list_to_lines( ++ lib.cib_options.resource_defaults_config(), ++ with_ids=modifiers.get("--full"), ++ text_if_empty="No defaults set", ++ ) ++ ) + ) + all_lines.append("Operations Defaults:") + all_lines.extend( +- indent(resource.show_defaults(cib_dom, "op_defaults"), indent_step=1) ++ indent( ++ nvset_dto_list_to_lines( ++ lib.cib_options.operation_defaults_config(), ++ with_ids=modifiers.get("--full"), ++ text_if_empty="No defaults set", ++ ) ++ ) + ) + + all_lines.append("") +diff --git a/pcs/lib/cib/nvpair_multi.py b/pcs/lib/cib/nvpair_multi.py +new file mode 100644 +index 00000000..7bdc2f55 +--- /dev/null ++++ b/pcs/lib/cib/nvpair_multi.py +@@ -0,0 +1,323 @@ ++from typing import ( ++ cast, ++ Iterable, ++ List, ++ Mapping, ++ NewType, ++ Optional, ++ Tuple, ++) ++from xml.etree.ElementTree import Element ++ ++from lxml import etree ++from lxml.etree import _Element ++ ++from pcs.common import reports ++from pcs.common.pacemaker.nvset import ( ++ CibNvpairDto, ++ CibNvsetDto, ++) ++from pcs.common.reports import ReportItemList ++from pcs.common.types import CibNvsetType ++from pcs.lib import validate ++from pcs.lib.cib.rule import 
( ++ RuleParseError, ++ RuleRoot, ++ RuleValidator, ++ parse_rule, ++ rule_element_to_dto, ++ rule_to_cib, ++) ++from pcs.lib.cib.tools import ( ++ ElementSearcher, ++ IdProvider, ++ create_subelement_id, ++) ++from pcs.lib.xml_tools import ( ++ export_attributes, ++ remove_one_element, ++) ++ ++ ++NvsetTag = NewType("NvsetTag", str) ++NVSET_INSTANCE = NvsetTag("instance_attributes") ++NVSET_META = NvsetTag("meta_attributes") ++ ++_tag_to_type = { ++ str(NVSET_META): CibNvsetType.META, ++ str(NVSET_INSTANCE): CibNvsetType.INSTANCE, ++} ++ ++ ++def nvpair_element_to_dto(nvpair_el: Element) -> CibNvpairDto: ++ """ ++ Export an nvpair xml element to its DTO ++ """ ++ return CibNvpairDto( ++ nvpair_el.get("id", ""), ++ nvpair_el.get("name", ""), ++ nvpair_el.get("value", ""), ++ ) ++ ++ ++def nvset_element_to_dto(nvset_el: Element) -> CibNvsetDto: ++ """ ++ Export an nvset xml element to its DTO ++ """ ++ rule_el = nvset_el.find("./rule") ++ return CibNvsetDto( ++ nvset_el.get("id", ""), ++ _tag_to_type[nvset_el.tag], ++ export_attributes(nvset_el, with_id=False), ++ None if rule_el is None else rule_element_to_dto(rule_el), ++ [ ++ nvpair_element_to_dto(nvpair_el) ++ for nvpair_el in nvset_el.iterfind("./nvpair") ++ ], ++ ) ++ ++ ++def find_nvsets(parent_element: Element) -> List[Element]: ++ """ ++ Get all nvset xml elements in the given parent element ++ ++ parent_element -- an element to look for nvsets in ++ """ ++ return cast( ++ # The xpath method has a complicated return value, but we know our xpath ++ # expression returns only elements. ++ List[Element], ++ cast(_Element, parent_element).xpath( ++ "./*[{nvset_tags}]".format( ++ nvset_tags=" or ".join(f"self::{tag}" for tag in _tag_to_type) ++ ) ++ ), ++ ) ++ ++ ++def find_nvsets_by_ids( ++ parent_element: Element, id_list: Iterable[str] ++) -> Tuple[List[Element], ReportItemList]: ++ """ ++ Find nvset elements by their IDs and return them with non-empty report ++ list in case of errors. 
++ ++ parent_element -- an element to look for nvsets in ++ id_list -- nvset IDs to be looked for ++ """ ++ element_list = [] ++ report_list: ReportItemList = [] ++ for nvset_id in id_list: ++ searcher = ElementSearcher( ++ _tag_to_type.keys(), ++ nvset_id, ++ parent_element, ++ element_type_desc="options set", ++ ) ++ if searcher.element_found(): ++ element_list.append(searcher.get_element()) ++ else: ++ report_list.extend(searcher.get_errors()) ++ return element_list, report_list ++ ++ ++class ValidateNvsetAppendNew: ++ """ ++ Validator for creating new nvset and appending it to CIB ++ """ ++ ++ def __init__( ++ self, ++ id_provider: IdProvider, ++ nvpair_dict: Mapping[str, str], ++ nvset_options: Mapping[str, str], ++ nvset_rule: Optional[str] = None, ++ rule_allows_rsc_expr: bool = False, ++ rule_allows_op_expr: bool = False, ++ ): ++ """ ++ id_provider -- elements' ids generator ++ nvpair_dict -- nvpairs to be put into the new nvset ++ nvset_options -- additional attributes of the created nvset ++ nvset_rule -- optional rule describing when the created nvset applies ++ rule_allows_rsc_expr -- is rsc_expression element allowed in nvset_rule? ++ rule_allows_op_expr -- is op_expression element allowed in nvset_rule? ++ """ ++ self._id_provider = id_provider ++ self._nvpair_dict = nvpair_dict ++ self._nvset_options = nvset_options ++ self._nvset_rule = nvset_rule ++ self._allow_rsc_expr = rule_allows_rsc_expr ++ self._allow_op_expr = rule_allows_op_expr ++ self._nvset_rule_parsed: Optional[RuleRoot] = None ++ ++ def validate(self, force_options: bool = False) -> reports.ReportItemList: ++ report_list: reports.ReportItemList = [] ++ ++ # Nvpair dict is intentionally not validated: it may contain any keys ++ # and values. This can change in the future and then we add a ++ # validation. Until then there is really nothing to validate there. 
++ ++ # validate nvset options ++ validators = [ ++ validate.NamesIn( ++ ("id", "score"), ++ **validate.set_warning( ++ reports.codes.FORCE_OPTIONS, force_options ++ ), ++ ), ++ # with id_provider it validates that the id is available as well ++ validate.ValueId( ++ "id", option_name_for_report="id", id_provider=self._id_provider ++ ), ++ validate.ValueScore("score"), ++ ] ++ report_list.extend( ++ validate.ValidatorAll(validators).validate(self._nvset_options) ++ ) ++ ++ # parse and validate rule ++ # TODO write and call parsed rule validation and cleanup and tests ++ if self._nvset_rule: ++ try: ++ # Allow flags are set to True always, the parsed rule tree is ++ # checked in the validator instead. That gives us better error ++ # messages, such as "op expression cannot be used in this ++ # context" instead of a universal "parse error". ++ self._nvset_rule_parsed = parse_rule( ++ self._nvset_rule, allow_rsc_expr=True, allow_op_expr=True ++ ) ++ report_list.extend( ++ RuleValidator( ++ self._nvset_rule_parsed, ++ allow_rsc_expr=self._allow_rsc_expr, ++ allow_op_expr=self._allow_op_expr, ++ ).get_reports() ++ ) ++ except RuleParseError as e: ++ report_list.append( ++ reports.ReportItem.error( ++ reports.messages.RuleExpressionParseError( ++ e.rule_string, ++ e.msg, ++ e.rule_line, ++ e.lineno, ++ e.colno, ++ e.pos, ++ ) ++ ) ++ ) ++ ++ return report_list ++ ++ def get_parsed_rule(self) -> Optional[RuleRoot]: ++ return self._nvset_rule_parsed ++ ++ ++def nvset_append_new( ++ parent_element: Element, ++ id_provider: IdProvider, ++ nvset_tag: NvsetTag, ++ nvpair_dict: Mapping[str, str], ++ nvset_options: Mapping[str, str], ++ nvset_rule: Optional[RuleRoot] = None, ++) -> Element: ++ """ ++ Create new nvset and append it to CIB ++ ++ parent_element -- the created nvset will be appended into this element ++ id_provider -- elements' ids generator ++ nvset_tag -- type and actual tag of the nvset ++ nvpair_dict -- nvpairs to be put into the new nvset ++ nvset_options -- 
additional attributes of the created nvset ++ nvset_rule -- optional rule describing when the created nvset applies ++ """ ++ nvset_options = dict(nvset_options) # make a copy which we can modify ++ if "id" not in nvset_options or not nvset_options["id"]: ++ nvset_options["id"] = create_subelement_id( ++ parent_element, nvset_tag, id_provider ++ ) ++ ++ nvset_el = etree.SubElement(cast(_Element, parent_element), nvset_tag) ++ for name, value in nvset_options.items(): ++ if value != "": ++ nvset_el.attrib[name] = value ++ if nvset_rule: ++ rule_to_cib(cast(Element, nvset_el), id_provider, nvset_rule) ++ for name, value in nvpair_dict.items(): ++ _set_nvpair(cast(Element, nvset_el), id_provider, name, value) ++ return cast(Element, nvset_el) ++ ++ ++def nvset_remove(nvset_el_list: Iterable[Element]) -> None: ++ """ ++ Remove given nvset elements from CIB ++ ++ nvset_el_list -- nvset elements to be removed ++ """ ++ for nvset_el in nvset_el_list: ++ remove_one_element(nvset_el) ++ ++ ++def nvset_update( ++ nvset_el: Element, id_provider: IdProvider, nvpair_dict: Mapping[str, str], ++) -> None: ++ """ ++ Update an existing nvset ++ ++ nvset_el -- nvset to be updated ++ id_provider -- elements' ids generator ++ nvpair_dict -- nvpairs to be put into the nvset ++ """ ++ # Do not ever remove the nvset element, even if it is empty. There may be ++ # ACLs set in pacemaker which allow "write" for nvpairs (adding, changing ++ # and removing) but not nvsets. In such a case, removing the nvset would ++ # cause the whole change to be rejected by pacemaker with a "permission ++ # denied" message. 
++ # https://bugzilla.redhat.com/show_bug.cgi?id=1642514 ++ for name, value in nvpair_dict.items(): ++ _set_nvpair(nvset_el, id_provider, name, value) ++ ++ ++def _set_nvpair( ++ nvset_element: Element, id_provider: IdProvider, name: str, value: str ++): ++ """ ++ Ensure name-value pair is set / removed in specified nvset ++ ++ nvset_element -- container for nvpair elements to update ++ id_provider -- elements' ids generator ++ name -- name of the nvpair to be set ++ value -- value of the nvpair to be set, if "" the nvpair will be removed ++ """ ++ nvpair_el_list = cast( ++ # The xpath method has a complicated return value, but we know our xpath ++ # expression returns only elements. ++ List[Element], ++ cast(_Element, nvset_element).xpath("./nvpair[@name=$name]", name=name), ++ ) ++ ++ if not nvpair_el_list: ++ if value != "": ++ etree.SubElement( ++ cast(_Element, nvset_element), ++ "nvpair", ++ { ++ "id": create_subelement_id( ++ nvset_element, ++ # limit id length to prevent excessively long ids ++ name[:20], ++ id_provider, ++ ), ++ "name": name, ++ "value": value, ++ }, ++ ) ++ return ++ ++ if value != "": ++ nvpair_el_list[0].set("value", value) ++ else: ++ nvset_element.remove(nvpair_el_list[0]) ++ for nvpair_el in nvpair_el_list[1:]: ++ nvset_element.remove(nvpair_el) +diff --git a/pcs/lib/cib/rule/__init__.py b/pcs/lib/cib/rule/__init__.py +new file mode 100644 +index 00000000..94228572 +--- /dev/null ++++ b/pcs/lib/cib/rule/__init__.py +@@ -0,0 +1,8 @@ ++from .cib_to_dto import rule_element_to_dto ++from .expression_part import BoolExpr as RuleRoot ++from .parser import ( ++ parse_rule, ++ RuleParseError, ++) ++from .parsed_to_cib import export as rule_to_cib ++from .validator import Validator as RuleValidator +diff --git a/pcs/lib/cib/rule/cib_to_dto.py b/pcs/lib/cib/rule/cib_to_dto.py +new file mode 100644 +index 00000000..d8198e0c +--- /dev/null ++++ b/pcs/lib/cib/rule/cib_to_dto.py +@@ -0,0 +1,185 @@ ++from typing import cast ++from 
xml.etree.ElementTree import Element ++ ++from lxml.etree import _Element ++ ++from pcs.common.pacemaker.rule import ( ++ CibRuleDateCommonDto, ++ CibRuleExpressionDto, ++) ++from pcs.common.str_tools import ( ++ format_name_value_list, ++ quote, ++) ++from pcs.common.types import CibRuleExpressionType ++from pcs.lib.xml_tools import export_attributes ++ ++ ++def rule_element_to_dto(rule_el: Element) -> CibRuleExpressionDto: ++ """ ++ Export a rule xml element including its children to their DTOs ++ """ ++ return _tag_to_export[rule_el.tag](rule_el) ++ ++ ++def _attrs_to_str(el: Element) -> str: ++ return " ".join( ++ format_name_value_list( ++ sorted(export_attributes(el, with_id=False).items()) ++ ) ++ ) ++ ++ ++def _rule_to_dto(rule_el: Element) -> CibRuleExpressionDto: ++ children_dto_list = [ ++ _tag_to_export[child.tag](child) ++ # The xpath method has a complicated return value, but we know our xpath ++ # expression only returns elements. ++ for child in cast( ++ Element, cast(_Element, rule_el).xpath(_xpath_for_export) ++ ) ++ ] ++ # "and" is a documented pacemaker default ++ # https://clusterlabs.org/pacemaker/doc/en-US/Pacemaker/2.0/html-single/Pacemaker_Explained/index.html#_rule_properties ++ boolean_op = rule_el.get("boolean-op", "and") ++ string_parts = [] ++ for child_dto in children_dto_list: ++ if child_dto.type == CibRuleExpressionType.RULE: ++ string_parts.append(f"({child_dto.as_string})") ++ else: ++ string_parts.append(child_dto.as_string) ++ return CibRuleExpressionDto( ++ rule_el.get("id", ""), ++ _tag_to_type[rule_el.tag], ++ False, # TODO implement is_expired ++ export_attributes(rule_el, with_id=False), ++ None, ++ None, ++ children_dto_list, ++ f" {boolean_op} ".join(string_parts), ++ ) ++ ++ ++def _common_expr_to_dto( ++ expr_el: Element, as_string: str ++) -> CibRuleExpressionDto: ++ return CibRuleExpressionDto( ++ expr_el.get("id", ""), ++ _tag_to_type[expr_el.tag], ++ False, ++ export_attributes(expr_el, with_id=False), ++ None, ++ 
None, ++ [], ++ as_string, ++ ) ++ ++ ++def _simple_expr_to_dto(expr_el: Element) -> CibRuleExpressionDto: ++ string_parts = [] ++ if "value" in expr_el.attrib: ++ # "attribute" and "operation" are defined as mandatory in CIB schema ++ string_parts.extend( ++ [expr_el.get("attribute", ""), expr_el.get("operation", "")] ++ ) ++ if "type" in expr_el.attrib: ++ string_parts.append(expr_el.get("type", "")) ++ string_parts.append(quote(expr_el.get("value", ""), " ")) ++ else: ++ # "attribute" and "operation" are defined as mandatory in CIB schema ++ string_parts.extend( ++ [expr_el.get("operation", ""), expr_el.get("attribute", "")] ++ ) ++ return _common_expr_to_dto(expr_el, " ".join(string_parts)) ++ ++ ++def _date_common_to_dto(expr_el: Element) -> CibRuleDateCommonDto: ++ return CibRuleDateCommonDto( ++ expr_el.get("id", ""), export_attributes(expr_el, with_id=False), ++ ) ++ ++ ++def _date_expr_to_dto(expr_el: Element) -> CibRuleExpressionDto: ++ date_spec = expr_el.find("./date_spec") ++ duration = expr_el.find("./duration") ++ ++ string_parts = [] ++ # "operation" is defined as mandatory in CIB schema ++ operation = expr_el.get("operation", "") ++ if operation == "date_spec": ++ string_parts.append("date-spec") ++ if date_spec is not None: ++ string_parts.append(_attrs_to_str(date_spec)) ++ elif operation == "in_range": ++ string_parts.extend(["date", "in_range"]) ++ # CIB schema allows "start" + "duration" or optional "start" + "end" ++ if "start" in expr_el.attrib: ++ string_parts.extend([expr_el.get("start", ""), "to"]) ++ if "end" in expr_el.attrib: ++ string_parts.append(expr_el.get("end", "")) ++ if duration is not None: ++ string_parts.append("duration") ++ string_parts.append(_attrs_to_str(duration)) ++ else: ++ # CIB schema allows operation=="gt" + "start" or operation=="lt" + "end" ++ string_parts.extend(["date", expr_el.get("operation", "")]) ++ if "start" in expr_el.attrib: ++ string_parts.append(expr_el.get("start", "")) ++ if "end" in 
expr_el.attrib: ++ string_parts.append(expr_el.get("end", "")) ++ ++ return CibRuleExpressionDto( ++ expr_el.get("id", ""), ++ _tag_to_type[expr_el.tag], ++ False, ++ export_attributes(expr_el, with_id=False), ++ None if date_spec is None else _date_common_to_dto(date_spec), ++ None if duration is None else _date_common_to_dto(duration), ++ [], ++ " ".join(string_parts), ++ ) ++ ++ ++def _op_expr_to_dto(expr_el: Element) -> CibRuleExpressionDto: ++ string_parts = ["op"] ++ string_parts.append(expr_el.get("name", "")) ++ if "interval" in expr_el.attrib: ++ string_parts.append( ++ "interval={interval}".format(interval=expr_el.get("interval", "")) ++ ) ++ return _common_expr_to_dto(expr_el, " ".join(string_parts)) ++ ++ ++def _rsc_expr_to_dto(expr_el: Element) -> CibRuleExpressionDto: ++ return _common_expr_to_dto( ++ expr_el, ++ ( ++ "resource " ++ + ":".join( ++ [ ++ expr_el.get(attr, "") ++ for attr in ["class", "provider", "type"] ++ ] ++ ) ++ ), ++ ) ++ ++ ++_tag_to_type = { ++ "rule": CibRuleExpressionType.RULE, ++ "expression": CibRuleExpressionType.EXPRESSION, ++ "date_expression": CibRuleExpressionType.DATE_EXPRESSION, ++ "op_expression": CibRuleExpressionType.OP_EXPRESSION, ++ "rsc_expression": CibRuleExpressionType.RSC_EXPRESSION, ++} ++ ++_tag_to_export = { ++ "rule": _rule_to_dto, ++ "expression": _simple_expr_to_dto, ++ "date_expression": _date_expr_to_dto, ++ "op_expression": _op_expr_to_dto, ++ "rsc_expression": _rsc_expr_to_dto, ++} ++_xpath_for_export = "./*[{export_tags}]".format( ++ export_tags=" or ".join(f"self::{tag}" for tag in _tag_to_export) ++) +diff --git a/pcs/lib/cib/rule/expression_part.py b/pcs/lib/cib/rule/expression_part.py +new file mode 100644 +index 00000000..3ba63aa2 +--- /dev/null ++++ b/pcs/lib/cib/rule/expression_part.py +@@ -0,0 +1,49 @@ ++""" ++Provides classes used as nodes of a semantic tree of a parsed rule expression. 
++""" ++from dataclasses import dataclass ++from typing import ( ++ NewType, ++ Optional, ++ Sequence, ++) ++ ++ ++class RuleExprPart: ++ pass ++ ++ ++BoolOperator = NewType("BoolOperator", str) ++BOOL_AND = BoolOperator("AND") ++BOOL_OR = BoolOperator("OR") ++ ++ ++@dataclass(frozen=True) ++class BoolExpr(RuleExprPart): ++ """ ++ Represents a rule combining RuleExprPart objects by AND or OR operation. ++ """ ++ ++ operator: BoolOperator ++ children: Sequence[RuleExprPart] ++ ++ ++@dataclass(frozen=True) ++class RscExpr(RuleExprPart): ++ """ ++ Represents a resource expression in a rule. ++ """ ++ ++ standard: Optional[str] ++ provider: Optional[str] ++ type: Optional[str] ++ ++ ++@dataclass(frozen=True) ++class OpExpr(RuleExprPart): ++ """ ++ Represents an op expression in a rule. ++ """ ++ ++ name: str ++ interval: Optional[str] +diff --git a/pcs/lib/cib/rule/parsed_to_cib.py b/pcs/lib/cib/rule/parsed_to_cib.py +new file mode 100644 +index 00000000..0fcae4f1 +--- /dev/null ++++ b/pcs/lib/cib/rule/parsed_to_cib.py +@@ -0,0 +1,103 @@ ++from typing import cast ++from xml.etree.ElementTree import Element ++ ++from lxml import etree ++from lxml.etree import _Element ++ ++from pcs.lib.cib.tools import ( ++ IdProvider, ++ create_subelement_id, ++) ++ ++from .expression_part import ( ++ BoolExpr, ++ OpExpr, ++ RscExpr, ++ RuleExprPart, ++) ++ ++ ++def export( ++ parent_el: Element, id_provider: IdProvider, expr_tree: BoolExpr, ++) -> Element: ++ """ ++ Export parsed rule to a CIB element ++ ++ parent_el -- element to place the rule into ++ id_provider -- elements' ids generator ++ expr_tree -- parsed rule tree root ++ """ ++ element = __export_part(parent_el, expr_tree, id_provider) ++ # Add score only to the top level rule element (which is represented by ++ # BoolExpr class). This is achieved by this function not being called for ++ # child nodes. ++ # TODO This was implemented originaly only for rules in resource and ++ # operation defaults. 
In those cases, score is the only rule attribute and ++ # it is always INFINITY. Once this code is used for other rules, modify ++ # this behavior as needed. ++ if isinstance(expr_tree, BoolExpr): ++ element.set("score", "INFINITY") ++ return element ++ ++ ++def __export_part( ++ parent_el: Element, expr_tree: RuleExprPart, id_provider: IdProvider ++) -> Element: ++ part_export_map = { ++ BoolExpr: __export_bool, ++ OpExpr: __export_op, ++ RscExpr: __export_rsc, ++ } ++ func = part_export_map[type(expr_tree)] ++ # mypy doesn't handle this dynamic call ++ return func(parent_el, expr_tree, id_provider) # type: ignore ++ ++ ++def __export_bool( ++ parent_el: Element, boolean: BoolExpr, id_provider: IdProvider ++) -> Element: ++ element = etree.SubElement( ++ cast(_Element, parent_el), ++ "rule", ++ { ++ "id": create_subelement_id(parent_el, "rule", id_provider), ++ "boolean-op": boolean.operator.lower(), ++ }, ++ ) ++ for child in boolean.children: ++ __export_part(cast(Element, element), child, id_provider) ++ return cast(Element, element) ++ ++ ++def __export_op( ++ parent_el: Element, op: OpExpr, id_provider: IdProvider ++) -> Element: ++ element = etree.SubElement( ++ cast(_Element, parent_el), ++ "op_expression", ++ { ++ "id": create_subelement_id(parent_el, f"op-{op.name}", id_provider), ++ "name": op.name, ++ }, ++ ) ++ if op.interval: ++ element.attrib["interval"] = op.interval ++ return cast(Element, element) ++ ++ ++def __export_rsc( ++ parent_el: Element, rsc: RscExpr, id_provider: IdProvider ++) -> Element: ++ id_part = "-".join(filter(None, [rsc.standard, rsc.provider, rsc.type])) ++ element = etree.SubElement( ++ cast(_Element, parent_el), ++ "rsc_expression", ++ {"id": create_subelement_id(parent_el, f"rsc-{id_part}", id_provider)}, ++ ) ++ if rsc.standard: ++ element.attrib["class"] = rsc.standard ++ if rsc.provider: ++ element.attrib["provider"] = rsc.provider ++ if rsc.type: ++ element.attrib["type"] = rsc.type ++ return cast(Element, element) +diff 
--git a/pcs/lib/cib/rule/parser.py b/pcs/lib/cib/rule/parser.py +new file mode 100644 +index 00000000..2215c524 +--- /dev/null ++++ b/pcs/lib/cib/rule/parser.py +@@ -0,0 +1,232 @@ ++from typing import ( ++ Any, ++ Iterator, ++ Optional, ++ Tuple, ++) ++ ++import pyparsing ++ ++from .expression_part import ( ++ BOOL_AND, ++ BOOL_OR, ++ BoolExpr, ++ OpExpr, ++ RscExpr, ++ RuleExprPart, ++) ++ ++pyparsing.ParserElement.enablePackrat() ++ ++ ++class RuleParseError(Exception): ++ def __init__( ++ self, ++ rule_string: str, ++ rule_line: str, ++ lineno: int, ++ colno: int, ++ pos: int, ++ msg: str, ++ ): ++ super().__init__() ++ self.rule_string = rule_string ++ self.rule_line = rule_line ++ self.lineno = lineno ++ self.colno = colno ++ self.pos = pos ++ self.msg = msg ++ ++ ++def parse_rule( ++ rule_string: str, allow_rsc_expr: bool = False, allow_op_expr: bool = False ++) -> BoolExpr: ++ """ ++ Parse a rule string and return a corresponding semantic tree ++ ++ rule_string -- the whole rule expression ++ allow_rsc_expr -- allow resource expressions in the rule ++ allow_op_expr -- allow resource operation expressions in the rule ++ """ ++ if not rule_string: ++ return BoolExpr(BOOL_AND, []) ++ ++ try: ++ parsed = __get_rule_parser( ++ allow_rsc_expr=allow_rsc_expr, allow_op_expr=allow_op_expr ++ ).parseString(rule_string, parseAll=True)[0] ++ except pyparsing.ParseException as e: ++ raise RuleParseError( ++ rule_string, e.line, e.lineno, e.col, e.loc, e.args[2], ++ ) ++ ++ if not isinstance(parsed, BoolExpr): ++ # If we only got a representation on an inner rule element instead of a ++ # rule element itself, wrap the result in a default AND-rule. (There is ++ # only one expression so "and" vs. "or" doesn't really matter.) 
++ parsed = BoolExpr(BOOL_AND, [parsed]) ++ ++ return parsed ++ ++ ++def __operator_operands( ++ token_list: pyparsing.ParseResults, ++) -> Iterator[Tuple[Any, Any]]: ++ # See pyparsing examples ++ # https://github.com/pyparsing/pyparsing/blob/master/examples/eval_arith.py ++ token_iterator = iter(token_list) ++ while True: ++ try: ++ yield (next(token_iterator), next(token_iterator)) ++ except StopIteration: ++ break ++ ++ ++def __build_bool_tree(token_list: pyparsing.ParseResults) -> RuleExprPart: ++ # See pyparsing examples ++ # https://github.com/pyparsing/pyparsing/blob/master/examples/eval_arith.py ++ token_to_operator = { ++ "and": BOOL_AND, ++ "or": BOOL_OR, ++ } ++ operand_left = token_list[0][0] ++ last_operator: Optional[str] = None ++ operand_list = [] ++ for operator, operand_right in __operator_operands(token_list[0][1:]): ++ # In each iteration, we get a bool_op ("and" or "or") and the right ++ # operand. ++ if last_operator == operator or last_operator is None: ++ # If we got the same operator as last time (or this is the first ++ # one), stack all the operads so we can put them all into one ++ # BoolExpr class. ++ operand_list.append(operand_right) ++ else: ++ # The operator has changed. Put all the stacked operands into the ++ # correct BoolExpr class and start the stacking again. The created ++ # class is the left operand of the current operator. ++ operand_left = BoolExpr( ++ token_to_operator[last_operator], [operand_left] + operand_list ++ ) ++ operand_list = [operand_right] ++ last_operator = operator ++ if operand_list and last_operator: ++ # Use any of the remaining stacked operands. 
++ operand_left = BoolExpr( ++ token_to_operator[last_operator], [operand_left] + operand_list ++ ) ++ return operand_left ++ ++ ++def __build_op_expr(parse_result: pyparsing.ParseResults) -> RuleExprPart: ++ # Those attr are defined by setResultsName in op_expr grammar rule ++ return OpExpr( ++ parse_result.name, ++ # pyparsing-2.1.0 puts "interval_value" into parse_result.interval as ++ # defined in the grammar AND it also puts "interval_value" into ++ # parse_result. pyparsing-2.4.0 only puts "interval_value" into ++ # parse_result. Not sure why, maybe it's a bug, maybe it's intentional. ++ parse_result.interval_value if parse_result.interval_value else None, ++ ) ++ ++ ++def __build_rsc_expr(parse_result: pyparsing.ParseResults) -> RuleExprPart: ++ # Those attrs are defined by the regexp in rsc_expr grammar rule ++ return RscExpr( ++ parse_result.standard, parse_result.provider, parse_result.type ++ ) ++ ++ ++def __get_rule_parser( ++ allow_rsc_expr: bool = False, allow_op_expr: bool = False ++) -> pyparsing.ParserElement: ++ # This function defines the rule grammar ++ ++ # It was created for 'pcs resource [op] defaults' commands to be able to ++ # set defaults for specified resources and/or operation using rules. When ++ # implementing that feature, there was no time to reimplement all the other ++ # rule expressions from old code. The plan is to move old rule parser code ++ # here once there is time / need to do it. ++ # How to add other rule expressions: ++ # 1 Create new grammar rules in a way similar to existing rsc_expr and ++ # op_expr. Use setName for better description of a grammar when printed. ++ # Use setResultsName for an easy access to parsed parts. ++ # 2 Create new classes in expression_part module, probably one for each ++ # type of expression. Those are data containers holding the parsed data ++ # independent of the parser. ++ # 3 Create builders for the new classes and connect them to created ++ # grammar rules using setParseAction. 
++ # 4 Add the new expressions into simple_expr_list. ++ # 5 Test and debug the whole thing. ++ ++ rsc_expr = pyparsing.And( ++ [ ++ pyparsing.CaselessKeyword("resource"), ++ # resource name ++ # Up to three parts seperated by ":". The parts can contain any ++ # characters except whitespace (token separator), ":" (parts ++ # separator) and "()" (brackets). ++ pyparsing.Regex( ++ r"(?P[^\s:()]+)?:(?P[^\s:()]+)?:(?P[^\s:()]+)?" ++ ).setName(""), ++ ] ++ ) ++ rsc_expr.setParseAction(__build_rsc_expr) ++ ++ op_interval = pyparsing.And( ++ [ ++ pyparsing.CaselessKeyword("interval"), ++ # no spaces allowed around the "=" ++ pyparsing.Literal("=").leaveWhitespace(), ++ # interval value: number followed by a time unit, no spaces allowed ++ # between the number and the unit thanks to Combine being used ++ pyparsing.Combine( ++ pyparsing.And( ++ [ ++ pyparsing.Word(pyparsing.nums), ++ pyparsing.Optional(pyparsing.Word(pyparsing.alphas)), ++ ] ++ ) ++ ) ++ .setName("[ + ++ ++ ++ Support for managing multiple sets of resource operations defaults. ++ ++ pcs commands: resource op defaults set create | delete | remove | update ++ ++ ++ ++ ++ Support for rules with 'resource' and 'op' expressions in sets of ++ resource operations defaults. ++ ++ pcs commands: resource op defaults set create ++ ++ + + + Show and set resources defaults, can set multiple defaults at once. +@@ -971,6 +986,21 @@ + pcs commands: resource defaults + + ++ ++ ++ Support for managing multiple sets of resources defaults. ++ ++ pcs commands: resource defaults set create | delete | remove | update ++ ++ ++ ++ ++ Support for rules with 'resource' and 'op' expressions in sets of ++ resources defaults. 
++ ++ pcs commands: resource defaults set create ++ ++ + + + +diff --git a/test/centos8/Dockerfile b/test/centos8/Dockerfile +index bcdfadef..753f0ca7 100644 +--- a/test/centos8/Dockerfile ++++ b/test/centos8/Dockerfile +@@ -12,6 +12,7 @@ RUN dnf install -y \ + python3-pip \ + python3-pycurl \ + python3-pyOpenSSL \ ++ python3-pyparsing \ + # ruby + ruby \ + ruby-devel \ +diff --git a/test/fedora30/Dockerfile b/test/fedora30/Dockerfile +index 60aad892..7edbfe5b 100644 +--- a/test/fedora30/Dockerfile ++++ b/test/fedora30/Dockerfile +@@ -9,6 +9,7 @@ RUN dnf install -y \ + python3-mock \ + python3-pycurl \ + python3-pyOpenSSL \ ++ python3-pyparsing \ + # ruby + ruby \ + ruby-devel \ +diff --git a/test/fedora31/Dockerfile b/test/fedora31/Dockerfile +index eb24bb1c..6750e222 100644 +--- a/test/fedora31/Dockerfile ++++ b/test/fedora31/Dockerfile +@@ -10,6 +10,7 @@ RUN dnf install -y \ + python3-pip \ + python3-pycurl \ + python3-pyOpenSSL \ ++ python3-pyparsing \ + # ruby + ruby \ + ruby-devel \ +diff --git a/test/fedora32/Dockerfile b/test/fedora32/Dockerfile +index 61a0a439..c6cc2146 100644 +--- a/test/fedora32/Dockerfile ++++ b/test/fedora32/Dockerfile +@@ -11,6 +11,7 @@ RUN dnf install -y \ + python3-pip \ + python3-pycurl \ + python3-pyOpenSSL \ ++ python3-pyparsing \ + # ruby + ruby \ + ruby-devel \ +-- +2.25.4 + diff --git a/SOURCES/do-not-support-cluster-setup-with-udp-u-transport.patch b/SOURCES/do-not-support-cluster-setup-with-udp-u-transport.patch index 4da46c4..800145e 100644 --- a/SOURCES/do-not-support-cluster-setup-with-udp-u-transport.patch +++ b/SOURCES/do-not-support-cluster-setup-with-udp-u-transport.patch @@ -1,4 +1,4 @@ -From b919e643ff75fa47dcecbf60fd4938ae9b076ce4 Mon Sep 17 00:00:00 2001 +From c0fff964cc07e3a9fbdea85da33abe3329c653a3 Mon Sep 17 00:00:00 2001 From: Ivan Devat Date: Tue, 20 Nov 2018 15:03:56 +0100 Subject: [PATCH 3/3] do not support cluster setup with udp(u) transport @@ -10,10 +10,10 @@ Subject: [PATCH 3/3] do not support cluster 
setup with udp(u) transport 3 files changed, 6 insertions(+) diff --git a/pcs/pcs.8 b/pcs/pcs.8 -index 651fda83..9a4673dd 100644 +index 3efc5bb2..20247774 100644 --- a/pcs/pcs.8 +++ b/pcs/pcs.8 -@@ -283,6 +283,8 @@ By default, encryption is enabled with cipher=aes256 and hash=sha256. To disable +@@ -376,6 +376,8 @@ By default, encryption is enabled with cipher=aes256 and hash=sha256. To disable Transports udp and udpu: .br @@ -23,10 +23,10 @@ index 651fda83..9a4673dd 100644 .br Transport options are: ip_version, netmtu diff --git a/pcs/usage.py b/pcs/usage.py -index e4f5af32..63e1c061 100644 +index 0f3c95a3..51bc1196 100644 --- a/pcs/usage.py +++ b/pcs/usage.py -@@ -689,6 +689,7 @@ Commands: +@@ -796,6 +796,7 @@ Commands: hash=sha256. To disable encryption, set cipher=none and hash=none. Transports udp and udpu: @@ -49,5 +49,5 @@ index b857cbae..b8d48d92 100644 #csetup-transport-options.knet .without-knet { -- -2.21.0 +2.25.4 diff --git a/SPECS/pcs.spec b/SPECS/pcs.spec index bcdf69a..3a207a6 100644 --- a/SPECS/pcs.spec +++ b/SPECS/pcs.spec @@ -1,11 +1,17 @@ Name: pcs -Version: 0.10.4 -Release: 3%{?dist} +Version: 0.10.6 +Release: 2%{?dist} +# https://docs.fedoraproject.org/en-US/packaging-guidelines/LicensingGuidelines/ # https://fedoraproject.org/wiki/Licensing:Main?rd=Licensing#Good_Licenses # GPLv2: pcs -# ASL 2.0: tornado -# MIT: handlebars -License: GPLv2 and ASL 2.0 and MIT +# ASL 2.0: dataclasses, tornado +# MIT: handlebars, backports, dacite, daemons, ethon, mustermann, rack, +# rack-protection, rack-test, sinatra, tilt +# GPLv2 or Ruby: eventmachne, json +# (GPLv2 or Ruby) and BSD: thin +# BSD or Ruby: open4, ruby2_keywords +# BSD and MIT: ffi +License: GPLv2 and ASL 2.0 and MIT and BSD and (GPLv2 or Ruby) and (BSD or Ruby) URL: https://github.com/ClusterLabs/pcs Group: System Environment/Base Summary: Pacemaker Configuration System @@ -18,24 +24,30 @@ ExclusiveArch: i686 x86_64 s390x ppc64le aarch64 %global pcs_source_name %{name}-%{version_or_commit} # 
ui_commit can be determined by hash, tag or branch -%global ui_commit 0.1.2 +%global ui_commit 0.1.3 %global ui_src_name pcs-web-ui-%{ui_commit} %global pcs_snmp_pkg_name pcs-snmp %global pyagentx_version 0.4.pcs.2 -%global tornado_version 6.0.3 -%global version_rubygem_backports 3.11.4 -%global version_rubygem_ethon 0.11.0 -%global version_rubygem_ffi 1.9.25 -%global version_rubygem_json 2.1.0 -%global version_rubygem_mustermann 1.0.3 +%global tornado_version 6.0.4 +%global dataclasses_version 0.6 +%global dacite_version 1.5.0 +%global version_rubygem_backports 3.17.2 +%global version_rubygem_daemons 1.3.1 +%global version_rubygem_ethon 0.12.0 +%global version_rubygem_eventmachine 1.2.7 +%global version_rubygem_ffi 1.13.1 +%global version_rubygem_json 2.3.0 +%global version_rubygem_mustermann 1.1.1 %global version_rubygem_open4 1.3.4 -%global version_rubygem_rack 2.0.6 -%global version_rubygem_rack_protection 2.0.4 -%global version_rubygem_rack_test 1.0.0 -%global version_rubygem_sinatra 2.0.4 -%global version_rubygem_tilt 2.0.9 +%global version_rubygem_rack 2.2.3 +%global version_rubygem_rack_protection 2.0.8.1 +%global version_rubygem_rack_test 1.1.0 +%global version_rubygem_ruby2_keywords 0.0.2 +%global version_rubygem_sinatra 2.0.8.1 +%global version_rubygem_thin 1.7.2 +%global version_rubygem_tilt 2.0.10 # We do not use _libdir macro because upstream is not prepared for it. # Pcs does not include binaries and thus it should live in /usr/lib. 
Tornado @@ -70,6 +82,8 @@ Source2: pcsd-bundle-config-2 Source41: https://github.com/ondrejmular/pyagentx/archive/v%{pyagentx_version}/pyagentx-%{pyagentx_version}.tar.gz Source42: https://github.com/tornadoweb/tornado/archive/v%{tornado_version}/tornado-%{tornado_version}.tar.gz +Source43: https://github.com/ericvsmith/dataclasses/archive/%{dataclasses_version}/dataclasses-%{dataclasses_version}.tar.gz +Source44: https://github.com/konradhalas/dacite/archive/v%{dacite_version}/dacite-%{dacite_version}.tar.gz Source81: https://rubygems.org/downloads/backports-%{version_rubygem_backports}.gem Source82: https://rubygems.org/downloads/ethon-%{version_rubygem_ethon}.gem @@ -85,6 +99,10 @@ Source89: https://rubygems.org/downloads/rack-protection-%{version_rubygem_rack_ Source90: https://rubygems.org/downloads/rack-test-%{version_rubygem_rack_test}.gem Source91: https://rubygems.org/downloads/sinatra-%{version_rubygem_sinatra}.gem Source92: https://rubygems.org/downloads/tilt-%{version_rubygem_tilt}.gem +Source93: https://rubygems.org/downloads/eventmachine-%{version_rubygem_eventmachine}.gem +Source94: https://rubygems.org/downloads/daemons-%{version_rubygem_daemons}.gem +Source95: https://rubygems.org/downloads/thin-%{version_rubygem_thin}.gem +Source96: https://rubygems.org/downloads/ruby2_keywords-%{version_rubygem_ruby2_keywords}.gem Source100: https://github.com/idevat/pcs-web-ui/archive/%{ui_commit}/%{ui_src_name}.tar.gz Source101: https://github.com/idevat/pcs-web-ui/releases/download/%{ui_commit}/pcs-web-ui-node-modules-%{ui_commit}.tar.xz @@ -93,8 +111,9 @@ Source101: https://github.com/idevat/pcs-web-ui/releases/download/%{ui_commit}/p # They should come before downstream patches to avoid unnecessary conflicts. # Z-streams are exception here: they can come from upstream but should be # applied at the end to keep z-stream changes as straightforward as possible. 
-Patch1: bz1676431-01-Display-status-of-disaster-recovery.patch -Patch2: bz1743731-01-fix-error-msg-when-cluster-is-not-set-up.patch +# Patch1: name.patch +Patch1: bz1817547-01-resource-and-operation-defaults.patch +Patch2: bz1805082-01-fix-resource-stonith-refresh-documentation.patch # Downstream patches do not come from upstream. They adapt pcs for specific # RHEL needs. @@ -104,12 +123,12 @@ Patch101: do-not-support-cluster-setup-with-udp-u-transport.patch BuildRequires: git #printf from coreutils is used in makefile BuildRequires: coreutils -BuildRequires: execstack # python for pcs BuildRequires: platform-python BuildRequires: python3-devel BuildRequires: platform-python-setuptools BuildRequires: python3-pycurl +BuildRequires: python3-pyparsing # gcc for compiling custom rubygems BuildRequires: gcc BuildRequires: gcc-c++ @@ -126,16 +145,6 @@ BuildRequires: systemd # for tests BuildRequires: python3-lxml BuildRequires: python3-pyOpenSSL -BuildRequires: pacemaker-cli >= 2.0.0 -# BuildRequires: fence-agents-all -BuildRequires: fence-agents-apc -BuildRequires: fence-agents-scsi -BuildRequires: fence-agents-ipmilan -# for tests -%ifarch i686 x86_64 -BuildRequires: fence-virt -%endif -BuildRequires: booth-site # pcsd fonts and font management tools for creating symlinks to fonts BuildRequires: fontconfig BuildRequires: liberation-sans-fonts @@ -152,6 +161,7 @@ Requires: python3-lxml Requires: platform-python-setuptools Requires: python3-clufter => 0.70.0 Requires: python3-pycurl +Requires: python3-pyparsing # ruby and gems for pcsd Requires: ruby >= 2.2.0 Requires: rubygems @@ -179,18 +189,26 @@ Requires: liberation-sans-fonts Requires: overpass-fonts # favicon Red Hat logo Requires: redhat-logos +# needs logrotate for /etc/logrotate.d/pcsd +Requires: logrotate Provides: bundled(tornado) = %{tornado_version} +Provides: bundled(dataclasses) = %{dataclasses_version} +Provides: bundled(dacite) = %{dacite_version} Provides: bundled(backports) = 
%{version_rubygem_backports} +Provides: bundled(daemons) = %{version_rubygem_daemons} Provides: bundled(ethon) = %{version_rubygem_ethon} +Provides: bundled(eventmachine) = %{version_rubygem_eventmachine} Provides: bundled(ffi) = %{version_rubygem_ffi} Provides: bundled(json) = %{version_rubygem_json} Provides: bundled(mustermann) = %{version_rubygem_mustermann} Provides: bundled(open4) = %{version_rubygem_open4} Provides: bundled(rack) = %{version_rubygem_rack} -Provides: bundled(rack) = %{version_rubygem_rack_protection} -Provides: bundled(rack) = %{version_rubygem_rack_test} +Provides: bundled(rack_protection) = %{version_rubygem_rack_protection} +Provides: bundled(rack_test) = %{version_rubygem_rack_test} +Provides: bundled(ruby2_keywords) = %{version_rubygem_ruby2_keywords} Provides: bundled(sinatra) = %{version_rubygem_sinatra} +Provides: bundled(thin) = %{version_rubygem_thin} Provides: bundled(tilt) = %{version_rubygem_tilt} %description @@ -238,7 +256,11 @@ update_times(){ unset file_list[0] for fname in ${file_list[@]}; do - touch -r $reference_file $fname + # some files could be deleted by a patch therefore we test file for + # existance before touch to avoid exit with error: No such file or + # directory + # diffstat cannot create list of files without deleted files + test -e $fname && touch -r $reference_file $fname done } @@ -257,6 +279,7 @@ update_times_patch(){ update_times ${patch_file_name} `diffstat -p1 -l ${patch_file_name}` } +# update_times_patch %%{PATCH1} update_times_patch %{PATCH1} update_times_patch %{PATCH2} update_times_patch %{PATCH101} @@ -288,6 +311,10 @@ cp -f %SOURCE89 pcsd/vendor/cache cp -f %SOURCE90 pcsd/vendor/cache cp -f %SOURCE91 pcsd/vendor/cache cp -f %SOURCE92 pcsd/vendor/cache +cp -f %SOURCE93 pcsd/vendor/cache +cp -f %SOURCE94 pcsd/vendor/cache +cp -f %SOURCE95 pcsd/vendor/cache +cp -f %SOURCE96 pcsd/vendor/cache # 3) dir for python bundles @@ -308,6 +335,20 @@ update_times %SOURCE42 `find %{bundled_src_dir}/tornado 
-follow` cp %{bundled_src_dir}/tornado/LICENSE tornado_LICENSE cp %{bundled_src_dir}/tornado/README.rst tornado_README.rst +# 6) sources for python dataclasses +tar -xzf %SOURCE43 -C %{bundled_src_dir} +mv %{bundled_src_dir}/dataclasses-%{dataclasses_version} %{bundled_src_dir}/dataclasses +update_times %SOURCE43 `find %{bundled_src_dir}/dataclasses -follow` +cp %{bundled_src_dir}/dataclasses/LICENSE.txt dataclasses_LICENSE.txt +cp %{bundled_src_dir}/dataclasses/README.rst dataclasses_README.rst + +# 7) sources for python dacite +tar -xzf %SOURCE44 -C %{bundled_src_dir} +mv %{bundled_src_dir}/dacite-%{dacite_version} %{bundled_src_dir}/dacite +update_times %SOURCE44 `find %{bundled_src_dir}/dacite -follow` +cp %{bundled_src_dir}/dacite/LICENSE dacite_LICENSE +cp %{bundled_src_dir}/dacite/README.md dacite_README.md + %build %define debug_package %{nil} @@ -321,22 +362,47 @@ gem install \ --force --verbose --no-rdoc --no-ri -l --no-user-install \ -i %{rubygem_bundle_dir} \ %{rubygem_cache_dir}/backports-%{version_rubygem_backports}.gem \ + %{rubygem_cache_dir}/daemons-%{version_rubygem_daemons}.gem \ %{rubygem_cache_dir}/ethon-%{version_rubygem_ethon}.gem \ + %{rubygem_cache_dir}/eventmachine-%{version_rubygem_eventmachine}.gem \ %{rubygem_cache_dir}/ffi-%{version_rubygem_ffi}.gem \ %{rubygem_cache_dir}/json-%{version_rubygem_json}.gem \ %{rubygem_cache_dir}/mustermann-%{version_rubygem_mustermann}.gem \ %{rubygem_cache_dir}/open4-%{version_rubygem_open4}.gem \ - %{rubygem_cache_dir}/rack-%{version_rubygem_rack}.gem \ %{rubygem_cache_dir}/rack-protection-%{version_rubygem_rack_protection}.gem \ %{rubygem_cache_dir}/rack-test-%{version_rubygem_rack_test}.gem \ + %{rubygem_cache_dir}/rack-%{version_rubygem_rack}.gem \ + %{rubygem_cache_dir}/ruby2_keywords-%{version_rubygem_ruby2_keywords}.gem \ %{rubygem_cache_dir}/sinatra-%{version_rubygem_sinatra}.gem \ + %{rubygem_cache_dir}/thin-%{version_rubygem_thin}.gem \ %{rubygem_cache_dir}/tilt-%{version_rubygem_tilt}.gem \ 
-- '--with-ldflags="-Wl,-z,relro -Wl,-z,ibt -Wl,-z,now -Wl,--gc-sections"' \ '--with-cflags="-O2 -ffunction-sections"' +# prepare license files +# some rubygems do not have a license file (ruby2_keywords, thin) +mv %{rubygem_bundle_dir}/gems/backports-%{version_rubygem_backports}/LICENSE.txt backports_LICENSE.txt +mv %{rubygem_bundle_dir}/gems/daemons-%{version_rubygem_daemons}/LICENSE daemons_LICENSE +mv %{rubygem_bundle_dir}/gems/ethon-%{version_rubygem_ethon}/LICENSE ethon_LICENSE +mv %{rubygem_bundle_dir}/gems/eventmachine-%{version_rubygem_eventmachine}/LICENSE eventmachine_LICENSE +mv %{rubygem_bundle_dir}/gems/eventmachine-%{version_rubygem_eventmachine}/GNU eventmachine_GNU +mv %{rubygem_bundle_dir}/gems/ffi-%{version_rubygem_ffi}/COPYING ffi_COPYING +mv %{rubygem_bundle_dir}/gems/ffi-%{version_rubygem_ffi}/LICENSE ffi_LICENSE +mv %{rubygem_bundle_dir}/gems/ffi-%{version_rubygem_ffi}/LICENSE.SPECS ffi_LICENSE.SPECS +mv %{rubygem_bundle_dir}/gems/json-%{version_rubygem_json}/LICENSE json_LICENSE +mv %{rubygem_bundle_dir}/gems/mustermann-%{version_rubygem_mustermann}/LICENSE mustermann_LICENSE +mv %{rubygem_bundle_dir}/gems/open4-%{version_rubygem_open4}/LICENSE open4_LICENSE +mv %{rubygem_bundle_dir}/gems/rack-%{version_rubygem_rack}/MIT-LICENSE rack_MIT-LICENSE +mv %{rubygem_bundle_dir}/gems/rack-protection-%{version_rubygem_rack_protection}/License rack-protection_License +mv %{rubygem_bundle_dir}/gems/rack-test-%{version_rubygem_rack_test}/MIT-LICENSE.txt rack-test_MIT-LICENSE.txt +mv %{rubygem_bundle_dir}/gems/sinatra-%{version_rubygem_sinatra}/LICENSE sinatra_LICENSE +mv %{rubygem_bundle_dir}/gems/tilt-%{version_rubygem_tilt}/COPYING tilt_COPYING + # We can remove files required for gem compilation +rm -rf %{rubygem_bundle_dir}/gems/eventmachine-%{version_rubygem_eventmachine}/ext rm -rf %{rubygem_bundle_dir}/gems/ffi-%{version_rubygem_ffi}/ext rm -rf %{rubygem_bundle_dir}/gems/json-%{version_rubygem_json}/ext +rm -rf 
%{rubygem_bundle_dir}/gems/thin-%{version_rubygem_thin}/ext # With this file there is "File is not stripped" problem during rpmdiff @@ -361,6 +427,8 @@ make install \ BASH_COMPLETION_DIR=%{_datadir}/bash-completion/completions \ BUNDLE_PYAGENTX_SRC_DIR=`readlink -f %{bundled_src_dir}/pyagentx` \ BUNDLE_TORNADO_SRC_DIR=`readlink -f %{bundled_src_dir}/tornado` \ + BUNDLE_DACITE_SRC_DIR=`readlink -f %{bundled_src_dir}/dacite` \ + BUNDLE_DATACLASSES_SRC_DIR=`readlink -f %{bundled_src_dir}/dataclasses` \ BUILD_GEMS=false \ SYSTEMCTL_OVERRIDE=true \ hdrdir="%{_includedir}" \ @@ -401,7 +469,7 @@ run_all_tests(){ # TODO: Investigate the issue BUNDLED_LIB_LOCATION=$RPM_BUILD_ROOT%{pcs_libdir}/pcs/bundled/packages \ - %{__python3} pcs_test/suite.py -v --vanilla --all-but \ + %{__python3} pcs_test/suite.py --tier0 -v --vanilla --all-but \ pcs_test.tier0.lib.commands.test_resource_agent.DescribeAgentUtf8.test_describe \ pcs_test.tier0.daemon.app.test_app_remote.SyncConfigMutualExclusive.test_get_not_locked \ pcs_test.tier0.daemon.app.test_app_remote.SyncConfigMutualExclusive.test_post_not_locked \ @@ -434,20 +502,31 @@ remove_all_tests() { run_all_tests remove_all_tests +%posttrans +# Make sure the new version of the daemon is running. +# Also, make sure to start pcsd-ruby if it hasn't been started or even +# installed before. This is done by restarting pcsd.service.
+%{_bindir}/systemctl daemon-reload +%{_bindir}/systemctl try-restart pcsd.service + + %post %systemd_post pcsd.service +%systemd_post pcsd-ruby.service %post -n %{pcs_snmp_pkg_name} %systemd_post pcs_snmp_agent.service %preun %systemd_preun pcsd.service +%systemd_preun pcsd-ruby.service %preun -n %{pcs_snmp_pkg_name} %systemd_preun pcs_snmp_agent.service %postun %systemd_postun_with_restart pcsd.service +%systemd_postun_with_restart pcsd-ruby.service %postun -n %{pcs_snmp_pkg_name} %systemd_postun_with_restart pcs_snmp_agent.service @@ -456,8 +535,29 @@ remove_all_tests %doc CHANGELOG.md %doc README.md %doc tornado_README.rst +%doc dacite_README.md +%doc dataclasses_README.rst %license tornado_LICENSE +%license dacite_LICENSE +%license dataclasses_LICENSE.txt %license COPYING +# rubygem licenses +%license backports_LICENSE.txt +%license daemons_LICENSE +%license ethon_LICENSE +%license eventmachine_LICENSE +%license eventmachine_GNU +%license ffi_COPYING +%license ffi_LICENSE +%license ffi_LICENSE.SPECS +%license json_LICENSE +%license mustermann_LICENSE +%license open4_LICENSE +%license rack_MIT-LICENSE +%license rack-protection_License +%license rack-test_MIT-LICENSE.txt +%license sinatra_LICENSE +%license tilt_COPYING %{python3_sitelib}/pcs %{python3_sitelib}/pcs-%{version}-py3.*.egg-info %{_sbindir}/pcs @@ -466,10 +566,14 @@ remove_all_tests %{pcs_libdir}/pcsd/* %{pcs_libdir}/pcsd/.bundle/config %{pcs_libdir}/pcs/bundled/packages/tornado* +%{pcs_libdir}/pcs/bundled/packages/dacite* +%{pcs_libdir}/pcs/bundled/packages/dataclasses* +%{pcs_libdir}/pcs/bundled/packages/__pycache__/dataclasses.cpython-36.pyc %{_unitdir}/pcsd.service +%{_unitdir}/pcsd-ruby.service %{_datadir}/bash-completion/completions/pcs %{_sharedstatedir}/pcsd -%{_sysconfdir}/pam.d/pcsd +%config(noreplace) %{_sysconfdir}/pam.d/pcsd %dir %{_var}/log/pcsd %config(noreplace) %{_sysconfdir}/logrotate.d/pcsd %config(noreplace) %{_sysconfdir}/sysconfig/pcsd @@ -484,6 +588,7 @@ remove_all_tests
%{_mandir}/man8/pcsd.* %exclude %{pcs_libdir}/pcsd/*.debian %exclude %{pcs_libdir}/pcsd/pcsd.service +%exclude %{pcs_libdir}/pcsd/pcsd-ruby.service %exclude %{pcs_libdir}/pcsd/pcsd.conf %exclude %{pcs_libdir}/pcsd/pcsd.8 %exclude %{pcs_libdir}/pcsd/public/js/dev/* @@ -508,6 +613,45 @@ remove_all_tests %license pyagentx_LICENSE.txt %changelog +* Thu Jun 25 2020 Miroslav Lisik - 0.10.6-2 +- Added resource and operation defaults that apply to specific resource/operation types +- Added Requires/BuildRequires: python3-pyparsing +- Added Requires: logrotate +- Fixed resource and stonith documentation +- Fixed rubygem licenses +- Fixed update_times() +- Updated rubygem rack to version 2.2.3 +- Removed BuildRequires execstack (it is not needed) +- Resolves: rhbz#1805082 rhbz#1817547 + +* Thu Jun 11 2020 Miroslav Lisik - 0.10.6-1 +- Rebased to latest upstream sources (see CHANGELOG.md) +- Added python bundled dependencies: dacite, dataclasses +- Added new bundled rubygem ruby2_keywords +- Updated rubygem bundled packages: backports, ethon, ffi, json, mustermann, rack, rack_protection, rack_test, sinatra, tilt +- Updated pcs-web-ui +- Updated test run, only tier0 tests are running during build +- Removed BuildRequires needed for tier1 tests which were removed for build (pacemaker-cli, fence_agents-*, fence_virt, booth-site) +- Resolves: rhbz#1387358 rhbz#1684676 rhbz#1722970 rhbz#1778672 rhbz#1782553 rhbz#1790460 rhbz#1805082 rhbz#1810017 rhbz#1817547 rhbz#1830552 rhbz#1832973 rhbz#1833114 rhbz#1833506 rhbz#1838853 rhbz#1839637 + +* Fri Mar 20 2020 Miroslav Lisik - 0.10.4-6 +- Fixed communication between python and ruby daemons +- Resolves: rhbz#1783106 + +* Thu Feb 13 2020 Miroslav Lisik - 0.10.4-5 +- Fixed link to sbd man page from `sbd enable` doc +- Fixed safe-disabling clones, groups, bundles +- Fixed sinatra wrapper performance issue +- Fixed detecting fence history support +- Fixed cookie options +- Updated hint for 'resource create ... 
master' +- Updated gating tests execution, smoke tests run from upstream sources +- Resolves: rhbz#1750427 rhbz#1781303 rhbz#1783106 rhbz#1793574 + +* Mon Jan 20 2020 Tomas Jelinek - 0.10.4-4 +- Fix testsuite for pacemaker-2.0.3-4 +- Resolves: rhbz#1792946 + * Mon Dec 02 2019 Ivan Devat - 0.10.4-3 - Added basic resource views in new webUI - Resolves: rhbz#1744060