diff --git a/.gitignore b/.gitignore
index 84e3f80..b5198b3 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,17 +1,23 @@
 SOURCES/HAM-logo.png
-SOURCES/backports-3.11.4.gem
-SOURCES/ethon-0.11.0.gem
-SOURCES/ffi-1.9.25.gem
-SOURCES/json-2.1.0.gem
-SOURCES/mustermann-1.0.3.gem
+SOURCES/backports-3.17.2.gem
+SOURCES/dacite-1.5.0.tar.gz
+SOURCES/daemons-1.3.1.gem
+SOURCES/dataclasses-0.6.tar.gz
+SOURCES/ethon-0.12.0.gem
+SOURCES/eventmachine-1.2.7.gem
+SOURCES/ffi-1.13.1.gem
+SOURCES/json-2.3.0.gem
+SOURCES/mustermann-1.1.1.gem
 SOURCES/open4-1.3.4-1.gem
-SOURCES/pcs-0.10.4.tar.gz
-SOURCES/pcs-web-ui-0.1.2.tar.gz
-SOURCES/pcs-web-ui-node-modules-0.1.2.tar.xz
+SOURCES/pcs-0.10.6.tar.gz
+SOURCES/pcs-web-ui-0.1.3.tar.gz
+SOURCES/pcs-web-ui-node-modules-0.1.3.tar.xz
 SOURCES/pyagentx-0.4.pcs.2.tar.gz
-SOURCES/rack-2.0.6.gem
-SOURCES/rack-protection-2.0.4.gem
-SOURCES/rack-test-1.0.0.gem
-SOURCES/sinatra-2.0.4.gem
-SOURCES/tilt-2.0.9.gem
-SOURCES/tornado-6.0.3.tar.gz
+SOURCES/rack-2.2.3.gem
+SOURCES/rack-protection-2.0.8.1.gem
+SOURCES/rack-test-1.1.0.gem
+SOURCES/ruby2_keywords-0.0.2.gem
+SOURCES/sinatra-2.0.8.1.gem
+SOURCES/thin-1.7.2.gem
+SOURCES/tilt-2.0.10.gem
+SOURCES/tornado-6.0.4.tar.gz
diff --git a/.pcs.metadata b/.pcs.metadata
index 0d738d2..2c6863f 100644
--- a/.pcs.metadata
+++ b/.pcs.metadata
@@ -1,17 +1,23 @@
 679a4ce22a33ffd4d704261a17c00cff98d9499a SOURCES/HAM-logo.png
-edf08f3a0d9e202048857d78ddda44e59294084c SOURCES/backports-3.11.4.gem
-3c921ceeb2847be8cfa25704be74923e233786bd SOURCES/ethon-0.11.0.gem
-86fa011857f977254ccf39f507587310f9ade768 SOURCES/ffi-1.9.25.gem
-8b9e81a2a6ff57f97bec1f65940c61cc6b6d81be SOURCES/json-2.1.0.gem
-2d090e7d3cd2a35efeaeacf006100fb83b828686 SOURCES/mustermann-1.0.3.gem
+28b63a742124da6c9575a1c5e7d7331ef93600b2 SOURCES/backports-3.17.2.gem
+c14ee49221d8e1b09364b5f248bc3da12484f675 SOURCES/dacite-1.5.0.tar.gz
+e28c1e78d1a6e34e80f4933b494f1e0501939dd3 SOURCES/daemons-1.3.1.gem
+81079b734108084eea0ae1c05a1cab0e806a3a1d SOURCES/dataclasses-0.6.tar.gz
+921ef1be44583a7644ee7f20fe5f26f21d018a04 SOURCES/ethon-0.12.0.gem
+7a5b2896e210fac9759c786ee4510f265f75b481 SOURCES/eventmachine-1.2.7.gem
+cfa25e7a3760c3ec16723cb8263d9b7a52d0eadf SOURCES/ffi-1.13.1.gem
+0230e8c5a37f1543982e5b04be503dd5f9004b47 SOURCES/json-2.3.0.gem
+50a4e37904485810cb05e27d75c9783e5a8f3402 SOURCES/mustermann-1.1.1.gem
 41a7fe9f8e3e02da5ae76c821b89c5b376a97746 SOURCES/open4-1.3.4-1.gem
-d2b649f271580b18d39efffa93f62b55291ef55d SOURCES/pcs-0.10.4.tar.gz
-8ac1291ce8f56073b74149ac56acc094337a3298 SOURCES/pcs-web-ui-0.1.2.tar.gz
-52599fe9c17bda8cc0cad1acf830a9114b8b6db6 SOURCES/pcs-web-ui-node-modules-0.1.2.tar.xz
+73fafb4228326c14a799f0cccbcb734ab7ba2bfa SOURCES/pcs-0.10.6.tar.gz
+df118954a980ceecc9cdd0e85a83d43253836f7f SOURCES/pcs-web-ui-0.1.3.tar.gz
+3e09042e3dc32c992451ba4c0454f2879f0d3f40 SOURCES/pcs-web-ui-node-modules-0.1.3.tar.xz
 3176b2f2b332c2b6bf79fe882e83feecf3d3f011 SOURCES/pyagentx-0.4.pcs.2.tar.gz
-b15267e1f94e69238a00a6f1bd48fb7683c03a78 SOURCES/rack-2.0.6.gem
-c1376e5678322b401d988d261762a78bf2cf3361 SOURCES/rack-protection-2.0.4.gem
-4c99cf0a82372a1bc5968c1551d9e606b68b4879 SOURCES/rack-test-1.0.0.gem
-1c85f05c874bc8c0bf9c40291ea2d430090cdfd9 SOURCES/sinatra-2.0.4.gem
-55a75a80e29731d072fe44dfaf865479b65c27fd SOURCES/tilt-2.0.9.gem
-126c66189fc5b26a39c9b54eb17254652cca8b27 SOURCES/tornado-6.0.3.tar.gz
+345b7169d4d2d62176a225510399963bad62b68f SOURCES/rack-2.2.3.gem
+1f046e23baca8beece3b38c60382f44aa2b2cb41 SOURCES/rack-protection-2.0.8.1.gem
+b80bc5ca38a885e747271675ba91dd3d02136bf1 SOURCES/rack-test-1.1.0.gem
+0be571aacb5d6a212a30af3f322a7000d8af1ef9 SOURCES/ruby2_keywords-0.0.2.gem
+04cca7a5d9d641fe076e4e24dc5b6ff31922f4c3 SOURCES/sinatra-2.0.8.1.gem
+41395e86322ffd31f3a7aef1f697bda3e1e2d6b9 SOURCES/thin-1.7.2.gem
+d265c822a6b228392d899e9eb5114613d65e6967 SOURCES/tilt-2.0.10.gem
+e177f2a092dc5f23b0b3078e40adf52e17a9f8a6 SOURCES/tornado-6.0.4.tar.gz
diff --git a/SOURCES/bz1676431-01-Display-status-of-disaster-recovery.patch b/SOURCES/bz1676431-01-Display-status-of-disaster-recovery.patch
deleted file mode 100644
index bd37518..0000000
--- a/SOURCES/bz1676431-01-Display-status-of-disaster-recovery.patch
+++ /dev/null
@@ -1,5055 +0,0 @@
-From 7cf137380bc80653c50747a1d4d70783d593fcb5 Mon Sep 17 00:00:00 2001
-From: Miroslav Lisik <mlisik@redhat.com>
-Date: Fri, 29 Nov 2019 12:16:11 +0100
-Subject: [PATCH 1/3] squash bz1676431 Display status of disaster recovery site
-
-support DR config in node add, node remove, cluster destroy
-
-dr: add command for setting recovery site
-
-improve typing
-
-move tests
-
-dr: add a command for displaying clusters' status
-
-dr: add a command for displaying dr config
-
-dr: add 'destroy' sub-command
-
-dr: review based fixes
-
-update capabilities, changelog
----
- CHANGELOG.md                                  |   9 +
- pcs/app.py                                    |   2 +
- pcs/cli/common/console_report.py              |  16 +-
- pcs/cli/common/lib_wrapper.py                 |  13 +
- pcs/cli/dr.py                                 | 138 ++++
- pcs/cli/routing/dr.py                         |  15 +
- pcs/cluster.py                                |   1 +
- pcs/common/dr.py                              | 109 +++
- pcs/common/file_type_codes.py                 |  27 +-
- pcs/common/report_codes.py                    |   3 +
- pcs/lib/commands/cluster.py                   |  18 +-
- pcs/lib/commands/dr.py                        | 316 ++++++++
- pcs/lib/communication/corosync.py             |  28 +
- pcs/lib/communication/status.py               |  97 +++
- pcs/lib/dr/__init__.py                        |   0
- pcs/lib/dr/config/__init__.py                 |   0
- pcs/lib/dr/config/facade.py                   |  49 ++
- pcs/lib/dr/env.py                             |  28 +
- pcs/lib/env.py                                |  17 +
- pcs/lib/file/instance.py                      |  21 +-
- pcs/lib/file/metadata.py                      |   8 +
- pcs/lib/file/toolbox.py                       |  80 +-
- pcs/lib/node.py                               |   5 +-
- pcs/lib/node_communication_format.py          |  16 +
- pcs/lib/reports.py                            |  31 +
- pcs/pcs.8                                     |  18 +-
- pcs/pcs_internal.py                           |   1 +
- pcs/settings_default.py                       |   1 +
- pcs/usage.py                                  |  32 +-
- .../tier0/cli/common/test_console_report.py   |  24 +
- pcs_test/tier0/cli/test_dr.py                 | 293 +++++++
- pcs_test/tier0/common/test_dr.py              | 167 ++++
- .../lib/commands/cluster/test_add_nodes.py    | 143 +++-
- pcs_test/tier0/lib/commands/dr/__init__.py    |   0
- .../tier0/lib/commands/dr/test_destroy.py     | 342 ++++++++
- .../tier0/lib/commands/dr/test_get_config.py  | 134 ++++
- .../lib/commands/dr/test_set_recovery_site.py | 702 ++++++++++++++++
- pcs_test/tier0/lib/commands/dr/test_status.py | 756 ++++++++++++++++++
- .../tier0/lib/communication/test_status.py    |   7 +
- pcs_test/tier0/lib/dr/__init__.py             |   0
- pcs_test/tier0/lib/dr/test_facade.py          | 138 ++++
- pcs_test/tier0/lib/test_env.py                |  42 +-
- .../tools/command_env/config_corosync_conf.py |   9 +-
- pcs_test/tools/command_env/config_http.py     |   3 +
- .../tools/command_env/config_http_corosync.py |  24 +
- .../tools/command_env/config_http_files.py    |  28 +-
- .../tools/command_env/config_http_status.py   |  52 ++
- .../mock_get_local_corosync_conf.py           |  12 +-
- pcsd/capabilities.xml                         |  12 +
- pcsd/pcsd_file.rb                             |  15 +
- pcsd/pcsd_remove_file.rb                      |   7 +
- pcsd/remote.rb                                |  19 +-
- pcsd/settings.rb                              |   1 +
- pcsd/settings.rb.debian                       |   1 +
- pylintrc                                      |   2 +-
- 55 files changed, 3964 insertions(+), 68 deletions(-)
- create mode 100644 pcs/cli/dr.py
- create mode 100644 pcs/cli/routing/dr.py
- create mode 100644 pcs/common/dr.py
- create mode 100644 pcs/lib/commands/dr.py
- create mode 100644 pcs/lib/communication/status.py
- create mode 100644 pcs/lib/dr/__init__.py
- create mode 100644 pcs/lib/dr/config/__init__.py
- create mode 100644 pcs/lib/dr/config/facade.py
- create mode 100644 pcs/lib/dr/env.py
- create mode 100644 pcs_test/tier0/cli/test_dr.py
- create mode 100644 pcs_test/tier0/common/test_dr.py
- create mode 100644 pcs_test/tier0/lib/commands/dr/__init__.py
- create mode 100644 pcs_test/tier0/lib/commands/dr/test_destroy.py
- create mode 100644 pcs_test/tier0/lib/commands/dr/test_get_config.py
- create mode 100644 pcs_test/tier0/lib/commands/dr/test_set_recovery_site.py
- create mode 100644 pcs_test/tier0/lib/commands/dr/test_status.py
- create mode 100644 pcs_test/tier0/lib/communication/test_status.py
- create mode 100644 pcs_test/tier0/lib/dr/__init__.py
- create mode 100644 pcs_test/tier0/lib/dr/test_facade.py
- create mode 100644 pcs_test/tools/command_env/config_http_status.py
-
-diff --git a/CHANGELOG.md b/CHANGELOG.md
-index 69e6da44..889436c3 100644
---- a/CHANGELOG.md
-+++ b/CHANGELOG.md
-@@ -1,5 +1,14 @@
- # Change Log
- 
-+## [Unreleased]
-+
-+### Added
-+- It is possible to configure a disaster-recovery site and display its status
-+  ([rhbz#1676431])
-+
-+[rhbz#1676431]: https://bugzilla.redhat.com/show_bug.cgi?id=1676431
-+
-+
- ## [0.10.4] - 2019-11-28
- 
- ### Added
-diff --git a/pcs/app.py b/pcs/app.py
-index 8df07c1d..defc4055 100644
---- a/pcs/app.py
-+++ b/pcs/app.py
-@@ -25,6 +25,7 @@ from pcs.cli.routing import (
-     cluster,
-     config,
-     constraint,
-+    dr,
-     host,
-     node,
-     pcsd,
-@@ -245,6 +246,7 @@ def main(argv=None):
-         "booth": booth.booth_cmd,
-         "host": host.host_cmd,
-         "client": client.client_cmd,
-+        "dr": dr.dr_cmd,
-         "help": lambda lib, argv, modifiers: usage.main(),
-     }
-     try:
-diff --git a/pcs/cli/common/console_report.py b/pcs/cli/common/console_report.py
-index 0a730cfa..d349c823 100644
---- a/pcs/cli/common/console_report.py
-+++ b/pcs/cli/common/console_report.py
-@@ -2,6 +2,7 @@
- from collections import defaultdict
- from collections.abc import Iterable
- from functools import partial
-+from typing import Mapping
- import sys
- 
- from pcs.common import (
-@@ -46,6 +47,7 @@ _file_role_translation = {
-     file_type_codes.BOOTH_CONFIG: "Booth configuration",
-     file_type_codes.BOOTH_KEY: "Booth key",
-     file_type_codes.COROSYNC_AUTHKEY: "Corosync authkey",
-+    file_type_codes.PCS_DR_CONFIG: "disaster-recovery configuration",
-     file_type_codes.PACEMAKER_AUTHKEY: "Pacemaker authkey",
-     file_type_codes.PCSD_ENVIRONMENT_CONFIG: "pcsd configuration",
-     file_type_codes.PCSD_SSL_CERT: "pcsd SSL certificate",
-@@ -53,7 +55,7 @@ _file_role_translation = {
-     file_type_codes.PCS_KNOWN_HOSTS: "known-hosts",
-     file_type_codes.PCS_SETTINGS_CONF: "pcs configuration",
- }
--_file_role_to_option_translation = {
-+_file_role_to_option_translation: Mapping[str, str] = {
-     file_type_codes.BOOTH_CONFIG: "--booth-conf",
-     file_type_codes.BOOTH_KEY: "--booth-key",
-     file_type_codes.CIB: "-f",
-@@ -2284,4 +2286,16 @@ CODE_TO_MESSAGE_BUILDER_MAP = {
-             "resources\n\n{crm_simulate_plaintext_output}"
-         ).format(**info)
-     ,
-+
-+    codes.DR_CONFIG_ALREADY_EXIST: lambda info: (
-+        "Disaster-recovery already configured"
-+    ).format(**info),
-+
-+    codes.DR_CONFIG_DOES_NOT_EXIST: lambda info: (
-+        "Disaster-recovery is not configured"
-+    ).format(**info),
-+
-+    codes.NODE_IN_LOCAL_CLUSTER: lambda info: (
-+        "Node '{node}' is part of local cluster"
-+    ).format(**info),
- }
-diff --git a/pcs/cli/common/lib_wrapper.py b/pcs/cli/common/lib_wrapper.py
-index 27b7d8b1..4ef6bf2f 100644
---- a/pcs/cli/common/lib_wrapper.py
-+++ b/pcs/cli/common/lib_wrapper.py
-@@ -9,6 +9,7 @@ from pcs.lib.commands import (
-     booth,
-     cib_options,
-     cluster,
-+    dr,
-     fencing_topology,
-     node,
-     pcsd,
-@@ -183,6 +184,18 @@ def load_module(env, middleware_factory, name):
-             }
-         )
- 
-+    if name == "dr":
-+        return bind_all(
-+            env,
-+            middleware.build(middleware_factory.corosync_conf_existing),
-+            {
-+                "get_config": dr.get_config,
-+                "destroy": dr.destroy,
-+                "set_recovery_site": dr.set_recovery_site,
-+                "status_all_sites_plaintext": dr.status_all_sites_plaintext,
-+            }
-+        )
-+
-     if name == "remote_node":
-         return bind_all(
-             env,
-diff --git a/pcs/cli/dr.py b/pcs/cli/dr.py
-new file mode 100644
-index 00000000..c6830aa0
---- /dev/null
-+++ b/pcs/cli/dr.py
-@@ -0,0 +1,138 @@
-+from typing import (
-+    Any,
-+    List,
-+    Sequence,
-+)
-+
-+from pcs.cli.common.console_report import error
-+from pcs.cli.common.errors import CmdLineInputError
-+from pcs.cli.common.parse_args import InputModifiers
-+from pcs.common import report_codes
-+from pcs.common.dr import (
-+    DrConfigDto,
-+    DrConfigSiteDto,
-+    DrSiteStatusDto,
-+)
-+from pcs.common.tools import indent
-+
-+def config(
-+    lib: Any,
-+    argv: Sequence[str],
-+    modifiers: InputModifiers,
-+) -> None:
-+    """
-+    Options: None
-+    """
-+    modifiers.ensure_only_supported()
-+    if argv:
-+        raise CmdLineInputError()
-+    config_raw = lib.dr.get_config()
-+    try:
-+        config_dto = DrConfigDto.from_dict(config_raw)
-+    except (KeyError, TypeError, ValueError):
-+        raise error(
-+            "Unable to communicate with pcsd, received response:\n"
-+                f"{config_raw}"
-+        )
-+
-+    lines = ["Local site:"]
-+    lines.extend(indent(_config_site_lines(config_dto.local_site)))
-+    for site_dto in config_dto.remote_site_list:
-+        lines.append("Remote site:")
-+        lines.extend(indent(_config_site_lines(site_dto)))
-+    print("\n".join(lines))
-+
-+def _config_site_lines(site_dto: DrConfigSiteDto) -> List[str]:
-+    lines = [f"Role: {site_dto.site_role.capitalize()}"]
-+    if site_dto.node_list:
-+        lines.append("Nodes:")
-+        lines.extend(indent(sorted([node.name for node in site_dto.node_list])))
-+    return lines
-+
-+
-+def set_recovery_site(
-+    lib: Any,
-+    argv: Sequence[str],
-+    modifiers: InputModifiers,
-+) -> None:
-+    """
-+    Options:
-+      * --request-timeout - HTTP timeout for node authorization check
-+    """
-+    modifiers.ensure_only_supported("--request-timeout")
-+    if len(argv) != 1:
-+        raise CmdLineInputError()
-+    lib.dr.set_recovery_site(argv[0])
-+
-+def status(
-+    lib: Any,
-+    argv: Sequence[str],
-+    modifiers: InputModifiers,
-+) -> None:
-+    """
-+    Options:
-+      * --full - show full details, node attributes and failcount
-+      * --hide-inactive - hide inactive resources
-+      * --request-timeout - HTTP timeout for node authorization check
-+    """
-+    modifiers.ensure_only_supported(
-+        "--full", "--hide-inactive", "--request-timeout",
-+    )
-+    if argv:
-+        raise CmdLineInputError()
-+
-+    status_list_raw = lib.dr.status_all_sites_plaintext(
-+        hide_inactive_resources=modifiers.get("--hide-inactive"),
-+        verbose=modifiers.get("--full"),
-+    )
-+    try:
-+        status_list = [
-+            DrSiteStatusDto.from_dict(status_raw)
-+            for status_raw in status_list_raw
-+        ]
-+    except (KeyError, TypeError, ValueError):
-+        raise error(
-+            "Unable to communicate with pcsd, received response:\n"
-+                f"{status_list_raw}"
-+        )
-+
-+    has_errors = False
-+    plaintext_parts = []
-+    for site_status in status_list:
-+        plaintext_parts.append(
-+            "--- {local_remote} cluster - {role} site ---".format(
-+                local_remote=("Local" if site_status.local_site else "Remote"),
-+                role=site_status.site_role.capitalize()
-+            )
-+        )
-+        if site_status.status_successfully_obtained:
-+            plaintext_parts.append(site_status.status_plaintext.strip())
-+            plaintext_parts.extend(["", ""])
-+        else:
-+            has_errors = True
-+            plaintext_parts.extend([
-+                "Error: Unable to get status of the cluster from any node",
-+                ""
-+            ])
-+    print("\n".join(plaintext_parts).strip())
-+    if has_errors:
-+        raise error("Unable to get status of all sites")
-+
-+
-+def destroy(
-+    lib: Any,
-+    argv: Sequence[str],
-+    modifiers: InputModifiers,
-+) -> None:
-+    """
-+    Options:
-+      * --skip-offline - skip unreachable nodes (including missing auth token)
-+      * --request-timeout - HTTP timeout for node authorization check
-+    """
-+    modifiers.ensure_only_supported("--skip-offline", "--request-timeout")
-+    if argv:
-+        raise CmdLineInputError()
-+    force_flags = []
-+    if modifiers.get("--skip-offline"):
-+        force_flags.append(report_codes.SKIP_OFFLINE_NODES)
-+    lib.dr.destroy(force_flags=force_flags)
-diff --git a/pcs/cli/routing/dr.py b/pcs/cli/routing/dr.py
-new file mode 100644
-index 00000000..dbf44c1c
---- /dev/null
-+++ b/pcs/cli/routing/dr.py
-@@ -0,0 +1,15 @@
-+from pcs import usage
-+from pcs.cli import dr
-+from pcs.cli.common.routing import create_router
-+
-+dr_cmd = create_router(
-+    {
-+        "help": lambda lib, argv, modifiers: usage.dr(argv),
-+        "config": dr.config,
-+        "destroy": dr.destroy,
-+        "set-recovery-site": dr.set_recovery_site,
-+        "status": dr.status,
-+    },
-+    ["dr"],
-+    default_cmd="help",
-+)
-diff --git a/pcs/cluster.py b/pcs/cluster.py
-index 3a931b60..9473675f 100644
---- a/pcs/cluster.py
-+++ b/pcs/cluster.py
-@@ -1209,6 +1209,7 @@ def cluster_destroy(lib, argv, modifiers):
-             settings.corosync_conf_file,
-             settings.corosync_authkey_file,
-             settings.pacemaker_authkey_file,
-+            settings.pcsd_dr_config_location,
-         ])
-         state_files = [
-             "cib-*",
-diff --git a/pcs/common/dr.py b/pcs/common/dr.py
-new file mode 100644
-index 00000000..1648d93d
---- /dev/null
-+++ b/pcs/common/dr.py
-@@ -0,0 +1,109 @@
-+from enum import auto
-+from typing import (
-+    Any,
-+    Iterable,
-+    Mapping,
-+)
-+
-+from pcs.common.interface.dto import DataTransferObject
-+from pcs.common.tools import AutoNameEnum
-+
-+
-+class DrRole(AutoNameEnum):
-+    PRIMARY = auto()
-+    RECOVERY = auto()
-+
-+
-+class DrConfigNodeDto(DataTransferObject):
-+    def __init__(self, name: str):
-+        self.name = name
-+
-+    def to_dict(self) -> Mapping[str, Any]:
-+        return dict(name=self.name)
-+
-+    @classmethod
-+    def from_dict(cls, payload: Mapping[str, Any]) -> "DrConfigNodeDto":
-+        return cls(payload["name"])
-+
-+
-+class DrConfigSiteDto(DataTransferObject):
-+    def __init__(
-+        self,
-+        site_role: DrRole,
-+        node_list: Iterable[DrConfigNodeDto]
-+    ):
-+        self.site_role = site_role
-+        self.node_list = node_list
-+
-+    def to_dict(self) -> Mapping[str, Any]:
-+        return dict(
-+            site_role=self.site_role.value,
-+            node_list=[node.to_dict() for node in self.node_list]
-+        )
-+
-+    @classmethod
-+    def from_dict(cls, payload: Mapping[str, Any]) -> "DrConfigSiteDto":
-+        return cls(
-+            DrRole(payload["site_role"]),
-+            [
-+                DrConfigNodeDto.from_dict(payload_node)
-+                for payload_node in payload["node_list"]
-+            ],
-+        )
-+
-+
-+class DrConfigDto(DataTransferObject):
-+    def __init__(
-+        self,
-+        local_site: DrConfigSiteDto,
-+        remote_site_list: Iterable[DrConfigSiteDto]
-+    ):
-+        self.local_site = local_site
-+        self.remote_site_list = remote_site_list
-+
-+    def to_dict(self) -> Mapping[str, Any]:
-+        return dict(
-+            local_site=self.local_site.to_dict(),
-+            remote_site_list=[site.to_dict() for site in self.remote_site_list],
-+        )
-+
-+    @classmethod
-+    def from_dict(cls, payload: Mapping[str, Any]) -> "DrConfigDto":
-+        return cls(
-+            DrConfigSiteDto.from_dict(payload["local_site"]),
-+            [
-+                DrConfigSiteDto.from_dict(payload_site)
-+                for payload_site in payload["remote_site_list"]
-+            ],
-+        )
-+
-+
-+class DrSiteStatusDto(DataTransferObject):
-+    def __init__(
-+        self,
-+        local_site: bool,
-+        site_role: DrRole,
-+        status_plaintext: str,
-+        status_successfully_obtained: bool
-+    ):
-+        self.local_site = local_site
-+        self.site_role = site_role
-+        self.status_plaintext = status_plaintext
-+        self.status_successfully_obtained = status_successfully_obtained
-+
-+    def to_dict(self) -> Mapping[str, Any]:
-+        return dict(
-+            local_site=self.local_site,
-+            site_role=self.site_role.value,
-+            status_plaintext=self.status_plaintext,
-+            status_successfully_obtained=self.status_successfully_obtained,
-+        )
-+
-+    @classmethod
-+    def from_dict(cls, payload: Mapping[str, Any]) -> "DrSiteStatusDto":
-+        return cls(
-+            payload["local_site"],
-+            DrRole(payload["site_role"]),
-+            payload["status_plaintext"],
-+            payload["status_successfully_obtained"],
-+        )
-diff --git a/pcs/common/file_type_codes.py b/pcs/common/file_type_codes.py
-index 9c801180..967aa76b 100644
---- a/pcs/common/file_type_codes.py
-+++ b/pcs/common/file_type_codes.py
-@@ -1,11 +1,16 @@
--BOOTH_CONFIG = "BOOTH_CONFIG"
--BOOTH_KEY = "BOOTH_KEY"
--CIB = "CIB"
--COROSYNC_AUTHKEY = "COROSYNC_AUTHKEY"
--COROSYNC_CONF = "COROSYNC_CONF"
--PACEMAKER_AUTHKEY = "PACEMAKER_AUTHKEY"
--PCSD_ENVIRONMENT_CONFIG = "PCSD_ENVIRONMENT_CONFIG"
--PCSD_SSL_CERT = "PCSD_SSL_CERT"
--PCSD_SSL_KEY = "PCSD_SSL_KEY"
--PCS_KNOWN_HOSTS = "PCS_KNOWN_HOSTS"
--PCS_SETTINGS_CONF = "PCS_SETTINGS_CONF"
-+from typing import NewType
-+
-+FileTypeCode = NewType("FileTypeCode", str)
-+
-+BOOTH_CONFIG = FileTypeCode("BOOTH_CONFIG")
-+BOOTH_KEY = FileTypeCode("BOOTH_KEY")
-+CIB = FileTypeCode("CIB")
-+COROSYNC_AUTHKEY = FileTypeCode("COROSYNC_AUTHKEY")
-+COROSYNC_CONF = FileTypeCode("COROSYNC_CONF")
-+PACEMAKER_AUTHKEY = FileTypeCode("PACEMAKER_AUTHKEY")
-+PCSD_ENVIRONMENT_CONFIG = FileTypeCode("PCSD_ENVIRONMENT_CONFIG")
-+PCSD_SSL_CERT = FileTypeCode("PCSD_SSL_CERT")
-+PCSD_SSL_KEY = FileTypeCode("PCSD_SSL_KEY")
-+PCS_KNOWN_HOSTS = FileTypeCode("PCS_KNOWN_HOSTS")
-+PCS_SETTINGS_CONF = FileTypeCode("PCS_SETTINGS_CONF")
-+PCS_DR_CONFIG = FileTypeCode("PCS_DR_CONFIG")
-diff --git a/pcs/common/report_codes.py b/pcs/common/report_codes.py
-index 4e3433a8..514ac079 100644
---- a/pcs/common/report_codes.py
-+++ b/pcs/common/report_codes.py
-@@ -141,6 +141,8 @@ COROSYNC_TRANSPORT_UNSUPPORTED_OPTIONS = "COROSYNC_TRANSPORT_UNSUPPORTED_OPTIONS
- CRM_MON_ERROR = "CRM_MON_ERROR"
- DEFAULTS_CAN_BE_OVERRIDEN = "DEFAULTS_CAN_BE_OVERRIDEN"
- DEPRECATED_OPTION = "DEPRECATED_OPTION"
-+DR_CONFIG_ALREADY_EXIST = "DR_CONFIG_ALREADY_EXIST"
-+DR_CONFIG_DOES_NOT_EXIST = "DR_CONFIG_DOES_NOT_EXIST"
- DUPLICATE_CONSTRAINTS_EXIST = "DUPLICATE_CONSTRAINTS_EXIST"
- EMPTY_RESOURCE_SET_LIST = "EMPTY_RESOURCE_SET_LIST"
- EMPTY_ID = "EMPTY_ID"
-@@ -203,6 +205,7 @@ NONE_HOST_FOUND = "NONE_HOST_FOUND"
- NODE_USED_AS_TIE_BREAKER = "NODE_USED_AS_TIE_BREAKER"
- NODES_TO_REMOVE_UNREACHABLE = "NODES_TO_REMOVE_UNREACHABLE"
- NODE_TO_CLEAR_IS_STILL_IN_CLUSTER = "NODE_TO_CLEAR_IS_STILL_IN_CLUSTER"
-+NODE_IN_LOCAL_CLUSTER = "NODE_IN_LOCAL_CLUSTER"
- OMITTING_NODE = "OMITTING_NODE"
- OBJECT_WITH_ID_IN_UNEXPECTED_CONTEXT = "OBJECT_WITH_ID_IN_UNEXPECTED_CONTEXT"
- PACEMAKER_LOCAL_NODE_NAME_NOT_FOUND = "PACEMAKER_LOCAL_NODE_NAME_NOT_FOUND"
-diff --git a/pcs/lib/commands/cluster.py b/pcs/lib/commands/cluster.py
-index 64015864..f30dcb25 100644
---- a/pcs/lib/commands/cluster.py
-+++ b/pcs/lib/commands/cluster.py
-@@ -777,7 +777,7 @@ def add_nodes(
-         skip_wrong_config=force,
-     )
- 
--    # distribute corosync and pacemaker authkeys
-+    # distribute corosync and pacemaker authkeys and other config files
-     files_action = {}
-     forceable_io_error_creator = reports.get_problem_creator(
-         report_codes.SKIP_FILE_DISTRIBUTION_ERRORS, force
-@@ -814,6 +814,22 @@ def add_nodes(
-                 file_path=settings.pacemaker_authkey_file,
-             ))
- 
-+    if os.path.isfile(settings.pcsd_dr_config_location):
-+        try:
-+            files_action.update(
-+                node_communication_format.pcs_dr_config_file(
-+                    open(settings.pcsd_dr_config_location, "rb").read()
-+                )
-+            )
-+        except EnvironmentError as e:
-+            report_processor.report(forceable_io_error_creator(
-+                reports.file_io_error,
-+                file_type_codes.PCS_DR_CONFIG,
-+                RawFileError.ACTION_READ,
-+                format_environment_error(e),
-+                file_path=settings.pcsd_dr_config_location,
-+            ))
-+
-     # pcs_settings.conf was previously synced using pcsdcli send_local_configs.
-     # This has been changed temporarily until new system for distribution and
-     # syncronization of configs will be introduced.
-diff --git a/pcs/lib/commands/dr.py b/pcs/lib/commands/dr.py
-new file mode 100644
-index 00000000..41ddb5cb
---- /dev/null
-+++ b/pcs/lib/commands/dr.py
-@@ -0,0 +1,316 @@
-+from typing import (
-+    Any,
-+    Container,
-+    Iterable,
-+    List,
-+    Mapping,
-+    Tuple,
-+)
-+
-+from pcs.common import file_type_codes, report_codes
-+from pcs.common.dr import (
-+    DrConfigDto,
-+    DrConfigNodeDto,
-+    DrConfigSiteDto,
-+    DrSiteStatusDto,
-+)
-+from pcs.common.file import RawFileError
-+from pcs.common.node_communicator import RequestTarget
-+from pcs.common.reports import SimpleReportProcessor
-+
-+from pcs.lib import node_communication_format, reports
-+from pcs.lib.communication.corosync import GetCorosyncConf
-+from pcs.lib.communication.nodes import (
-+    DistributeFilesWithoutForces,
-+    RemoveFilesWithoutForces,
-+)
-+from pcs.lib.communication.status import GetFullClusterStatusPlaintext
-+from pcs.lib.communication.tools import (
-+    run as run_com_cmd,
-+    run_and_raise,
-+)
-+from pcs.lib.corosync.config_facade import ConfigFacade as CorosyncConfigFacade
-+from pcs.lib.dr.config.facade import (
-+    DrRole,
-+    Facade as DrConfigFacade,
-+)
-+from pcs.lib.env import LibraryEnvironment
-+from pcs.lib.errors import LibraryError, ReportItemList
-+from pcs.lib.file.instance import FileInstance
-+from pcs.lib.file.raw_file import raw_file_error_report
-+from pcs.lib.file.toolbox import for_file_type as get_file_toolbox
-+from pcs.lib.interface.config import ParserErrorException
-+from pcs.lib.node import get_existing_nodes_names
-+
-+
-+def get_config(env: LibraryEnvironment) -> Mapping[str, Any]:
-+    """
-+    Return local disaster recovery config
-+
-+    env -- LibraryEnvironment
-+    """
-+    report_processor = SimpleReportProcessor(env.report_processor)
-+    report_list, dr_config = _load_dr_config(env.get_dr_env().config)
-+    report_processor.report_list(report_list)
-+    if report_processor.has_errors:
-+        raise LibraryError()
-+
-+    return DrConfigDto(
-+        DrConfigSiteDto(
-+            dr_config.local_role,
-+            []
-+        ),
-+        [
-+            DrConfigSiteDto(
-+                site.role,
-+                [DrConfigNodeDto(name) for name in site.node_name_list]
-+            )
-+            for site in dr_config.get_remote_site_list()
-+        ]
-+    ).to_dict()
-+
-+
-+def set_recovery_site(env: LibraryEnvironment, node_name: str) -> None:
-+    """
-+    Set up disaster recovery with the local cluster being the primary site
-+
-+    env
-+    node_name -- a known host from the recovery site
-+    """
-+    if env.ghost_file_codes:
-+        raise LibraryError(
-+            reports.live_environment_required(env.ghost_file_codes)
-+        )
-+    report_processor = SimpleReportProcessor(env.report_processor)
-+    dr_env = env.get_dr_env()
-+    if dr_env.config.raw_file.exists():
-+        report_processor.report(reports.dr_config_already_exist())
-+    target_factory = env.get_node_target_factory()
-+
-+    local_nodes, report_list = get_existing_nodes_names(
-+        env.get_corosync_conf(),
-+        error_on_missing_name=True
-+    )
-+    report_processor.report_list(report_list)
-+
-+    if node_name in local_nodes:
-+        report_processor.report(reports.node_in_local_cluster(node_name))
-+
-+    report_list, local_targets = target_factory.get_target_list_with_reports(
-+        local_nodes, allow_skip=False, report_none_host_found=False
-+    )
-+    report_processor.report_list(report_list)
-+
-+    report_list, remote_targets = (
-+        target_factory.get_target_list_with_reports(
-+            [node_name], allow_skip=False, report_none_host_found=False
-+        )
-+    )
-+    report_processor.report_list(report_list)
-+
-+    if report_processor.has_errors:
-+        raise LibraryError()
-+
-+    com_cmd = GetCorosyncConf(env.report_processor)
-+    com_cmd.set_targets(remote_targets)
-+    remote_cluster_nodes, report_list = get_existing_nodes_names(
-+        CorosyncConfigFacade.from_string(
-+            run_and_raise(env.get_node_communicator(), com_cmd)
-+        ),
-+        error_on_missing_name=True
-+    )
-+    if report_processor.report_list(report_list):
-+        raise LibraryError()
-+
-+    # ensure we have tokens for all nodes of remote cluster
-+    report_list, remote_targets = target_factory.get_target_list_with_reports(
-+        remote_cluster_nodes, allow_skip=False, report_none_host_found=False
-+    )
-+    if report_processor.report_list(report_list):
-+        raise LibraryError()
-+    dr_config_exporter = (
-+        get_file_toolbox(file_type_codes.PCS_DR_CONFIG).exporter
-+    )
-+    # create dr config for remote cluster
-+    remote_dr_cfg = dr_env.create_facade(DrRole.RECOVERY)
-+    remote_dr_cfg.add_site(DrRole.PRIMARY, local_nodes)
-+    # send config to all node of remote cluster
-+    distribute_file_cmd = DistributeFilesWithoutForces(
-+        env.report_processor,
-+        node_communication_format.pcs_dr_config_file(
-+            dr_config_exporter.export(remote_dr_cfg.config)
-+        )
-+    )
-+    distribute_file_cmd.set_targets(remote_targets)
-+    run_and_raise(env.get_node_communicator(), distribute_file_cmd)
-+    # create new dr config, with local cluster as primary site
-+    local_dr_cfg = dr_env.create_facade(DrRole.PRIMARY)
-+    local_dr_cfg.add_site(DrRole.RECOVERY, remote_cluster_nodes)
-+    distribute_file_cmd = DistributeFilesWithoutForces(
-+        env.report_processor,
-+        node_communication_format.pcs_dr_config_file(
-+            dr_config_exporter.export(local_dr_cfg.config)
-+        )
-+    )
-+    distribute_file_cmd.set_targets(local_targets)
-+    run_and_raise(env.get_node_communicator(), distribute_file_cmd)
-+    # Note: No token sync across multiple clusters. Most probably they are in
-+    # different subnetworks.
-+
-+
-+def status_all_sites_plaintext(
-+    env: LibraryEnvironment,
-+    hide_inactive_resources: bool = False,
-+    verbose: bool = False,
-+) -> List[Mapping[str, Any]]:
-+    """
-+    Return local site's and all remote sites' status as plaintext
-+
-+    env -- LibraryEnvironment
-+    hide_inactive_resources -- if True, do not display non-running resources
-+    verbose -- if True, display more info
-+    """
-+    # The command does not provide an option to skip offline / unreacheable /
-+    # misbehaving nodes.
-+    # The point of such skipping is to stop a command if it is unable to make
-+    # changes on all nodes. The user can then decide to proceed anyway and
-+    # make changes on the skipped nodes later manually.
-+    # This command only reads from nodes so it automatically asks other nodes
-+    # if one is offline / misbehaving.
-+    class SiteData():
-+        local: bool
-+        role: DrRole
-+        target_list: Iterable[RequestTarget]
-+        status_loaded: bool
-+        status_plaintext: str
-+
-+        def __init__(self, local, role, target_list):
-+            self.local = local
-+            self.role = role
-+            self.target_list = target_list
-+            self.status_loaded = False
-+            self.status_plaintext = ""
-+
-+
-+    if env.ghost_file_codes:
-+        raise LibraryError(
-+            reports.live_environment_required(env.ghost_file_codes)
-+        )
-+
-+    report_processor = SimpleReportProcessor(env.report_processor)
-+    report_list, dr_config = _load_dr_config(env.get_dr_env().config)
-+    report_processor.report_list(report_list)
-+    if report_processor.has_errors:
-+        raise LibraryError()
-+
-+    site_data_list = []
-+    target_factory = env.get_node_target_factory()
-+
-+    # get local nodes
-+    local_nodes, report_list = get_existing_nodes_names(env.get_corosync_conf())
-+    report_processor.report_list(report_list)
-+    report_list, local_targets = target_factory.get_target_list_with_reports(
-+        local_nodes,
-+        skip_non_existing=True,
-+    )
-+    report_processor.report_list(report_list)
-+    site_data_list.append(SiteData(True, dr_config.local_role, local_targets))
-+
-+    # get remote sites' nodes
-+    for conf_remote_site in dr_config.get_remote_site_list():
-+        report_list, remote_targets = (
-+            target_factory.get_target_list_with_reports(
-+                conf_remote_site.node_name_list,
-+                skip_non_existing=True,
-+            )
-+        )
-+        report_processor.report_list(report_list)
-+        site_data_list.append(
-+            SiteData(False, conf_remote_site.role, remote_targets)
-+        )
-+    if report_processor.has_errors:
-+        raise LibraryError()
-+
-+    # get all statuses
-+    for site_data in site_data_list:
-+        com_cmd = GetFullClusterStatusPlaintext(
-+            report_processor,
-+            hide_inactive_resources=hide_inactive_resources,
-+            verbose=verbose,
-+        )
-+        com_cmd.set_targets(site_data.target_list)
-+        site_data.status_loaded, site_data.status_plaintext = run_com_cmd(
-+            env.get_node_communicator(), com_cmd
-+        )
-+
-+    return [
-+        DrSiteStatusDto(
-+            site_data.local,
-+            site_data.role,
-+            site_data.status_plaintext,
-+            site_data.status_loaded,
-+        ).to_dict()
-+        for site_data in site_data_list
-+    ]
-+
-+def _load_dr_config(
-+    config_file: FileInstance,
-+) -> Tuple[ReportItemList, DrConfigFacade]:
-+    if not config_file.raw_file.exists():
-+        return [reports.dr_config_does_not_exist()], DrConfigFacade.empty()
-+    try:
-+        return [], config_file.read_to_facade()
-+    except RawFileError as e:
-+        return [raw_file_error_report(e)], DrConfigFacade.empty()
-+    except ParserErrorException as e:
-+        return (
-+            config_file.parser_exception_to_report_list(e),
-+            DrConfigFacade.empty()
-+        )
-+
-+
-+def destroy(env: LibraryEnvironment, force_flags: Container[str] = ()) -> None:
-+    """
-+    Destroy disaster-recovery configuration on all sites
-+    """
-+    if env.ghost_file_codes:
-+        raise LibraryError(
-+            reports.live_environment_required(env.ghost_file_codes)
-+        )
-+
-+    report_processor = SimpleReportProcessor(env.report_processor)
-+    skip_offline = report_codes.SKIP_OFFLINE_NODES in force_flags
-+
-+    report_list, dr_config = _load_dr_config(env.get_dr_env().config)
-+    report_processor.report_list(report_list)
-+
-+    if report_processor.has_errors:
-+        raise LibraryError()
-+
-+    local_nodes, report_list = get_existing_nodes_names(env.get_corosync_conf())
-+    report_processor.report_list(report_list)
-+
-+    if report_processor.has_errors:
-+        raise LibraryError()
-+
-+    remote_nodes: List[str] = []
-+    for conf_remote_site in dr_config.get_remote_site_list():
-+        remote_nodes.extend(conf_remote_site.node_name_list)
-+
-+    target_factory = env.get_node_target_factory()
-+    report_list, targets = target_factory.get_target_list_with_reports(
-+         remote_nodes + local_nodes, skip_non_existing=skip_offline,
-+    )
-+    report_processor.report_list(report_list)
-+    if report_processor.has_errors:
-+        raise LibraryError()
-+
-+    com_cmd = RemoveFilesWithoutForces(
-+        env.report_processor, {
-+            "pcs disaster-recovery config": {
-+                "type": "pcs_disaster_recovery_conf",
-+            },
-+        },
-+    )
-+    com_cmd.set_targets(targets)
-+    run_and_raise(env.get_node_communicator(), com_cmd)
-diff --git a/pcs/lib/communication/corosync.py b/pcs/lib/communication/corosync.py
-index 0f3c3787..1a78e0de 100644
---- a/pcs/lib/communication/corosync.py
-+++ b/pcs/lib/communication/corosync.py
-@@ -138,3 +138,31 @@ class ReloadCorosyncConf(
-     def on_complete(self):
-         if not self.__was_successful and self.__has_failures:
-             self._report(reports.unable_to_perform_operation_on_any_node())
-+
-+
-+class GetCorosyncConf(
-+    AllSameDataMixin, OneByOneStrategyMixin, RunRemotelyBase
-+):
-+    __was_successful = False
-+    __has_failures = False
-+    __corosync_conf = None
-+
-+    def _get_request_data(self):
-+        return RequestData("remote/get_corosync_conf")
-+
-+    def _process_response(self, response):
-+        report = response_to_report_item(
-+            response, severity=ReportItemSeverity.WARNING
-+        )
-+        if report is not None:
-+            self.__has_failures = True
-+            self._report(report)
-+            return self._get_next_list()
-+        self.__corosync_conf = response.data
-+        self.__was_successful = True
-+        return []
-+
-+    def on_complete(self):
-+        if not self.__was_successful and self.__has_failures:
-+            self._report(reports.unable_to_perform_operation_on_any_node())
-+        return self.__corosync_conf
-diff --git a/pcs/lib/communication/status.py b/pcs/lib/communication/status.py
-new file mode 100644
-index 00000000..3470415a
---- /dev/null
-+++ b/pcs/lib/communication/status.py
-@@ -0,0 +1,97 @@
-+import json
-+from typing import Tuple
-+
-+from pcs.common.node_communicator import RequestData
-+from pcs.lib import reports
-+from pcs.lib.communication.tools import (
-+    AllSameDataMixin,
-+    OneByOneStrategyMixin,
-+    RunRemotelyBase,
-+)
-+from pcs.lib.errors import ReportItemSeverity
-+from pcs.lib.node_communication import response_to_report_item
-+
-+
-+class GetFullClusterStatusPlaintext(
-+    AllSameDataMixin, OneByOneStrategyMixin, RunRemotelyBase
-+):
-+    def __init__(
-+        self, report_processor, hide_inactive_resources=False, verbose=False
-+    ):
-+        super().__init__(report_processor)
-+        self._hide_inactive_resources = hide_inactive_resources
-+        self._verbose = verbose
-+        self._cluster_status = ""
-+        self._was_successful = False
-+
-+    def _get_request_data(self):
-+        return RequestData(
-+            "remote/cluster_status_plaintext",
-+            [
-+                (
-+                    "data_json",
-+                    json.dumps(dict(
-+                        hide_inactive_resources=self._hide_inactive_resources,
-+                        verbose=self._verbose,
-+                    ))
-+                )
-+            ],
-+        )
-+
-+    def _process_response(self, response):
-+        report = response_to_report_item(
-+            response, severity=ReportItemSeverity.WARNING
-+        )
-+        if report is not None:
-+            self._report(report)
-+            return self._get_next_list()
-+
-+        node = response.request.target.label
-+        try:
-+            output = json.loads(response.data)
-+            if output["status"] == "success":
-+                self._was_successful = True
-+                self._cluster_status = output["data"]
-+                return []
-+            if output["status_msg"]:
-+                self._report(
-+                    reports.node_communication_command_unsuccessful(
-+                        node,
-+                        response.request.action,
-+                        output["status_msg"]
-+                    )
-+                )
-+            # TODO Node name should be added to each received report item and
-+            # those modified report itemss should be reported. That, however,
-+            # requires reports overhaul which would add posibility to add a
-+            # node name to any report item. Also, infos and warnings should not
-+            # be ignored.
-+            if output["report_list"]:
-+                for report_data in output["report_list"]:
-+                    if (
-+                        report_data["severity"] == ReportItemSeverity.ERROR
-+                        and
-+                        report_data["report_text"]
-+                    ):
-+                        self._report(
-+                            reports.node_communication_command_unsuccessful(
-+                                node,
-+                                response.request.action,
-+                                report_data["report_text"]
-+                            )
-+                        )
-+        except (ValueError, LookupError, TypeError):
-+            self._report(reports.invalid_response_format(
-+                node,
-+                severity=ReportItemSeverity.WARNING,
-+            ))
-+
-+        return self._get_next_list()
-+
-+    def on_complete(self) -> Tuple[bool, str]:
-+        # Usually, reports.unable_to_perform_operation_on_any_node is reported
-+        # when the operation was unsuccessful and failed on at least one node.
-+        # The only use case this communication command is used does not need
-+        # that report and on top of that the report causes confusing ouptut for
-+        # the user. The report may be added in a future if needed.
-+        return self._was_successful, self._cluster_status
-diff --git a/pcs/lib/dr/__init__.py b/pcs/lib/dr/__init__.py
-new file mode 100644
-index 00000000..e69de29b
-diff --git a/pcs/lib/dr/config/__init__.py b/pcs/lib/dr/config/__init__.py
-new file mode 100644
-index 00000000..e69de29b
-diff --git a/pcs/lib/dr/config/facade.py b/pcs/lib/dr/config/facade.py
-new file mode 100644
-index 00000000..f3187ba5
---- /dev/null
-+++ b/pcs/lib/dr/config/facade.py
-@@ -0,0 +1,49 @@
-+from typing import (
-+    Iterable,
-+    List,
-+    NamedTuple,
-+)
-+
-+from pcs.common.dr import DrRole
-+from pcs.lib.interface.config import FacadeInterface
-+
-+
-+class DrSite(NamedTuple):
-+    role: DrRole
-+    node_name_list: List[str]
-+
-+
-+class Facade(FacadeInterface):
-+    @classmethod
-+    def create(cls, local_role: DrRole) -> "Facade":
-+        return cls(dict(
-+            local=dict(
-+                role=local_role.value,
-+            ),
-+            remote_sites=[],
-+        ))
-+
-+    @classmethod
-+    def empty(cls) -> "Facade":
-+        return cls(dict())
-+
-+    @property
-+    def local_role(self) -> DrRole:
-+        return DrRole(self._config["local"]["role"])
-+
-+    def add_site(self, role: DrRole, node_list: Iterable[str]) -> None:
-+        self._config["remote_sites"].append(
-+            dict(
-+                role=role.value,
-+                nodes=[dict(name=node) for node in node_list],
-+            )
-+        )
-+
-+    def get_remote_site_list(self) -> List[DrSite]:
-+        return [
-+            DrSite(
-+                DrRole(conf_site["role"]),
-+                [node["name"] for node in conf_site["nodes"]]
-+            )
-+            for conf_site in self._config.get("remote_sites", [])
-+        ]
-diff --git a/pcs/lib/dr/env.py b/pcs/lib/dr/env.py
-new file mode 100644
-index 00000000..c73ee622
---- /dev/null
-+++ b/pcs/lib/dr/env.py
-@@ -0,0 +1,28 @@
-+from pcs.common import file_type_codes
-+
-+from pcs.lib.file.instance import FileInstance
-+from pcs.lib.file.toolbox import (
-+    for_file_type as get_file_toolbox,
-+    FileToolbox,
-+)
-+
-+from .config.facade import (
-+    DrRole,
-+    Facade,
-+)
-+
-+class DrEnv:
-+    def __init__(self):
-+        self._config_file = FileInstance.for_dr_config()
-+
-+    @staticmethod
-+    def create_facade(role: DrRole) -> Facade:
-+        return Facade.create(role)
-+
-+    @property
-+    def config(self) -> FileInstance:
-+        return self._config_file
-+
-+    @staticmethod
-+    def get_config_toolbox() -> FileToolbox:
-+        return get_file_toolbox(file_type_codes.PCS_DR_CONFIG)
-diff --git a/pcs/lib/env.py b/pcs/lib/env.py
-index 66f7b1a4..0b12103e 100644
---- a/pcs/lib/env.py
-+++ b/pcs/lib/env.py
-@@ -3,11 +3,13 @@ from typing import (
- )
- from xml.etree.ElementTree import Element
- 
-+from pcs.common import file_type_codes
- from pcs.common.node_communicator import Communicator, NodeCommunicatorFactory
- from pcs.common.tools import Version
- from pcs.lib import reports
- from pcs.lib.booth.env import BoothEnv
- from pcs.lib.cib.tools import get_cib_crm_feature_set
-+from pcs.lib.dr.env import DrEnv
- from pcs.lib.node import get_existing_nodes_names
- from pcs.lib.communication import qdevice
- from pcs.lib.communication.corosync import (
-@@ -89,6 +91,7 @@ class LibraryEnvironment:
-             self._request_timeout
-         )
-         self.__loaded_booth_env = None
-+        self.__loaded_dr_env = None
- 
-         self.__timeout_cache = {}
- 
-@@ -108,6 +111,15 @@ class LibraryEnvironment:
-     def user_groups(self):
-         return self._user_groups
- 
-+    @property
-+    def ghost_file_codes(self):
-+        codes = set()
-+        if not self.is_cib_live:
-+            codes.add(file_type_codes.CIB)
-+        if not self.is_corosync_conf_live:
-+            codes.add(file_type_codes.COROSYNC_CONF)
-+        return codes
-+
-     def get_cib(self, minimal_version: Optional[Version] = None) -> Element:
-         if self.__loaded_cib_diff_source is not None:
-             raise AssertionError("CIB has already been loaded")
-@@ -412,3 +424,8 @@ class LibraryEnvironment:
-         if self.__loaded_booth_env is None:
-             self.__loaded_booth_env = BoothEnv(name, self._booth_files_data)
-         return self.__loaded_booth_env
-+
-+    def get_dr_env(self) -> DrEnv:
-+        if self.__loaded_dr_env is None:
-+            self.__loaded_dr_env = DrEnv()
-+        return self.__loaded_dr_env
-diff --git a/pcs/lib/file/instance.py b/pcs/lib/file/instance.py
-index da6b760c..f0812c2d 100644
---- a/pcs/lib/file/instance.py
-+++ b/pcs/lib/file/instance.py
-@@ -51,18 +51,27 @@ class FileInstance():
-         """
-         Factory for known-hosts file
-         """
--        file_type_code = file_type_codes.PCS_KNOWN_HOSTS
--        return cls(
--            raw_file.RealFile(metadata.for_file_type(file_type_code)),
--            toolbox.for_file_type(file_type_code)
--        )
-+        return cls._for_common(file_type_codes.PCS_KNOWN_HOSTS)
- 
-     @classmethod
-     def for_pacemaker_key(cls):
-         """
-         Factory for pacemaker key file
-         """
--        file_type_code = file_type_codes.PACEMAKER_AUTHKEY
-+        return cls._for_common(file_type_codes.PACEMAKER_AUTHKEY)
-+
-+    @classmethod
-+    def for_dr_config(cls) -> "FileInstance":
-+        """
-+        Factory for disaster-recovery config file
-+        """
-+        return cls._for_common(file_type_codes.PCS_DR_CONFIG)
-+
-+    @classmethod
-+    def _for_common(
-+        cls,
-+        file_type_code: file_type_codes.FileTypeCode,
-+    ) -> "FileInstance":
-         return cls(
-             raw_file.RealFile(metadata.for_file_type(file_type_code)),
-             toolbox.for_file_type(file_type_code)
-diff --git a/pcs/lib/file/metadata.py b/pcs/lib/file/metadata.py
-index 175e5ac1..72701aed 100644
---- a/pcs/lib/file/metadata.py
-+++ b/pcs/lib/file/metadata.py
-@@ -50,6 +50,14 @@ _metadata = {
-         permissions=0o600,
-         is_binary=False,
-     ),
-+    code.PCS_DR_CONFIG: lambda: FileMetadata(
-+        file_type_code=code.PCS_DR_CONFIG,
-+        path=settings.pcsd_dr_config_location,
-+        owner_user_name="root",
-+        owner_group_name="root",
-+        permissions=0o600,
-+        is_binary=False,
-+    )
- }
- 
- def for_file_type(file_type_code, *args, **kwargs):
-diff --git a/pcs/lib/file/toolbox.py b/pcs/lib/file/toolbox.py
-index 5d827887..db852617 100644
---- a/pcs/lib/file/toolbox.py
-+++ b/pcs/lib/file/toolbox.py
-@@ -1,4 +1,9 @@
--from collections import namedtuple
-+from typing import (
-+    Any,
-+    Dict,
-+    NamedTuple,
-+    Type,
-+)
- import json
- 
- from pcs.common import file_type_codes as code
-@@ -8,6 +13,8 @@ from pcs.lib.booth.config_parser import (
-     Exporter as BoothConfigExporter,
-     Parser as BoothConfigParser,
- )
-+from pcs.lib.dr.config.facade import Facade as DrConfigFacade
-+from pcs.lib.errors import ReportItemList
- from pcs.lib.interface.config import (
-     ExporterInterface,
-     FacadeInterface,
-@@ -16,27 +23,23 @@ from pcs.lib.interface.config import (
- )
- 
- 
--FileToolbox = namedtuple(
--    "FileToolbox",
--    [
--        # File type code the toolbox belongs to
--        "file_type_code",
--        # Provides an easy access for reading and modifying data
--        "facade",
--        # Turns raw data into a structure which the facade is able to process
--        "parser",
--        # Turns a structure produced by the parser and the facade to raw data
--        "exporter",
--        # Checks that the structure is valid
--        "validator",
--        # Provides means for file syncing based on the file's version
--        "version_controller",
--    ]
--)
-+class FileToolbox(NamedTuple):
-+    # File type code the toolbox belongs to
-+    file_type_code: code.FileTypeCode
-+    # Provides an easy access for reading and modifying data
-+    facade: Type[FacadeInterface]
-+    # Turns raw data into a structure which the facade is able to process
-+    parser: Type[ParserInterface]
-+    # Turns a structure produced by the parser and the facade to raw data
-+    exporter: Type[ExporterInterface]
-+    # Checks that the structure is valid
-+    validator: None # TBI
-+    # Provides means for file syncing based on the file's version
-+    version_controller: None # TBI
- 
- 
- class JsonParserException(ParserErrorException):
--    def __init__(self, json_exception):
-+    def __init__(self, json_exception: json.JSONDecodeError):
-         super().__init__()
-         self.json_exception = json_exception
- 
-@@ -45,7 +48,7 @@ class JsonParser(ParserInterface):
-     Adapts standard json parser to our interfaces
-     """
-     @staticmethod
--    def parse(raw_file_data):
-+    def parse(raw_file_data: bytes) -> Dict[str, Any]:
-         try:
-             # json.loads handles bytes, it expects utf-8, 16 or 32 encoding
-             return json.loads(raw_file_data)
-@@ -54,8 +57,12 @@ class JsonParser(ParserInterface):
- 
-     @staticmethod
-     def exception_to_report_list(
--        exception, file_type_code, file_path, force_code, is_forced_or_warning
--    ):
-+        exception: JsonParserException,
-+        file_type_code: code.FileTypeCode,
-+        file_path: str,
-+        force_code: str, # TODO: fix
-+        is_forced_or_warning: bool
-+    ) -> ReportItemList:
-         report_creator = reports.get_problem_creator(
-             force_code=force_code, is_forced=is_forced_or_warning
-         )
-@@ -80,7 +87,7 @@ class JsonExporter(ExporterInterface):
-     Adapts standard json exporter to our interfaces
-     """
-     @staticmethod
--    def export(config_structure):
-+    def export(config_structure: Dict[str, Any])-> bytes:
-         return json.dumps(
-             config_structure, indent=4, sort_keys=True,
-         ).encode("utf-8")
-@@ -88,23 +95,27 @@ class JsonExporter(ExporterInterface):
- 
- class NoopParser(ParserInterface):
-     @staticmethod
--    def parse(raw_file_data):
-+    def parse(raw_file_data: bytes) -> bytes:
-         return raw_file_data
- 
-     @staticmethod
-     def exception_to_report_list(
--        exception, file_type_code, file_path, force_code, is_forced_or_warning
--    ):
-+        exception: ParserErrorException,
-+        file_type_code: code.FileTypeCode,
-+        file_path: str,
-+        force_code: str, # TODO: fix
-+        is_forced_or_warning: bool
-+    ) -> ReportItemList:
-         return []
- 
- class NoopExporter(ExporterInterface):
-     @staticmethod
--    def export(config_structure):
-+    def export(config_structure: bytes) -> bytes:
-         return config_structure
- 
- class NoopFacade(FacadeInterface):
-     @classmethod
--    def create(cls):
-+    def create(cls) -> "NoopFacade":
-         return cls(bytes())
- 
- 
-@@ -135,7 +146,16 @@ _toolboxes = {
-     ),
-     code.PCS_KNOWN_HOSTS: FileToolbox(
-         file_type_code=code.PCS_KNOWN_HOSTS,
--        facade=None, # TODO needed for 'auth' and 'deauth' commands
-+        # TODO needed for 'auth' and 'deauth' commands
-+        facade=None, # type: ignore
-+        parser=JsonParser,
-+        exporter=JsonExporter,
-+        validator=None, # TODO needed for files syncing
-+        version_controller=None, # TODO needed for files syncing
-+    ),
-+    code.PCS_DR_CONFIG: FileToolbox(
-+        file_type_code=code.PCS_DR_CONFIG,
-+        facade=DrConfigFacade,
-         parser=JsonParser,
-         exporter=JsonExporter,
-         validator=None, # TODO needed for files syncing
-@@ -143,5 +163,5 @@ _toolboxes = {
-     ),
- }
- 
--def for_file_type(file_type_code):
-+def for_file_type(file_type_code: code.FileTypeCode) -> FileToolbox:
-     return _toolboxes[file_type_code]
-diff --git a/pcs/lib/node.py b/pcs/lib/node.py
-index 1930ffa8..09543c8e 100644
---- a/pcs/lib/node.py
-+++ b/pcs/lib/node.py
-@@ -1,5 +1,6 @@
- from typing import (
-     Iterable,
-+    List,
-     Optional,
-     Tuple,
- )
-@@ -18,7 +19,7 @@ def get_existing_nodes_names(
-     corosync_conf: Optional[CorosyncConfigFacade] = None,
-     cib: Optional[Element] = None,
-     error_on_missing_name: bool = False
--) -> Tuple[Iterable[str], ReportItemList]:
-+) -> Tuple[List[str], ReportItemList]:
-     return __get_nodes_names(
-         *__get_nodes(corosync_conf, cib),
-         error_on_missing_name
-@@ -56,7 +57,7 @@ def __get_nodes_names(
-     corosync_nodes: Iterable[CorosyncNode],
-     remote_and_guest_nodes: Iterable[PacemakerNode],
-     error_on_missing_name: bool = False
--) -> Tuple[Iterable[str], ReportItemList]:
-+) -> Tuple[List[str], ReportItemList]:
-     report_list = []
-     corosync_names = []
-     name_missing_in_corosync = False
-diff --git a/pcs/lib/node_communication_format.py b/pcs/lib/node_communication_format.py
-index 6134c66d..1cef35b4 100644
---- a/pcs/lib/node_communication_format.py
-+++ b/pcs/lib/node_communication_format.py
-@@ -1,5 +1,9 @@
- import base64
- from collections import namedtuple
-+from typing import (
-+    Any,
-+    Dict,
-+)
- 
- from pcs.lib import reports
- from pcs.lib.errors import LibraryError
-@@ -55,6 +59,18 @@ def corosync_conf_file(corosync_conf_content):
-         "corosync.conf": corosync_conf_format(corosync_conf_content)
-     }
- 
-+def pcs_dr_config_format(dr_conf_content: bytes) -> Dict[str, Any]:
-+    return {
-+        "type": "pcs_disaster_recovery_conf",
-+        "data": base64.b64encode(dr_conf_content).decode("utf-8"),
-+        "rewrite_existing": True,
-+    }
-+
-+def pcs_dr_config_file(dr_conf_content: bytes) -> Dict[str, Any]:
-+    return {
-+        "disaster-recovery config": pcs_dr_config_format(dr_conf_content)
-+    }
-+
- def pcs_settings_conf_format(content):
-     return {
-         "data": content,
-diff --git a/pcs/lib/reports.py b/pcs/lib/reports.py
-index e83737b0..1f081007 100644
---- a/pcs/lib/reports.py
-+++ b/pcs/lib/reports.py
-@@ -4221,3 +4221,34 @@ def resource_disable_affects_other_resources(
-             "crm_simulate_plaintext_output": crm_simulate_plaintext_output,
-         }
-     )
-+
-+
-+def dr_config_already_exist():
-+    """
-+    Disaster recovery config exists when the opposite was expected
-+    """
-+    return ReportItem.error(
-+        report_codes.DR_CONFIG_ALREADY_EXIST,
-+    )
-+
-+def dr_config_does_not_exist():
-+    """
-+    Disaster recovery config does not exist when the opposite was expected
-+    """
-+    return ReportItem.error(
-+        report_codes.DR_CONFIG_DOES_NOT_EXIST,
-+    )
-+
-+def node_in_local_cluster(node):
-+    """
-+    Node is part of local cluster and it cannot be used for example to set up
-+    disaster-recovery site
-+
-+    node -- node which is part of local cluster
-+    """
-+    return ReportItem.error(
-+        report_codes.NODE_IN_LOCAL_CLUSTER,
-+        info=dict(
-+            node=node,
-+        ),
-+    )
-diff --git a/pcs/pcs.8 b/pcs/pcs.8
-index 5765c6b5..651fda83 100644
---- a/pcs/pcs.8
-+++ b/pcs/pcs.8
-@@ -75,6 +75,9 @@ alert
- .TP
- client
-  Manage pcsd client configuration.
-+.TP
-+dr
-+ Manage disaster recovery configuration.
- .SS "resource"
- .TP
- [status [\fB\-\-hide\-inactive\fR]]
-@@ -887,7 +890,7 @@ stop
- Stop booth arbitrator service.
- .SS "status"
- .TP
--[status] [\fB\-\-full\fR | \fB\-\-hide\-inactive\fR]
-+[status] [\fB\-\-full\fR] [\fB\-\-hide\-inactive\fR]
- View all information about the cluster and resources (\fB\-\-full\fR provides more details, \fB\-\-hide\-inactive\fR hides inactive resources).
- .TP
- resources [\fB\-\-hide\-inactive\fR]
-@@ -1015,6 +1018,19 @@ Remove specified recipients.
- .TP
- local-auth [<pcsd\-port>] [\-u <username>] [\-p <password>]
- Authenticate current user to local pcsd. This is required to run some pcs commands which may require permissions of root user such as 'pcs cluster start'.
-+.SS "dr"
-+.TP
-+config
-+Display disaster-recovery configuration from the local node.
-+.TP
-+status [\fB\-\-full\fR] [\fB\-\-hide\-inactive\fR]
-+Display status of the local and the remote site cluster (\fB\-\-full\fR provides more details, \fB\-\-hide\-inactive\fR hides inactive resources).
-+.TP
-+set\-recovery\-site <recovery site node>
-+Set up disaster\-recovery with the local cluster being the primary site. The recovery site is defined by a name of one of its nodes.
-+.TP
-+destroy
-+Permanently destroy disaster-recovery configuration on all sites.
- .SH EXAMPLES
- .TP
- Show all resources
-diff --git a/pcs/pcs_internal.py b/pcs/pcs_internal.py
-index fecdc8d5..d956d71e 100644
---- a/pcs/pcs_internal.py
-+++ b/pcs/pcs_internal.py
-@@ -22,6 +22,7 @@ SUPPORTED_COMMANDS = {
-     "cluster.setup",
-     "cluster.add_nodes",
-     "cluster.remove_nodes",
-+    "status.full_cluster_status_plaintext",
- }
- 
- 
-diff --git a/pcs/settings_default.py b/pcs/settings_default.py
-index ab61b20b..6d8f33ac 100644
---- a/pcs/settings_default.py
-+++ b/pcs/settings_default.py
-@@ -50,6 +50,7 @@ pcsd_users_conf_location = os.path.join(pcsd_var_location, "pcs_users.conf")
- pcsd_settings_conf_location = os.path.join(
-     pcsd_var_location, "pcs_settings.conf"
- )
-+pcsd_dr_config_location = os.path.join(pcsd_var_location, "disaster-recovery")
- pcsd_exec_location = "/usr/lib/pcsd/"
- pcsd_log_location = "/var/log/pcsd/pcsd.log"
- pcsd_default_port = 2224
-diff --git a/pcs/usage.py b/pcs/usage.py
-index 0b16289e..e4f5af32 100644
---- a/pcs/usage.py
-+++ b/pcs/usage.py
-@@ -22,6 +22,7 @@ def full_usage():
-     out += strip_extras(host([], False))
-     out += strip_extras(alert([], False))
-     out += strip_extras(client([], False))
-+    out += strip_extras(dr([], False))
-     print(out.strip())
-     print("Examples:\n" + examples.replace(r" \ ", ""))
- 
-@@ -124,6 +125,7 @@ def generate_completion_tree_from_usage():
-     tree["alert"] = generate_tree(alert([], False))
-     tree["booth"] = generate_tree(booth([], False))
-     tree["client"] = generate_tree(client([], False))
-+    tree["dr"] = generate_tree(dr([], False))
-     return tree
- 
- def generate_tree(usage_txt):
-@@ -194,6 +196,7 @@ Commands:
-     node        Manage cluster nodes.
-     alert       Manage pacemaker alerts.
-     client      Manage pcsd client configuration.
-+    dr          Manage disaster recovery configuration.
- """
- # Advanced usage to possibly add later
- #  --corosync_conf=<corosync file> Specify alternative corosync.conf file
-@@ -1517,7 +1520,7 @@ def status(args=(), pout=True):
- Usage: pcs status [commands]...
- View current cluster and resource status
- Commands:
--    [status] [--full | --hide-inactive]
-+    [status] [--full] [--hide-inactive]
-         View all information about the cluster and resources (--full provides
-         more details, --hide-inactive hides inactive resources).
- 
-@@ -2019,6 +2022,32 @@ Commands:
-     return output
- 
- 
-+def dr(args=(), pout=True):
-+    output = """
-+Usage: pcs dr <command>
-+Manage disaster recovery configuration.
-+
-+Commands:
-+    config
-+        Display disaster-recovery configuration from the local node.
-+
-+    status [--full] [--hide-inactive]
-+        Display status of the local and the remote site cluster (--full
-+        provides more details, --hide-inactive hides inactive resources).
-+
-+    set-recovery-site <recovery site node>
-+        Set up disaster-recovery with the local cluster being the primary site.
-+        The recovery site is defined by a name of one of its nodes.
-+
-+    destroy
-+        Permanently destroy disaster-recovery configuration on all sites.
-+"""
-+    if pout:
-+        print(sub_usage(args, output))
-+        return None
-+    return output
-+
-+
- def show(main_usage_name, rest_usage_names):
-     usage_map = {
-         "acl": acl,
-@@ -2028,6 +2057,7 @@ def show(main_usage_name, rest_usage_names):
-         "cluster": cluster,
-         "config": config,
-         "constraint": constraint,
-+        "dr": dr,
-         "host": host,
-         "node": node,
-         "pcsd": pcsd,
-diff --git a/pcs_test/tier0/cli/common/test_console_report.py b/pcs_test/tier0/cli/common/test_console_report.py
-index 2deb896d..0d0c2457 100644
---- a/pcs_test/tier0/cli/common/test_console_report.py
-+++ b/pcs_test/tier0/cli/common/test_console_report.py
-@@ -4489,3 +4489,27 @@ class ResourceDisableAffectsOtherResources(NameBuildTest):
-                 "crm_simulate output",
-             )
-         )
-+
-+
-+class DrConfigAlreadyExist(NameBuildTest):
-+    def test_success(self):
-+        self.assert_message_from_report(
-+            "Disaster-recovery already configured",
-+            reports.dr_config_already_exist()
-+        )
-+
-+
-+class DrConfigDoesNotExist(NameBuildTest):
-+    def test_success(self):
-+        self.assert_message_from_report(
-+            "Disaster-recovery is not configured",
-+            reports.dr_config_does_not_exist()
-+        )
-+
-+
-+class NodeInLocalCluster(NameBuildTest):
-+    def test_success(self):
-+        self.assert_message_from_report(
-+            "Node 'node-name' is part of local cluster",
-+            reports.node_in_local_cluster("node-name")
-+        )
-diff --git a/pcs_test/tier0/cli/test_dr.py b/pcs_test/tier0/cli/test_dr.py
-new file mode 100644
-index 00000000..4422cdc4
---- /dev/null
-+++ b/pcs_test/tier0/cli/test_dr.py
-@@ -0,0 +1,293 @@
-+from textwrap import dedent
-+from unittest import mock, TestCase
-+
-+from pcs_test.tools.misc import dict_to_modifiers
-+
-+from pcs.common import report_codes
-+
-+from pcs.cli import dr
-+from pcs.cli.common.errors import CmdLineInputError
-+
-+
-+@mock.patch("pcs.cli.dr.print")
-+class Config(TestCase):
-+    def setUp(self):
-+        self.lib = mock.Mock(spec_set=["dr"])
-+        self.lib.dr = mock.Mock(spec_set=["get_config"])
-+
-+    def _call_cmd(self, argv=None):
-+        dr.config(self.lib, argv or [], dict_to_modifiers({}))
-+
-+    def test_argv(self, mock_print):
-+        with self.assertRaises(CmdLineInputError) as cm:
-+            self._call_cmd(["x"])
-+        self.assertIsNone(cm.exception.message)
-+        mock_print.assert_not_called()
-+
-+    def test_success(self, mock_print):
-+        self.lib.dr.get_config.return_value = {
-+            "local_site": {
-+                "node_list": [],
-+                "site_role": "RECOVERY",
-+            },
-+            "remote_site_list": [
-+                {
-+                    "node_list": [
-+                        {"name": "nodeA2"},
-+                        {"name": "nodeA1"},
-+                    ],
-+                    "site_role": "PRIMARY",
-+                },
-+                {
-+                    "node_list": [
-+                        {"name": "nodeB1"},
-+                    ],
-+                    "site_role": "RECOVERY",
-+                }
-+            ],
-+        }
-+        self._call_cmd([])
-+        self.lib.dr.get_config.assert_called_once_with()
-+        mock_print.assert_called_once_with(dedent("""\
-+            Local site:
-+              Role: Recovery
-+            Remote site:
-+              Role: Primary
-+              Nodes:
-+                nodeA1
-+                nodeA2
-+            Remote site:
-+              Role: Recovery
-+              Nodes:
-+                nodeB1"""))
-+
-+    @mock.patch("pcs.cli.common.console_report.sys.stderr.write")
-+    def test_invalid_response(self, mock_stderr, mock_print):
-+        self.lib.dr.get_config.return_value = [
-+            "wrong response",
-+            {"x": "y"},
-+        ]
-+        with self.assertRaises(SystemExit) as cm:
-+            self._call_cmd([])
-+        self.assertEqual(cm.exception.code, 1)
-+        self.lib.dr.get_config.assert_called_once_with()
-+        mock_print.assert_not_called()
-+        mock_stderr.assert_called_once_with(
-+            "Error: Unable to communicate with pcsd, received response:\n"
-+                "['wrong response', {'x': 'y'}]\n"
-+        )
-+
-+
-+class SetRecoverySite(TestCase):
-+    def setUp(self):
-+        self.lib = mock.Mock(spec_set=["dr"])
-+        self.dr = mock.Mock(spec_set=["set_recovery_site"])
-+        self.lib.dr = self.dr
-+
-+    def call_cmd(self, argv):
-+        dr.set_recovery_site(self.lib, argv, dict_to_modifiers({}))
-+
-+    def test_no_node(self):
-+        with self.assertRaises(CmdLineInputError) as cm:
-+            self.call_cmd([])
-+        self.assertIsNone(cm.exception.message)
-+
-+    def test_multiple_nodes(self):
-+        with self.assertRaises(CmdLineInputError) as cm:
-+            self.call_cmd(["node1", "node2"])
-+        self.assertIsNone(cm.exception.message)
-+
-+    def test_success(self):
-+        node = "node"
-+        self.call_cmd([node])
-+        self.dr.set_recovery_site.assert_called_once_with(node)
-+
-+
-+@mock.patch("pcs.cli.dr.print")
-+class Status(TestCase):
-+    def setUp(self):
-+        self.lib = mock.Mock(spec_set=["dr"])
-+        self.lib.dr = mock.Mock(spec_set=["status_all_sites_plaintext"])
-+
-+    def _call_cmd(self, argv, modifiers=None):
-+        dr.status(self.lib, argv, dict_to_modifiers(modifiers or {}))
-+
-+    def _fixture_response(self, local_success=True, remote_success=True):
-+        self.lib.dr.status_all_sites_plaintext.return_value = [
-+            {
-+                "local_site": True,
-+                "site_role": "PRIMARY",
-+                "status_plaintext": (
-+                    "local cluster\nstatus" if local_success
-+                    else "this should never be displayed"
-+                ),
-+                "status_successfully_obtained": local_success,
-+            },
-+            {
-+                "local_site": False,
-+                "site_role": "RECOVERY",
-+                "status_plaintext": (
-+                    "remote cluster\nstatus" if remote_success
-+                    else "this should never be displayed"
-+                ),
-+                "status_successfully_obtained": remote_success,
-+            },
-+        ]
-+
-+    @staticmethod
-+    def _fixture_print():
-+        return dedent("""\
-+            --- Local cluster - Primary site ---
-+            local cluster
-+            status
-+
-+
-+            --- Remote cluster - Recovery site ---
-+            remote cluster
-+            status"""
-+        )
-+
-+    def test_argv(self, mock_print):
-+        with self.assertRaises(CmdLineInputError) as cm:
-+            self._call_cmd(["x"])
-+        self.assertIsNone(cm.exception.message)
-+        mock_print.assert_not_called()
-+
-+    def test_success(self, mock_print):
-+        self._fixture_response()
-+        self._call_cmd([])
-+        self.lib.dr.status_all_sites_plaintext.assert_called_once_with(
-+            hide_inactive_resources=False, verbose=False
-+        )
-+        mock_print.assert_called_once_with(self._fixture_print())
-+
-+    def test_success_full(self, mock_print):
-+        self._fixture_response()
-+        self._call_cmd([], {"full": True})
-+        self.lib.dr.status_all_sites_plaintext.assert_called_once_with(
-+            hide_inactive_resources=False, verbose=True
-+        )
-+        mock_print.assert_called_once_with(self._fixture_print())
-+
-+    def test_success_hide_inactive(self, mock_print):
-+        self._fixture_response()
-+        self._call_cmd([], {"hide-inactive": True})
-+        self.lib.dr.status_all_sites_plaintext.assert_called_once_with(
-+            hide_inactive_resources=True, verbose=False
-+        )
-+        mock_print.assert_called_once_with(self._fixture_print())
-+
-+    def test_success_all_flags(self, mock_print):
-+        self._fixture_response()
-+        self._call_cmd([], {"full": True, "hide-inactive": True})
-+        self.lib.dr.status_all_sites_plaintext.assert_called_once_with(
-+            hide_inactive_resources=True, verbose=True
-+        )
-+        mock_print.assert_called_once_with(self._fixture_print())
-+
-+    @mock.patch("pcs.cli.common.console_report.sys.stderr.write")
-+    def test_error_local(self, mock_stderr, mock_print):
-+        self._fixture_response(local_success=False)
-+        with self.assertRaises(SystemExit) as cm:
-+            self._call_cmd([])
-+        self.assertEqual(cm.exception.code, 1)
-+        self.lib.dr.status_all_sites_plaintext.assert_called_once_with(
-+            hide_inactive_resources=False, verbose=False
-+        )
-+        mock_print.assert_called_once_with(dedent("""\
-+            --- Local cluster - Primary site ---
-+            Error: Unable to get status of the cluster from any node
-+
-+            --- Remote cluster - Recovery site ---
-+            remote cluster
-+            status"""
-+        ))
-+        mock_stderr.assert_called_once_with(
-+            "Error: Unable to get status of all sites\n"
-+        )
-+
-+    @mock.patch("pcs.cli.common.console_report.sys.stderr.write")
-+    def test_error_remote(self, mock_stderr, mock_print):
-+        self._fixture_response(remote_success=False)
-+        with self.assertRaises(SystemExit) as cm:
-+            self._call_cmd([])
-+        self.assertEqual(cm.exception.code, 1)
-+        self.lib.dr.status_all_sites_plaintext.assert_called_once_with(
-+            hide_inactive_resources=False, verbose=False
-+        )
-+        mock_print.assert_called_once_with(dedent("""\
-+            --- Local cluster - Primary site ---
-+            local cluster
-+            status
-+
-+
-+            --- Remote cluster - Recovery site ---
-+            Error: Unable to get status of the cluster from any node"""
-+        ))
-+        mock_stderr.assert_called_once_with(
-+            "Error: Unable to get status of all sites\n"
-+        )
-+
-+    @mock.patch("pcs.cli.common.console_report.sys.stderr.write")
-+    def test_error_both(self, mock_stderr, mock_print):
-+        self._fixture_response(local_success=False, remote_success=False)
-+        with self.assertRaises(SystemExit) as cm:
-+            self._call_cmd([])
-+        self.assertEqual(cm.exception.code, 1)
-+        self.lib.dr.status_all_sites_plaintext.assert_called_once_with(
-+            hide_inactive_resources=False, verbose=False
-+        )
-+        mock_print.assert_called_once_with(dedent("""\
-+            --- Local cluster - Primary site ---
-+            Error: Unable to get status of the cluster from any node
-+
-+            --- Remote cluster - Recovery site ---
-+            Error: Unable to get status of the cluster from any node"""
-+        ))
-+        mock_stderr.assert_called_once_with(
-+            "Error: Unable to get status of all sites\n"
-+        )
-+
-+    @mock.patch("pcs.cli.common.console_report.sys.stderr.write")
-+    def test_invalid_response(self, mock_stderr, mock_print):
-+        self.lib.dr.status_all_sites_plaintext.return_value = [
-+            "wrong response",
-+            {"x": "y"},
-+        ]
-+        with self.assertRaises(SystemExit) as cm:
-+            self._call_cmd([])
-+        self.assertEqual(cm.exception.code, 1)
-+        self.lib.dr.status_all_sites_plaintext.assert_called_once_with(
-+            hide_inactive_resources=False, verbose=False
-+        )
-+        mock_print.assert_not_called()
-+        mock_stderr.assert_called_once_with(
-+            "Error: Unable to communicate with pcsd, received response:\n"
-+                "['wrong response', {'x': 'y'}]\n"
-+        )
-+
-+
-+class Destroy(TestCase):
-+    def setUp(self):
-+        self.lib = mock.Mock(spec_set=["dr"])
-+        self.dr = mock.Mock(spec_set=["destroy"])
-+        self.lib.dr = self.dr
-+
-+    def call_cmd(self, argv, modifiers=None):
-+        modifiers = modifiers or {}
-+        dr.destroy(self.lib, argv, dict_to_modifiers(modifiers))
-+
-+    def test_some_args(self):
-+        with self.assertRaises(CmdLineInputError) as cm:
-+            self.call_cmd(["arg"])
-+        self.assertIsNone(cm.exception.message)
-+
-+    def test_success(self):
-+        self.call_cmd([])
-+        self.dr.destroy.assert_called_once_with(force_flags=[])
-+
-+    def test_skip_offline(self):
-+        self.call_cmd([], modifiers={"skip-offline": True})
-+        self.dr.destroy.assert_called_once_with(
-+            force_flags=[report_codes.SKIP_OFFLINE_NODES]
-+        )
-diff --git a/pcs_test/tier0/common/test_dr.py b/pcs_test/tier0/common/test_dr.py
-new file mode 100644
-index 00000000..2ef12855
---- /dev/null
-+++ b/pcs_test/tier0/common/test_dr.py
-@@ -0,0 +1,167 @@
-+from unittest import TestCase
-+
-+from pcs.common import dr
-+
-+
-+class DrConfigNodeDto(TestCase):
-+    def setUp(self):
-+        self.name = "node-name"
-+
-+    def _fixture_dto(self):
-+        return dr.DrConfigNodeDto(self.name)
-+
-+    def _fixture_dict(self):
-+        return dict(name=self.name)
-+
-+    def test_to_dict(self):
-+        self.assertEqual(
-+            self._fixture_dict(),
-+            self._fixture_dto().to_dict()
-+        )
-+
-+    def test_from_dict(self):
-+        dto = dr.DrConfigNodeDto.from_dict(self._fixture_dict())
-+        self.assertEqual(dto.name, self.name)
-+
-+
-+class DrConfigSiteDto(TestCase):
-+    def setUp(self):
-+        self.role = dr.DrRole.PRIMARY
-+        self.node_name_list = ["node1", "node2"]
-+
-+    def _fixture_dto(self):
-+        return dr.DrConfigSiteDto(
-+            self.role,
-+            [dr.DrConfigNodeDto(name) for name in self.node_name_list]
-+        )
-+
-+    def _fixture_dict(self):
-+        return dict(
-+            site_role=self.role,
-+            node_list=[dict(name=name) for name in self.node_name_list]
-+        )
-+
-+    def test_to_dict(self):
-+        self.assertEqual(
-+            self._fixture_dict(),
-+            self._fixture_dto().to_dict()
-+        )
-+
-+    def test_from_dict(self):
-+        dto = dr.DrConfigSiteDto.from_dict(self._fixture_dict())
-+        self.assertEqual(dto.site_role, self.role)
-+        self.assertEqual(len(dto.node_list), len(self.node_name_list))
-+        for i, dto_node in enumerate(dto.node_list):
-+            self.assertEqual(
-+                dto_node.name,
-+                self.node_name_list[i],
-+                f"index: {i}"
-+            )
-+
-+
-+class DrConfig(TestCase):
-+    @staticmethod
-+    def _fixture_site_dto(role, node_name_list):
-+        return dr.DrConfigSiteDto(
-+            role,
-+            [dr.DrConfigNodeDto(name) for name in node_name_list]
-+        )
-+
-+    @staticmethod
-+    def _fixture_dict():
-+        return {
-+            "local_site": {
-+                "node_list": [],
-+                "site_role": "RECOVERY",
-+            },
-+            "remote_site_list": [
-+                {
-+                    "node_list": [
-+                        {"name": "nodeA1"},
-+                        {"name": "nodeA2"},
-+                    ],
-+                    "site_role": "PRIMARY",
-+                },
-+                {
-+                    "node_list": [
-+                        {"name": "nodeB1"},
-+                    ],
-+                    "site_role": "RECOVERY",
-+                }
-+            ],
-+        }
-+
-+    def test_to_dict(self):
-+        self.assertEqual(
-+            self._fixture_dict(),
-+            dr.DrConfigDto(
-+                self._fixture_site_dto(dr.DrRole.RECOVERY, []),
-+                [
-+                    self._fixture_site_dto(
-+                        dr.DrRole.PRIMARY,
-+                        ["nodeA1", "nodeA2"]
-+                    ),
-+                    self._fixture_site_dto(
-+                        dr.DrRole.RECOVERY,
-+                        ["nodeB1"]
-+                    ),
-+                ]
-+            ).to_dict()
-+        )
-+
-+    def test_from_dict(self):
-+        dto = dr.DrConfigDto.from_dict(self._fixture_dict())
-+        self.assertEqual(
-+            dto.local_site.to_dict(),
-+            self._fixture_site_dto(dr.DrRole.RECOVERY, []).to_dict()
-+        )
-+        self.assertEqual(len(dto.remote_site_list), 2)
-+        self.assertEqual(
-+            dto.remote_site_list[0].to_dict(),
-+            self._fixture_site_dto(
-+                dr.DrRole.PRIMARY, ["nodeA1", "nodeA2"]
-+            ).to_dict()
-+        )
-+        self.assertEqual(
-+            dto.remote_site_list[1].to_dict(),
-+            self._fixture_site_dto(dr.DrRole.RECOVERY, ["nodeB1"]).to_dict()
-+        )
-+
-+class DrSiteStatusDto(TestCase):
-+    def setUp(self):
-+        self.local = False
-+        self.role = dr.DrRole.PRIMARY
-+        self.status_plaintext = "plaintext status"
-+        self.status_successfully_obtained = True
-+
-+    def dto_fixture(self):
-+        return dr.DrSiteStatusDto(
-+            self.local,
-+            self.role,
-+            self.status_plaintext,
-+            self.status_successfully_obtained,
-+        )
-+
-+    def dict_fixture(self):
-+        return dict(
-+            local_site=self.local,
-+            site_role=self.role.value,
-+            status_plaintext=self.status_plaintext,
-+            status_successfully_obtained=self.status_successfully_obtained,
-+        )
-+
-+    def test_to_dict(self):
-+        self.assertEqual(
-+            self.dict_fixture(),
-+            self.dto_fixture().to_dict()
-+        )
-+
-+    def test_from_dict(self):
-+        dto = dr.DrSiteStatusDto.from_dict(self.dict_fixture())
-+        self.assertEqual(dto.local_site, self.local)
-+        self.assertEqual(dto.site_role, self.role)
-+        self.assertEqual(dto.status_plaintext, self.status_plaintext)
-+        self.assertEqual(
-+            dto.status_successfully_obtained,
-+            self.status_successfully_obtained
-+        )
-diff --git a/pcs_test/tier0/lib/commands/cluster/test_add_nodes.py b/pcs_test/tier0/lib/commands/cluster/test_add_nodes.py
-index a570d67e..295c1e6a 100644
---- a/pcs_test/tier0/lib/commands/cluster/test_add_nodes.py
-+++ b/pcs_test/tier0/lib/commands/cluster/test_add_nodes.py
-@@ -470,6 +470,11 @@ class LocalConfig():
-                 return_value=False,
-                 name=f"{local_prefix}fs.isfile.pacemaker_authkey"
-             )
-+            .fs.isfile(
-+                settings.pcsd_dr_config_location,
-+                return_value=False,
-+                name=f"{local_prefix}fs.isfile.pcsd_disaster_recovery"
-+            )
-             .fs.isfile(
-                 settings.pcsd_settings_conf_location,
-                 return_value=False,
-@@ -480,10 +485,12 @@ class LocalConfig():
-     def files_sync(self, node_labels):
-         corosync_authkey_content = b"corosync authfile"
-         pcmk_authkey_content = b"pcmk authfile"
--        pcs_settings_content = "pcs_settigns.conf data"
-+        pcs_disaster_recovery_content = b"disaster recovery config data"
-+        pcs_settings_content = "pcs_settings.conf data"
-         file_list = [
-             "corosync authkey",
-             "pacemaker authkey",
-+            "disaster-recovery config",
-             "pcs_settings.conf",
-         ]
-         local_prefix = "local.files_sync."
-@@ -512,6 +519,19 @@ class LocalConfig():
-                 mode="rb",
-                 name=f"{local_prefix}fs.open.pcmk_authkey_read",
-             )
-+            .fs.isfile(
-+                settings.pcsd_dr_config_location,
-+                return_value=True,
-+                name=f"{local_prefix}fs.isfile.pcsd_disaster_recovery"
-+            )
-+            .fs.open(
-+                settings.pcsd_dr_config_location,
-+                return_value=(
-+                    mock.mock_open(read_data=pcs_disaster_recovery_content)()
-+                ),
-+                mode="rb",
-+                name=f"{local_prefix}fs.open.pcsd_disaster_recovery_read",
-+            )
-             .fs.isfile(
-                 settings.pcsd_settings_conf_location,
-                 return_value=True,
-@@ -526,6 +546,7 @@ class LocalConfig():
-                 node_labels=node_labels,
-                 pcmk_authkey=pcmk_authkey_content,
-                 corosync_authkey=corosync_authkey_content,
-+                pcs_disaster_recovery_conf=pcs_disaster_recovery_content,
-                 pcs_settings_conf=pcs_settings_content,
-                 name=f"{local_prefix}http.files.put_files",
-             )
-@@ -2105,13 +2126,16 @@ class FailureFilesDistribution(TestCase):
-         self.expected_reports = []
-         self.pcmk_authkey_content = b"pcmk authkey content"
-         self.corosync_authkey_content = b"corosync authkey content"
-+        self.pcsd_dr_config_content = b"disaster recovery config data"
-         self.pcmk_authkey_file_id = "pacemaker_remote authkey"
-         self.corosync_authkey_file_id = "corosync authkey"
-+        self.pcsd_dr_config_file_id = "disaster-recovery config"
-         self.unsuccessful_nodes = self.new_nodes[:1]
-         self.successful_nodes = self.new_nodes[1:]
-         self.err_msg = "an error message"
-         self.corosync_key_open_before_position = "fs.isfile.pacemaker_authkey"
--        self.pacemaker_key_open_before_position = "fs.isfile.pcsd_settings"
-+        self.pacemaker_key_open_before_position = "fs.isfile.pcsd_dr_config"
-+        self.pcsd_dr_config_open_before_position = "fs.isfile.pcsd_settings"
-         patch_getaddrinfo(self, self.new_nodes)
-         self.existing_corosync_nodes = [
-             node_fixture(node, node_id)
-@@ -2149,9 +2173,14 @@ class FailureFilesDistribution(TestCase):
-             )
-             # open will be inserted here
-             .fs.isfile(
--                settings.pcsd_settings_conf_location, return_value=False,
-+                settings.pcsd_dr_config_location, return_value=True,
-                 name=self.pacemaker_key_open_before_position
-             )
-+            # open will be inserted here
-+            .fs.isfile(
-+                settings.pcsd_settings_conf_location, return_value=False,
-+                name=self.pcsd_dr_config_open_before_position
-+            )
-         )
-         self.expected_reports.extend(
-             [
-@@ -2165,7 +2194,11 @@ class FailureFilesDistribution(TestCase):
-         self.distribution_started_reports = [
-             fixture.info(
-                 report_codes.FILES_DISTRIBUTION_STARTED,
--                file_list=["corosync authkey", "pacemaker authkey"],
-+                file_list=[
-+                    self.corosync_authkey_file_id,
-+                    "pacemaker authkey",
-+                    self.pcsd_dr_config_file_id,
-+                ],
-                 node_list=self.new_nodes,
-             )
-         ]
-@@ -2181,6 +2214,12 @@ class FailureFilesDistribution(TestCase):
-                 node=node,
-                 file_description="pacemaker authkey",
-             ) for node in self.successful_nodes
-+        ] + [
-+            fixture.info(
-+                report_codes.FILE_DISTRIBUTION_SUCCESS,
-+                node=node,
-+                file_description=self.pcsd_dr_config_file_id,
-+            ) for node in self.successful_nodes
-         ]
- 
-     def _add_nodes_with_lib_error(self):
-@@ -2210,6 +2249,15 @@ class FailureFilesDistribution(TestCase):
-             name="fs.open.pacemaker_authkey",
-             before=self.pacemaker_key_open_before_position,
-         )
-+        self.config.fs.open(
-+            settings.pcsd_dr_config_location,
-+            mode="rb",
-+            side_effect=EnvironmentError(
-+                1, self.err_msg, settings.pcsd_dr_config_location
-+            ),
-+            name="fs.open.pcsd_dr_config",
-+            before=self.pcsd_dr_config_open_before_position,
-+        )
- 
-         self._add_nodes_with_lib_error()
- 
-@@ -2236,7 +2284,17 @@ class FailureFilesDistribution(TestCase):
-                         f"{self.err_msg}: '{settings.pacemaker_authkey_file}'"
-                     ),
-                     operation=RawFileError.ACTION_READ,
--                )
-+                ),
-+                fixture.error(
-+                    report_codes.FILE_IO_ERROR,
-+                    force_code=report_codes.SKIP_FILE_DISTRIBUTION_ERRORS,
-+                    file_type_code=file_type_codes.PCS_DR_CONFIG,
-+                    file_path=settings.pcsd_dr_config_location,
-+                    reason=(
-+                        f"{self.err_msg}: '{settings.pcsd_dr_config_location}'"
-+                    ),
-+                    operation=RawFileError.ACTION_READ,
-+                ),
-             ]
-         )
- 
-@@ -2260,6 +2318,15 @@ class FailureFilesDistribution(TestCase):
-                 name="fs.open.pacemaker_authkey",
-                 before=self.pacemaker_key_open_before_position,
-             )
-+            .fs.open(
-+                settings.pcsd_dr_config_location,
-+                mode="rb",
-+                side_effect=EnvironmentError(
-+                    1, self.err_msg, settings.pcsd_dr_config_location
-+                ),
-+                name="fs.open.pcsd_dr_config",
-+                before=self.pcsd_dr_config_open_before_position,
-+            )
-             .local.distribute_and_reload_corosync_conf(
-                 corosync_conf_fixture(
-                     self.existing_corosync_nodes + [
-@@ -2301,7 +2368,16 @@ class FailureFilesDistribution(TestCase):
-                         f"{self.err_msg}: '{settings.pacemaker_authkey_file}'"
-                     ),
-                     operation=RawFileError.ACTION_READ,
--                )
-+                ),
-+                fixture.warn(
-+                    report_codes.FILE_IO_ERROR,
-+                    file_type_code=file_type_codes.PCS_DR_CONFIG,
-+                    file_path=settings.pcsd_dr_config_location,
-+                    reason=(
-+                        f"{self.err_msg}: '{settings.pcsd_dr_config_location}'"
-+                    ),
-+                    operation=RawFileError.ACTION_READ,
-+                ),
-             ]
-         )
- 
-@@ -2325,9 +2401,19 @@ class FailureFilesDistribution(TestCase):
-                 name="fs.open.pacemaker_authkey",
-                 before=self.pacemaker_key_open_before_position,
-             )
-+            .fs.open(
-+                settings.pcsd_dr_config_location,
-+                return_value=mock.mock_open(
-+                    read_data=self.pcsd_dr_config_content
-+                )(),
-+                mode="rb",
-+                name="fs.open.pcsd_dr_config",
-+                before=self.pcsd_dr_config_open_before_position,
-+            )
-             .http.files.put_files(
-                 pcmk_authkey=self.pcmk_authkey_content,
-                 corosync_authkey=self.corosync_authkey_content,
-+                pcs_disaster_recovery_conf=self.pcsd_dr_config_content,
-                 communication_list=[
-                     dict(
-                         label=node,
-@@ -2339,7 +2425,11 @@ class FailureFilesDistribution(TestCase):
-                             self.pcmk_authkey_file_id: dict(
-                                 code="unexpected",
-                                 message=self.err_msg
--                            )
-+                            ),
-+                            self.pcsd_dr_config_file_id: dict(
-+                                code="unexpected",
-+                                message=self.err_msg
-+                            ),
-                         }))
-                     ) for node in self.unsuccessful_nodes
-                 ] + [
-@@ -2374,6 +2464,15 @@ class FailureFilesDistribution(TestCase):
-                     reason=self.err_msg,
-                 ) for node in self.unsuccessful_nodes
-             ]
-+            +
-+            [
-+                fixture.error(
-+                    report_codes.FILE_DISTRIBUTION_ERROR,
-+                    node=node,
-+                    file_description=self.pcsd_dr_config_file_id,
-+                    reason=self.err_msg,
-+                ) for node in self.unsuccessful_nodes
-+            ]
-         )
- 
-     def test_communication_failure(self):
-@@ -2396,9 +2495,19 @@ class FailureFilesDistribution(TestCase):
-                 name="fs.open.pacemaker_authkey",
-                 before=self.pacemaker_key_open_before_position,
-             )
-+            .fs.open(
-+                settings.pcsd_dr_config_location,
-+                return_value=mock.mock_open(
-+                    read_data=self.pcsd_dr_config_content
-+                )(),
-+                mode="rb",
-+                name="fs.open.pcsd_dr_config",
-+                before=self.pcsd_dr_config_open_before_position,
-+            )
-             .http.files.put_files(
-                 pcmk_authkey=self.pcmk_authkey_content,
-                 corosync_authkey=self.corosync_authkey_content,
-+                pcs_disaster_recovery_conf=self.pcsd_dr_config_content,
-                 communication_list=[
-                     dict(
-                         label=node,
-@@ -2450,9 +2559,19 @@ class FailureFilesDistribution(TestCase):
-                 name="fs.open.pacemaker_authkey",
-                 before=self.pacemaker_key_open_before_position,
-             )
-+            .fs.open(
-+                settings.pcsd_dr_config_location,
-+                return_value=mock.mock_open(
-+                    read_data=self.pcsd_dr_config_content
-+                )(),
-+                mode="rb",
-+                name="fs.open.pcsd_dr_config",
-+                before=self.pcsd_dr_config_open_before_position,
-+            )
-             .http.files.put_files(
-                 pcmk_authkey=self.pcmk_authkey_content,
-                 corosync_authkey=self.corosync_authkey_content,
-+                pcs_disaster_recovery_conf=self.pcsd_dr_config_content,
-                 communication_list=[
-                     dict(
-                         label=node,
-@@ -2501,9 +2620,19 @@ class FailureFilesDistribution(TestCase):
-                 name="fs.open.pacemaker_authkey",
-                 before=self.pacemaker_key_open_before_position,
-             )
-+            .fs.open(
-+                settings.pcsd_dr_config_location,
-+                return_value=mock.mock_open(
-+                    read_data=self.pcsd_dr_config_content
-+                )(),
-+                mode="rb",
-+                name="fs.open.pcsd_dr_config",
-+                before=self.pcsd_dr_config_open_before_position,
-+            )
-             .http.files.put_files(
-                 pcmk_authkey=self.pcmk_authkey_content,
-                 corosync_authkey=self.corosync_authkey_content,
-+                pcs_disaster_recovery_conf=self.pcsd_dr_config_content,
-                 communication_list=[
-                     dict(
-                         label=node,
-diff --git a/pcs_test/tier0/lib/commands/dr/__init__.py b/pcs_test/tier0/lib/commands/dr/__init__.py
-new file mode 100644
-index 00000000..e69de29b
-diff --git a/pcs_test/tier0/lib/commands/dr/test_destroy.py b/pcs_test/tier0/lib/commands/dr/test_destroy.py
-new file mode 100644
-index 00000000..de50b21c
---- /dev/null
-+++ b/pcs_test/tier0/lib/commands/dr/test_destroy.py
-@@ -0,0 +1,342 @@
-+import json
-+from unittest import TestCase
-+
-+from pcs_test.tools import fixture
-+from pcs_test.tools.command_env import get_env_tools
-+
-+from pcs import settings
-+from pcs.common import (
-+    file_type_codes,
-+    report_codes,
-+)
-+from pcs.common.file import RawFileError
-+from pcs.lib.commands import dr
-+
-+
-+DR_CONF = "pcs disaster-recovery config"
-+REASON = "error msg"
-+
-+
-+def generate_nodes(nodes_num, prefix=""):
-+    return [f"{prefix}node{i}" for i in range(1, nodes_num + 1)]
-+
-+
-+class CheckLive(TestCase):
-+    def setUp(self):
-+        self.env_assist, self.config = get_env_tools(self)
-+
-+    def assert_live_required(self, forbidden_options):
-+        self.env_assist.assert_raise_library_error(
-+            lambda: dr.destroy(self.env_assist.get_env()),
-+            [
-+                fixture.error(
-+                    report_codes.LIVE_ENVIRONMENT_REQUIRED,
-+                    forbidden_options=forbidden_options
-+                )
-+            ],
-+            expected_in_processor=False
-+        )
-+
-+    def test_mock_corosync(self):
-+        self.config.env.set_corosync_conf_data("corosync conf data")
-+        self.assert_live_required([file_type_codes.COROSYNC_CONF])
-+
-+    def test_mock_cib(self):
-+        self.config.env.set_cib_data("<cib />")
-+        self.assert_live_required([file_type_codes.CIB])
-+
-+    def test_mock(self):
-+        self.config.env.set_corosync_conf_data("corosync conf data")
-+        self.config.env.set_cib_data("<cib />")
-+        self.assert_live_required([
-+            file_type_codes.CIB,
-+            file_type_codes.COROSYNC_CONF,
-+        ])
-+
-+
-+class FixtureMixin:
-+    def _fixture_load_configs(self):
-+        self.config.raw_file.exists(
-+            file_type_codes.PCS_DR_CONFIG,
-+            settings.pcsd_dr_config_location,
-+        )
-+        self.config.raw_file.read(
-+            file_type_codes.PCS_DR_CONFIG,
-+            settings.pcsd_dr_config_location,
-+            content="""
-+                {{
-+                    "local": {{
-+                        "role": "PRIMARY"
-+                    }},
-+                    "remote_sites": [
-+                        {{
-+                            "nodes": [{nodes}],
-+                            "role": "RECOVERY"
-+                        }}
-+                    ]
-+                }}
-+            """.format(
-+                nodes=", ".join([
-+                    json.dumps(dict(name=node))
-+                    for node in self.remote_nodes
-+                ])
-+            )
-+        )
-+        self.config.corosync_conf.load(node_name_list=self.local_nodes)
-+
-+    def _success_reports(self):
-+        return [
-+            fixture.info(
-+                report_codes.FILES_REMOVE_FROM_NODES_STARTED,
-+                file_list=[DR_CONF],
-+                node_list=self.remote_nodes + self.local_nodes,
-+            )
-+        ] + [
-+            fixture.info(
-+                report_codes.FILE_REMOVE_FROM_NODE_SUCCESS,
-+                file_description=DR_CONF,
-+                node=node,
-+            ) for node in (self.remote_nodes + self.local_nodes)
-+        ]
-+
-+
-+class Success(FixtureMixin, TestCase):
-+    def setUp(self):
-+        self.env_assist, self.config = get_env_tools(self)
-+        self.local_nodes = generate_nodes(5)
-+        self.remote_nodes = generate_nodes(3, prefix="remote-")
-+        self.config.env.set_known_nodes(self.local_nodes + self.remote_nodes)
-+
-+    def test_minimal(self):
-+        self._fixture_load_configs()
-+        self.config.http.files.remove_files(
-+            node_labels=self.remote_nodes + self.local_nodes,
-+            pcs_disaster_recovery_conf=True,
-+        )
-+        dr.destroy(self.env_assist.get_env())
-+        self.env_assist.assert_reports(self._success_reports())
-+
-+
-+class FatalConfigIssue(FixtureMixin, TestCase):
-+    def setUp(self):
-+        self.env_assist, self.config = get_env_tools(self)
-+        self.local_nodes = generate_nodes(5)
-+        self.remote_nodes = generate_nodes(3, prefix="remote-")
-+
-+    def test_config_missing(self):
-+        self.config.raw_file.exists(
-+            file_type_codes.PCS_DR_CONFIG,
-+            settings.pcsd_dr_config_location,
-+            exists=False,
-+        )
-+
-+        self.env_assist.assert_raise_library_error(
-+            lambda: dr.destroy(self.env_assist.get_env()),
-+        )
-+        self.env_assist.assert_reports([
-+            fixture.error(
-+                report_codes.DR_CONFIG_DOES_NOT_EXIST,
-+            ),
-+        ])
-+
-+    def test_config_read_error(self):
-+        self.config.raw_file.exists(
-+            file_type_codes.PCS_DR_CONFIG,
-+            settings.pcsd_dr_config_location,
-+        )
-+        self.config.raw_file.read(
-+            file_type_codes.PCS_DR_CONFIG,
-+            settings.pcsd_dr_config_location,
-+            exception_msg=REASON,
-+        )
-+
-+        self.env_assist.assert_raise_library_error(
-+            lambda: dr.destroy(self.env_assist.get_env()),
-+        )
-+        self.env_assist.assert_reports([
-+            fixture.error(
-+                report_codes.FILE_IO_ERROR,
-+                file_type_code=file_type_codes.PCS_DR_CONFIG,
-+                file_path=settings.pcsd_dr_config_location,
-+                operation=RawFileError.ACTION_READ,
-+                reason=REASON,
-+            ),
-+        ])
-+
-+    def test_config_parse_error(self):
-+        self.config.raw_file.exists(
-+            file_type_codes.PCS_DR_CONFIG,
-+            settings.pcsd_dr_config_location,
-+        )
-+        self.config.raw_file.read(
-+            file_type_codes.PCS_DR_CONFIG,
-+            settings.pcsd_dr_config_location,
-+            content="bad content",
-+        )
-+
-+        self.env_assist.assert_raise_library_error(
-+            lambda: dr.destroy(self.env_assist.get_env()),
-+        )
-+        self.env_assist.assert_reports([
-+            fixture.error(
-+                report_codes.PARSE_ERROR_JSON_FILE,
-+                file_type_code=file_type_codes.PCS_DR_CONFIG,
-+                file_path=settings.pcsd_dr_config_location,
-+                line_number=1,
-+                column_number=1,
-+                position=0,
-+                reason="Expecting value",
-+                full_msg="Expecting value: line 1 column 1 (char 0)",
-+            ),
-+        ])
-+
-+    def test_corosync_conf_read_error(self):
-+        self._fixture_load_configs()
-+        self.config.corosync_conf.load_content(
-+            "", exception_msg=REASON, instead="corosync_conf.load"
-+        )
-+        self.env_assist.assert_raise_library_error(
-+            lambda: dr.destroy(self.env_assist.get_env()),
-+            [
-+                fixture.error(
-+                    report_codes.UNABLE_TO_READ_COROSYNC_CONFIG,
-+                    path=settings.corosync_conf_file,
-+                    reason=REASON,
-+                ),
-+            ],
-+            expected_in_processor=False
-+        )
-+
-+    def test_corosync_conf_parse_error(self):
-+        self._fixture_load_configs()
-+        self.config.corosync_conf.load_content(
-+            "wrong {\n  corosync", instead="corosync_conf.load"
-+        )
-+        self.env_assist.assert_raise_library_error(
-+            lambda: dr.destroy(self.env_assist.get_env()),
-+            [
-+                fixture.error(
-+                    report_codes
-+                    .PARSE_ERROR_COROSYNC_CONF_LINE_IS_NOT_SECTION_NOR_KEY_VALUE
-+                ),
-+            ],
-+            expected_in_processor=False
-+        )
-+
-+
-+class CommunicationIssue(FixtureMixin, TestCase):
-+    def setUp(self):
-+        self.env_assist, self.config = get_env_tools(self)
-+        self.local_nodes = generate_nodes(5)
-+        self.remote_nodes = generate_nodes(3, prefix="remote-")
-+
-+    def test_unknown_node(self):
-+        self.config.env.set_known_nodes(
-+            self.local_nodes[1:] + self.remote_nodes[1:]
-+        )
-+        self._fixture_load_configs()
-+        self.env_assist.assert_raise_library_error(
-+            lambda: dr.destroy(self.env_assist.get_env())
-+        )
-+        self.env_assist.assert_reports([
-+            fixture.error(
-+                report_codes.HOST_NOT_FOUND,
-+                host_list=self.local_nodes[:1] + self.remote_nodes[:1],
-+                force_code=report_codes.SKIP_OFFLINE_NODES,
-+            ),
-+        ])
-+
-+    def test_unknown_node_force(self):
-+        existing_nodes = self.remote_nodes[1:] + self.local_nodes[1:]
-+        self.config.env.set_known_nodes(existing_nodes)
-+        self._fixture_load_configs()
-+        self.config.http.files.remove_files(
-+            node_labels=existing_nodes,
-+            pcs_disaster_recovery_conf=True,
-+        )
-+        dr.destroy(
-+            self.env_assist.get_env(),
-+            force_flags=[report_codes.SKIP_OFFLINE_NODES],
-+        )
-+        self.env_assist.assert_reports([
-+            fixture.warn(
-+                report_codes.HOST_NOT_FOUND,
-+                host_list=self.local_nodes[:1] + self.remote_nodes[:1],
-+            ),
-+        ] + [
-+            fixture.info(
-+                report_codes.FILES_REMOVE_FROM_NODES_STARTED,
-+                file_list=[DR_CONF],
-+                node_list=existing_nodes,
-+            )
-+        ] + [
-+            fixture.info(
-+                report_codes.FILE_REMOVE_FROM_NODE_SUCCESS,
-+                file_description=DR_CONF,
-+                node=node,
-+            ) for node in existing_nodes
-+        ])
-+
-+    def test_node_issues(self):
-+        self.config.env.set_known_nodes(self.local_nodes + self.remote_nodes)
-+        self._fixture_load_configs()
-+        self.config.http.files.remove_files(
-+            pcs_disaster_recovery_conf=True,
-+            communication_list=[
-+                dict(label=node) for node in self.remote_nodes
-+            ] + [
-+                dict(
-+                    label=self.local_nodes[0],
-+                    was_connected=False,
-+                    error_msg=REASON,
-+                ),
-+                dict(
-+                    label=self.local_nodes[1],
-+                    output="invalid data",
-+                ),
-+                dict(
-+                    label=self.local_nodes[2],
-+                    output=json.dumps(dict(files={
-+                        DR_CONF: dict(
-+                            code="unexpected",
-+                            message=REASON,
-+                        ),
-+                    })),
-+                ),
-+            ] + [
-+                dict(label=node) for node in self.local_nodes[3:]
-+            ]
-+        )
-+
-+        self.env_assist.assert_raise_library_error(
-+            lambda: dr.destroy(self.env_assist.get_env())
-+        )
-+        self.env_assist.assert_reports([
-+            fixture.info(
-+                report_codes.FILES_REMOVE_FROM_NODES_STARTED,
-+                file_list=[DR_CONF],
-+                node_list=self.remote_nodes + self.local_nodes,
-+            ),
-+            fixture.error(
-+                report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
-+                command="remote/remove_file",
-+                node=self.local_nodes[0],
-+                reason=REASON,
-+            ),
-+            fixture.error(
-+                report_codes.INVALID_RESPONSE_FORMAT,
-+                node=self.local_nodes[1],
-+            ),
-+            fixture.error(
-+                report_codes.FILE_REMOVE_FROM_NODE_ERROR,
-+                file_description=DR_CONF,
-+                reason=REASON,
-+                node=self.local_nodes[2],
-+            ),
-+        ] + [
-+            fixture.info(
-+                report_codes.FILE_REMOVE_FROM_NODE_SUCCESS,
-+                file_description=DR_CONF,
-+                node=node,
-+            ) for node in self.local_nodes[3:] + self.remote_nodes
-+        ])
-diff --git a/pcs_test/tier0/lib/commands/dr/test_get_config.py b/pcs_test/tier0/lib/commands/dr/test_get_config.py
-new file mode 100644
-index 00000000..b2297c8a
---- /dev/null
-+++ b/pcs_test/tier0/lib/commands/dr/test_get_config.py
-@@ -0,0 +1,134 @@
-+from unittest import TestCase
-+
-+from pcs import settings
-+from pcs.common import (
-+    file_type_codes,
-+    report_codes,
-+)
-+from pcs.common.file import RawFileError
-+from pcs.lib.commands import dr
-+
-+from pcs_test.tools.command_env import get_env_tools
-+from pcs_test.tools import fixture
-+
-+REASON = "error msg"
-+
-+class Config(TestCase):
-+    def setUp(self):
-+        self.env_assist, self.config = get_env_tools(self)
-+
-+    def test_success(self):
-+        (self.config
-+            .raw_file.exists(
-+                file_type_codes.PCS_DR_CONFIG,
-+                settings.pcsd_dr_config_location,
-+            )
-+            .raw_file.read(
-+                file_type_codes.PCS_DR_CONFIG,
-+                settings.pcsd_dr_config_location,
-+                content="""
-+                    {
-+                        "local": {
-+                            "role": "PRIMARY"
-+                        },
-+                        "remote_sites": [
-+                            {
-+                                "nodes": [
-+                                    {
-+                                        "name": "recovery-node"
-+                                    }
-+                                ],
-+                                "role": "RECOVERY"
-+                            }
-+                        ]
-+                    }
-+                """,
-+            )
-+        )
-+        self.assertEqual(
-+            dr.get_config(self.env_assist.get_env()),
-+            {
-+                "local_site": {
-+                    "node_list": [],
-+                    "site_role": "PRIMARY",
-+                },
-+                 "remote_site_list": [
-+                    {
-+                        "node_list": [
-+                            {"name": "recovery-node"},
-+                        ],
-+                       "site_role": "RECOVERY",
-+                    },
-+                ],
-+            }
-+        )
-+
-+    def test_config_missing(self):
-+        (self.config
-+            .raw_file.exists(
-+                file_type_codes.PCS_DR_CONFIG,
-+                settings.pcsd_dr_config_location,
-+                exists=False,
-+            )
-+        )
-+        self.env_assist.assert_raise_library_error(
-+            lambda: dr.get_config(self.env_assist.get_env()),
-+        )
-+        self.env_assist.assert_reports([
-+            fixture.error(
-+                report_codes.DR_CONFIG_DOES_NOT_EXIST,
-+            ),
-+        ])
-+
-+    def test_config_read_error(self):
-+        (self.config
-+            .raw_file.exists(
-+                file_type_codes.PCS_DR_CONFIG,
-+                settings.pcsd_dr_config_location,
-+            )
-+            .raw_file.read(
-+                file_type_codes.PCS_DR_CONFIG,
-+                settings.pcsd_dr_config_location,
-+                exception_msg=REASON,
-+            )
-+        )
-+        self.env_assist.assert_raise_library_error(
-+            lambda: dr.get_config(self.env_assist.get_env()),
-+        )
-+        self.env_assist.assert_reports([
-+            fixture.error(
-+                report_codes.FILE_IO_ERROR,
-+                file_type_code=file_type_codes.PCS_DR_CONFIG,
-+                file_path=settings.pcsd_dr_config_location,
-+                operation=RawFileError.ACTION_READ,
-+                reason=REASON,
-+            ),
-+        ])
-+
-+    def test_config_parse_error(self):
-+        (self.config
-+            .raw_file.exists(
-+                file_type_codes.PCS_DR_CONFIG,
-+                settings.pcsd_dr_config_location,
-+            )
-+            .raw_file.read(
-+                file_type_codes.PCS_DR_CONFIG,
-+                settings.pcsd_dr_config_location,
-+                content="bad content",
-+            )
-+        )
-+        self.env_assist.assert_raise_library_error(
-+            lambda: dr.get_config(self.env_assist.get_env()),
-+        )
-+        self.env_assist.assert_reports([
-+            fixture.error(
-+                report_codes.PARSE_ERROR_JSON_FILE,
-+                file_type_code=file_type_codes.PCS_DR_CONFIG,
-+                file_path=settings.pcsd_dr_config_location,
-+                line_number=1,
-+                column_number=1,
-+                position=0,
-+                reason="Expecting value",
-+                full_msg="Expecting value: line 1 column 1 (char 0)",
-+            ),
-+        ])
-diff --git a/pcs_test/tier0/lib/commands/dr/test_set_recovery_site.py b/pcs_test/tier0/lib/commands/dr/test_set_recovery_site.py
-new file mode 100644
-index 00000000..06d80df1
---- /dev/null
-+++ b/pcs_test/tier0/lib/commands/dr/test_set_recovery_site.py
-@@ -0,0 +1,702 @@
-+import json
-+from unittest import TestCase
-+
-+from pcs_test.tools import fixture
-+from pcs_test.tools.command_env import get_env_tools
-+
-+from pcs import settings
-+from pcs.common import (
-+    file_type_codes,
-+    report_codes,
-+)
-+from pcs.lib.dr.config.facade import DrRole
-+from pcs.lib.commands import dr
-+
-+DR_CFG_DESC = "disaster-recovery config"
-+
-+COROSYNC_CONF_TEMPLATE = """\
-+totem {{
-+    version: 2
-+    cluster_name: cluster_name
-+}}
-+
-+nodelist {{
-+{node_list}}}
-+"""
-+
-+NODE_TEMPLATE_NO_NAME = """\
-+    node {{
-+        ring0_addr: {node}
-+        nodeid: {id}
-+    }}
-+"""
-+
-+NODE_TEMPLATE = """\
-+    node {{
-+        ring0_addr: {node}
-+        name: {node}
-+        nodeid: {id}
-+    }}
-+"""
-+
-+
-+def export_cfg(cfg_struct):
-+    return json.dumps(cfg_struct, indent=4, sort_keys=True).encode("utf-8")
-+
-+def dr_cfg_fixture(local_role, remote_role, nodes):
-+    return export_cfg(dict(
-+        local=dict(
-+            role=local_role.value,
-+        ),
-+        remote_sites=[
-+            dict(
-+                role=remote_role.value,
-+                nodes=[dict(name=node) for node in nodes],
-+            ),
-+        ]
-+    ))
-+
-+def corosync_conf_fixture(node_list):
-+    return COROSYNC_CONF_TEMPLATE.format(
-+        node_list="\n".join(node_list_fixture(node_list)),
-+    )
-+
-+def node_list_fixture(node_list):
-+    return [
-+        NODE_TEMPLATE.format(node=node, id=i)
-+        for i, node in enumerate(node_list, start=1)
-+    ]
-+
-+
-+def generate_nodes(nodes_num, prefix=""):
-+    return [f"{prefix}node{i}" for i in range(1, nodes_num + 1)]
-+
-+
-+class CheckLive(TestCase):
-+    def setUp(self):
-+        self.env_assist, self.config = get_env_tools(self)
-+
-+    def assert_live_required(self, forbidden_options):
-+        self.env_assist.assert_raise_library_error(
-+            lambda: dr.set_recovery_site(self.env_assist.get_env(), "node"),
-+            [
-+                fixture.error(
-+                    report_codes.LIVE_ENVIRONMENT_REQUIRED,
-+                    forbidden_options=forbidden_options
-+                )
-+            ],
-+            expected_in_processor=False
-+        )
-+
-+    def test_mock_corosync(self):
-+        self.config.env.set_corosync_conf_data(
-+            corosync_conf_fixture(generate_nodes(3))
-+        )
-+        self.assert_live_required([file_type_codes.COROSYNC_CONF])
-+
-+    def test_mock_cib(self):
-+        self.config.env.set_cib_data("<cib />")
-+        self.assert_live_required([file_type_codes.CIB])
-+
-+    def test_mock(self):
-+        self.config.env.set_corosync_conf_data(
-+            corosync_conf_fixture(generate_nodes(3))
-+        )
-+        self.config.env.set_cib_data("<cib />")
-+        self.assert_live_required([
-+            file_type_codes.CIB,
-+            file_type_codes.COROSYNC_CONF,
-+        ])
-+
-+
-+class SetRecoverySiteSuccess(TestCase):
-+    def setUp(self):
-+        self.env_assist, self.config = get_env_tools(self)
-+
-+    def _test_minimal(self, local_cluster_size, recovery_cluster_size):
-+        local_nodes = generate_nodes(local_cluster_size)
-+        remote_nodes = generate_nodes(recovery_cluster_size, prefix="recovery-")
-+        orig_node = remote_nodes[-1]
-+        cfg = self.config
-+        cfg.env.set_known_nodes(local_nodes + remote_nodes)
-+        cfg.raw_file.exists(
-+            file_type_codes.PCS_DR_CONFIG,
-+            settings.pcsd_dr_config_location,
-+            exists=False,
-+        )
-+        cfg.corosync_conf.load_content(corosync_conf_fixture(local_nodes))
-+        cfg.http.corosync.get_corosync_conf(
-+            corosync_conf_fixture(remote_nodes), node_labels=[orig_node]
-+        )
-+        cfg.http.files.put_files(
-+            node_labels=remote_nodes,
-+            pcs_disaster_recovery_conf=dr_cfg_fixture(
-+                DrRole.RECOVERY, DrRole.PRIMARY, local_nodes
-+            ),
-+            name="distribute_remote",
-+        )
-+        cfg.http.files.put_files(
-+            node_labels=local_nodes,
-+            pcs_disaster_recovery_conf=dr_cfg_fixture(
-+                DrRole.PRIMARY, DrRole.RECOVERY, remote_nodes
-+            ),
-+            name="distribute_local",
-+        )
-+        dr.set_recovery_site(self.env_assist.get_env(), orig_node)
-+        self.env_assist.assert_reports(
-+            [
-+                fixture.info(
-+                    report_codes.FILES_DISTRIBUTION_STARTED,
-+                    file_list=[DR_CFG_DESC],
-+                    node_list=remote_nodes,
-+                )
-+            ] + [
-+                fixture.info(
-+                    report_codes.FILE_DISTRIBUTION_SUCCESS,
-+                    file_description=DR_CFG_DESC,
-+                    node=node,
-+                ) for node in remote_nodes
-+            ] + [
-+                fixture.info(
-+                    report_codes.FILES_DISTRIBUTION_STARTED,
-+                    file_list=[DR_CFG_DESC],
-+                    node_list=local_nodes,
-+                )
-+            ] + [
-+                fixture.info(
-+                    report_codes.FILE_DISTRIBUTION_SUCCESS,
-+                    file_description=DR_CFG_DESC,
-+                    node=node,
-+                ) for node in local_nodes
-+            ]
-+        )
-+
-+    def test_minimal_local_1_remote_1(self):
-+        self._test_minimal(1, 1)
-+
-+    def test_minimal_local_1_remote_2(self):
-+        self._test_minimal(1, 2)
-+
-+    def test_minimal_local_1_remote_3(self):
-+        self._test_minimal(1, 3)
-+
-+    def test_minimal_local_2_remote_1(self):
-+        self._test_minimal(2, 1)
-+
-+    def test_minimal_local_2_remote_2(self):
-+        self._test_minimal(2, 2)
-+
-+    def test_minimal_local_2_remote_3(self):
-+        self._test_minimal(2, 3)
-+
-+    def test_minimal_local_3_remote_1(self):
-+        self._test_minimal(3, 1)
-+
-+    def test_minimal_local_3_remote_2(self):
-+        self._test_minimal(3, 2)
-+
-+    def test_minimal_local_3_remote_3(self):
-+        self._test_minimal(3, 3)
-+
-+
-+class FailureValidations(TestCase):
-+    def setUp(self):
-+        self.env_assist, self.config = get_env_tools(self)
-+        self.local_nodes = generate_nodes(4)
-+
-+    def test_dr_cfg_exist(self):
-+        orig_node = "node"
-+        cfg = self.config
-+        cfg.env.set_known_nodes(self.local_nodes + [orig_node])
-+        cfg.raw_file.exists(
-+            file_type_codes.PCS_DR_CONFIG,
-+            settings.pcsd_dr_config_location,
-+            exists=True,
-+        )
-+        cfg.corosync_conf.load_content(corosync_conf_fixture(self.local_nodes))
-+        self.env_assist.assert_raise_library_error(
-+            lambda: dr.set_recovery_site(self.env_assist.get_env(), orig_node),
-+        )
-+        self.env_assist.assert_reports([
-+            fixture.error(
-+                report_codes.DR_CONFIG_ALREADY_EXIST,
-+            )
-+        ])
-+
-+    def test_local_nodes_name_missing(self):
-+        orig_node = "node"
-+        cfg = self.config
-+        cfg.env.set_known_nodes(self.local_nodes + [orig_node])
-+        cfg.raw_file.exists(
-+            file_type_codes.PCS_DR_CONFIG,
-+            settings.pcsd_dr_config_location,
-+            exists=False,
-+        )
-+        cfg.corosync_conf.load_content(
-+            COROSYNC_CONF_TEMPLATE.format(
-+                node_list="\n".join(
-+                    [
-+                        NODE_TEMPLATE_NO_NAME.format(
-+                            node=self.local_nodes[0], id=len(self.local_nodes)
-+                        )
-+                    ] + node_list_fixture(self.local_nodes[1:])
-+                )
-+            )
-+        )
-+        self.env_assist.assert_raise_library_error(
-+            lambda: dr.set_recovery_site(self.env_assist.get_env(), orig_node),
-+        )
-+        self.env_assist.assert_reports([
-+            fixture.error(
-+                report_codes.COROSYNC_CONFIG_MISSING_NAMES_OF_NODES,
-+                fatal=True,
-+            )
-+        ])
-+
-+    def test_node_part_of_local_cluster(self):
-+        orig_node = self.local_nodes[-1]
-+        cfg = self.config
-+        cfg.env.set_known_nodes(self.local_nodes + [orig_node])
-+        cfg.raw_file.exists(
-+            file_type_codes.PCS_DR_CONFIG,
-+            settings.pcsd_dr_config_location,
-+            exists=False,
-+        )
-+        cfg.corosync_conf.load_content(corosync_conf_fixture(self.local_nodes))
-+        self.env_assist.assert_raise_library_error(
-+            lambda: dr.set_recovery_site(self.env_assist.get_env(), orig_node),
-+        )
-+        self.env_assist.assert_reports([
-+            fixture.error(
-+                report_codes.NODE_IN_LOCAL_CLUSTER,
-+                node=orig_node,
-+            )
-+        ])
-+
-+    def test_tokens_missing_for_local_nodes(self):
-+        orig_node = "node"
-+        cfg = self.config
-+        cfg.env.set_known_nodes(self.local_nodes[:-1] + [orig_node])
-+        cfg.raw_file.exists(
-+            file_type_codes.PCS_DR_CONFIG,
-+            settings.pcsd_dr_config_location,
-+            exists=False,
-+        )
-+        cfg.corosync_conf.load_content(corosync_conf_fixture(self.local_nodes))
-+        self.env_assist.assert_raise_library_error(
-+            lambda: dr.set_recovery_site(self.env_assist.get_env(), orig_node),
-+        )
-+        self.env_assist.assert_reports([
-+            fixture.error(
-+                report_codes.HOST_NOT_FOUND,
-+                host_list=self.local_nodes[-1:],
-+            )
-+        ])
-+
-+    def test_token_missing_for_node(self):
-+        orig_node = "node"
-+        cfg = self.config
-+        cfg.env.set_known_nodes(self.local_nodes)
-+        cfg.raw_file.exists(
-+            file_type_codes.PCS_DR_CONFIG,
-+            settings.pcsd_dr_config_location,
-+            exists=False,
-+        )
-+        cfg.corosync_conf.load_content(corosync_conf_fixture(self.local_nodes))
-+        self.env_assist.assert_raise_library_error(
-+            lambda: dr.set_recovery_site(self.env_assist.get_env(), orig_node),
-+        )
-+        self.env_assist.assert_reports([
-+            fixture.error(
-+                report_codes.HOST_NOT_FOUND,
-+                host_list=[orig_node],
-+            )
-+        ])
-+
-+    def test_tokens_missing_for_remote_cluster(self):
-+        remote_nodes = generate_nodes(3, prefix="recovery-")
-+        orig_node = remote_nodes[0]
-+        cfg = self.config
-+        cfg.env.set_known_nodes(self.local_nodes + remote_nodes[:-1])
-+        cfg.raw_file.exists(
-+            file_type_codes.PCS_DR_CONFIG,
-+            settings.pcsd_dr_config_location,
-+            exists=False,
-+        )
-+        cfg.corosync_conf.load_content(corosync_conf_fixture(self.local_nodes))
-+        cfg.http.corosync.get_corosync_conf(
-+            corosync_conf_fixture(remote_nodes), node_labels=[orig_node]
-+        )
-+        self.env_assist.assert_raise_library_error(
-+            lambda: dr.set_recovery_site(self.env_assist.get_env(), orig_node),
-+        )
-+        self.env_assist.assert_reports([
-+            fixture.error(
-+                report_codes.HOST_NOT_FOUND,
-+                host_list=remote_nodes[-1:],
-+            )
-+        ])
-+
-+
-+REASON = "error msg"
-+
-+
-+class FailureRemoteCorocyncConf(TestCase):
-+    def setUp(self):
-+        self.env_assist, self.config = get_env_tools(self)
-+        self.local_nodes = generate_nodes(4)
-+        self.remote_nodes = generate_nodes(3, prefix="recovery-")
-+        self.node = self.remote_nodes[0]
-+
-+        self.config.env.set_known_nodes(self.local_nodes + self.remote_nodes)
-+        self.config.raw_file.exists(
-+            file_type_codes.PCS_DR_CONFIG,
-+            settings.pcsd_dr_config_location,
-+            exists=False,
-+        )
-+        self.config.corosync_conf.load_content(
-+            corosync_conf_fixture(self.local_nodes)
-+        )
-+
-+    def test_network_issue(self):
-+        self.config.http.corosync.get_corosync_conf(
-+            communication_list=[
-+                dict(
-+                    label=self.node,
-+                    was_connected=False,
-+                    error_msg=REASON,
-+                )
-+            ]
-+        )
-+        self.env_assist.assert_raise_library_error(
-+            lambda: dr.set_recovery_site(self.env_assist.get_env(), self.node),
-+        )
-+        self.env_assist.assert_reports([
-+            fixture.warn(
-+                report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
-+                node=self.node,
-+                command="remote/get_corosync_conf",
-+                reason=REASON,
-+
-+            ),
-+            fixture.error(report_codes.UNABLE_TO_PERFORM_OPERATION_ON_ANY_NODE)
-+        ])
-+
-+    def test_file_does_not_exist(self):
-+        self.config.http.corosync.get_corosync_conf(
-+            communication_list=[
-+                dict(
-+                    label=self.node,
-+                    response_code=400,
-+                    output=REASON,
-+                )
-+            ]
-+        )
-+        self.env_assist.assert_raise_library_error(
-+            lambda: dr.set_recovery_site(self.env_assist.get_env(), self.node),
-+        )
-+        self.env_assist.assert_reports([
-+            fixture.warn(
-+                report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
-+                node=self.node,
-+                command="remote/get_corosync_conf",
-+                reason=REASON,
-+
-+            ),
-+            fixture.error(report_codes.UNABLE_TO_PERFORM_OPERATION_ON_ANY_NODE)
-+        ])
-+
-+    def test_node_names_missing(self):
-+        self.config.http.corosync.get_corosync_conf(
-+            COROSYNC_CONF_TEMPLATE.format(
-+                node_list="\n".join(
-+                    [
-+                        NODE_TEMPLATE_NO_NAME.format(
-+                            node=self.remote_nodes[-1],
-+                            id=len(self.remote_nodes),
-+                        )
-+                    ] + node_list_fixture(self.remote_nodes[:-1])
-+                )
-+            ),
-+            node_labels=[self.node],
-+        )
-+        self.env_assist.assert_raise_library_error(
-+            lambda: dr.set_recovery_site(self.env_assist.get_env(), self.node),
-+        )
-+        self.env_assist.assert_reports([
-+            fixture.error(
-+                report_codes.COROSYNC_CONFIG_MISSING_NAMES_OF_NODES,
-+                fatal=True,
-+            )
-+        ])
-+
-+
-+class FailureRemoteDrCfgDistribution(TestCase):
-+    # pylint: disable=too-many-instance-attributes
-+    def setUp(self):
-+        self.env_assist, self.config = get_env_tools(self)
-+        self.local_nodes = generate_nodes(4)
-+        self.remote_nodes = generate_nodes(3, prefix="recovery-")
-+        self.node = self.remote_nodes[0]
-+        self.failed_nodes = self.remote_nodes[-1:]
-+        successful_nodes = self.remote_nodes[:-1]
-+
-+        self.config.env.set_known_nodes(self.local_nodes + self.remote_nodes)
-+        self.config.raw_file.exists(
-+            file_type_codes.PCS_DR_CONFIG,
-+            settings.pcsd_dr_config_location,
-+            exists=False,
-+        )
-+        self.config.corosync_conf.load_content(
-+            corosync_conf_fixture(self.local_nodes)
-+        )
-+        self.config.http.corosync.get_corosync_conf(
-+            corosync_conf_fixture(self.remote_nodes), node_labels=[self.node]
-+        )
-+
-+        self.success_communication = [
-+            dict(label=node) for node in successful_nodes
-+        ]
-+        self.expected_reports = [
-+            fixture.info(
-+                report_codes.FILES_DISTRIBUTION_STARTED,
-+                file_list=[DR_CFG_DESC],
-+                node_list=self.remote_nodes,
-+            )
-+        ] + [
-+            fixture.info(
-+                report_codes.FILE_DISTRIBUTION_SUCCESS,
-+                file_description=DR_CFG_DESC,
-+                node=node,
-+            ) for node in successful_nodes
-+        ]
-+
-+    def test_write_failure(self):
-+        self.config.http.files.put_files(
-+            communication_list=self.success_communication + [
-+                dict(
-+                    label=node,
-+                    output=json.dumps(dict(files={
-+                        DR_CFG_DESC: dict(
-+                            code="unexpected",
-+                            message=REASON
-+                        ),
-+                    }))
-+                ) for node in self.failed_nodes
-+            ],
-+            pcs_disaster_recovery_conf=dr_cfg_fixture(
-+                DrRole.RECOVERY, DrRole.PRIMARY, self.local_nodes
-+            ),
-+        )
-+        self.env_assist.assert_raise_library_error(
-+            lambda: dr.set_recovery_site(self.env_assist.get_env(), self.node),
-+        )
-+        self.env_assist.assert_reports(
-+             self.expected_reports + [
-+                fixture.error(
-+                    report_codes.FILE_DISTRIBUTION_ERROR,
-+                    file_description=DR_CFG_DESC,
-+                    reason=REASON,
-+                    node=node,
-+                ) for node in self.failed_nodes
-+            ]
-+        )
-+
-+    def test_network_failure(self):
-+        self.config.http.files.put_files(
-+            communication_list=self.success_communication + [
-+                dict(
-+                    label=node,
-+                    was_connected=False,
-+                    error_msg=REASON,
-+                ) for node in self.failed_nodes
-+            ],
-+            pcs_disaster_recovery_conf=dr_cfg_fixture(
-+                DrRole.RECOVERY, DrRole.PRIMARY, self.local_nodes
-+            ),
-+        )
-+        self.env_assist.assert_raise_library_error(
-+            lambda: dr.set_recovery_site(self.env_assist.get_env(), self.node),
-+        )
-+        self.env_assist.assert_reports(
-+             self.expected_reports + [
-+                fixture.error(
-+                    report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
-+                    command="remote/put_file",
-+                    reason=REASON,
-+                    node=node,
-+                ) for node in self.failed_nodes
-+            ]
-+        )
-+
-+    def test_communication_error(self):
-+        self.config.http.files.put_files(
-+            communication_list=self.success_communication + [
-+                dict(
-+                    label=node,
-+                    response_code=400,
-+                    output=REASON,
-+                ) for node in self.failed_nodes
-+            ],
-+            pcs_disaster_recovery_conf=dr_cfg_fixture(
-+                DrRole.RECOVERY, DrRole.PRIMARY, self.local_nodes
-+            ),
-+        )
-+        self.env_assist.assert_raise_library_error(
-+            lambda: dr.set_recovery_site(self.env_assist.get_env(), self.node),
-+        )
-+        self.env_assist.assert_reports(
-+             self.expected_reports + [
-+                fixture.error(
-+                    report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
-+                    command="remote/put_file",
-+                    reason=REASON,
-+                    node=node,
-+                ) for node in self.failed_nodes
-+            ]
-+        )
-+
-+
-+class FailureLocalDrCfgDistribution(TestCase):
-+    # pylint: disable=too-many-instance-attributes
-+    def setUp(self):
-+        self.env_assist, self.config = get_env_tools(self)
-+        local_nodes = generate_nodes(4)
-+        self.remote_nodes = generate_nodes(3, prefix="recovery-")
-+        self.node = self.remote_nodes[0]
-+        self.failed_nodes = local_nodes[-1:]
-+        successful_nodes = local_nodes[:-1]
-+
-+        self.config.env.set_known_nodes(local_nodes + self.remote_nodes)
-+        self.config.raw_file.exists(
-+            file_type_codes.PCS_DR_CONFIG,
-+            settings.pcsd_dr_config_location,
-+            exists=False,
-+        )
-+        self.config.corosync_conf.load_content(
-+            corosync_conf_fixture(local_nodes)
-+        )
-+        self.config.http.corosync.get_corosync_conf(
-+            corosync_conf_fixture(self.remote_nodes), node_labels=[self.node]
-+        )
-+        self.config.http.files.put_files(
-+            node_labels=self.remote_nodes,
-+            pcs_disaster_recovery_conf=dr_cfg_fixture(
-+                DrRole.RECOVERY, DrRole.PRIMARY, local_nodes
-+            ),
-+            name="distribute_remote",
-+        )
-+
-+        self.success_communication = [
-+            dict(label=node) for node in successful_nodes
-+        ]
-+        self.expected_reports = [
-+            fixture.info(
-+                report_codes.FILES_DISTRIBUTION_STARTED,
-+                file_list=[DR_CFG_DESC],
-+                node_list=self.remote_nodes,
-+            )
-+        ] + [
-+            fixture.info(
-+                report_codes.FILE_DISTRIBUTION_SUCCESS,
-+                file_description=DR_CFG_DESC,
-+                node=node,
-+            ) for node in self.remote_nodes
-+        ] + [
-+            fixture.info(
-+                report_codes.FILES_DISTRIBUTION_STARTED,
-+                file_list=[DR_CFG_DESC],
-+                node_list=local_nodes,
-+            )
-+        ] + [
-+            fixture.info(
-+                report_codes.FILE_DISTRIBUTION_SUCCESS,
-+                file_description=DR_CFG_DESC,
-+                node=node,
-+            ) for node in successful_nodes
-+        ]
-+
-+    def test_write_failure(self):
-+        self.config.http.files.put_files(
-+            communication_list=self.success_communication + [
-+                dict(
-+                    label=node,
-+                    output=json.dumps(dict(files={
-+                        DR_CFG_DESC: dict(
-+                            code="unexpected",
-+                            message=REASON
-+                        ),
-+                    }))
-+                ) for node in self.failed_nodes
-+            ],
-+            pcs_disaster_recovery_conf=dr_cfg_fixture(
-+                DrRole.PRIMARY, DrRole.RECOVERY, self.remote_nodes
-+            ),
-+        )
-+        self.env_assist.assert_raise_library_error(
-+            lambda: dr.set_recovery_site(self.env_assist.get_env(), self.node),
-+        )
-+        self.env_assist.assert_reports(
-+             self.expected_reports + [
-+                fixture.error(
-+                    report_codes.FILE_DISTRIBUTION_ERROR,
-+                    file_description=DR_CFG_DESC,
-+                    reason=REASON,
-+                    node=node,
-+                ) for node in self.failed_nodes
-+            ]
-+        )
-+
-+    def test_network_failure(self):
-+        self.config.http.files.put_files(
-+            communication_list=self.success_communication + [
-+                dict(
-+                    label=node,
-+                    was_connected=False,
-+                    error_msg=REASON,
-+                ) for node in self.failed_nodes
-+            ],
-+            pcs_disaster_recovery_conf=dr_cfg_fixture(
-+                DrRole.PRIMARY, DrRole.RECOVERY, self.remote_nodes
-+            ),
-+        )
-+        self.env_assist.assert_raise_library_error(
-+            lambda: dr.set_recovery_site(self.env_assist.get_env(), self.node),
-+        )
-+        self.env_assist.assert_reports(
-+             self.expected_reports + [
-+                fixture.error(
-+                    report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
-+                    command="remote/put_file",
-+                    reason=REASON,
-+                    node=node,
-+                ) for node in self.failed_nodes
-+            ]
-+        )
-+
-+    def test_communication_error(self):
-+        self.config.http.files.put_files(
-+            communication_list=self.success_communication + [
-+                dict(
-+                    label=node,
-+                    response_code=400,
-+                    output=REASON,
-+                ) for node in self.failed_nodes
-+            ],
-+            pcs_disaster_recovery_conf=dr_cfg_fixture(
-+                DrRole.PRIMARY, DrRole.RECOVERY, self.remote_nodes
-+            ),
-+        )
-+        self.env_assist.assert_raise_library_error(
-+            lambda: dr.set_recovery_site(self.env_assist.get_env(), self.node),
-+        )
-+        self.env_assist.assert_reports(
-+             self.expected_reports + [
-+                fixture.error(
-+                    report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
-+                    command="remote/put_file",
-+                    reason=REASON,
-+                    node=node,
-+                ) for node in self.failed_nodes
-+            ]
-+        )
-diff --git a/pcs_test/tier0/lib/commands/dr/test_status.py b/pcs_test/tier0/lib/commands/dr/test_status.py
-new file mode 100644
-index 00000000..b46eb757
---- /dev/null
-+++ b/pcs_test/tier0/lib/commands/dr/test_status.py
-@@ -0,0 +1,756 @@
-+import json
-+import re
-+from unittest import TestCase
-+
-+from pcs import settings
-+from pcs.common import (
-+    file_type_codes,
-+    report_codes,
-+)
-+from pcs.common.dr import DrRole
-+from pcs.common.file import RawFileError
-+from pcs.lib.commands import dr
-+
-+from pcs_test.tools.command_env import get_env_tools
-+from pcs_test.tools import fixture
-+
-+
-+REASON = "error msg"
-+
-+class CheckLive(TestCase):
-+    def setUp(self):
-+        self.env_assist, self.config = get_env_tools(self)
-+
-+    def assert_live_required(self, forbidden_options):
-+        self.env_assist.assert_raise_library_error(
-+            lambda: dr.status_all_sites_plaintext(self.env_assist.get_env()),
-+            [
-+                fixture.error(
-+                    report_codes.LIVE_ENVIRONMENT_REQUIRED,
-+                    forbidden_options=forbidden_options
-+                )
-+            ],
-+            expected_in_processor=False
-+        )
-+
-+    def test_mock_corosync(self):
-+        self.config.env.set_corosync_conf_data("corosync conf")
-+        self.assert_live_required([file_type_codes.COROSYNC_CONF])
-+
-+    def test_mock_cib(self):
-+        self.config.env.set_cib_data("<cib />")
-+        self.assert_live_required([file_type_codes.CIB])
-+
-+    def test_mock(self):
-+        self.config.env.set_corosync_conf_data("corosync conf")
-+        self.config.env.set_cib_data("<cib />")
-+        self.assert_live_required([
-+            file_type_codes.CIB,
-+            file_type_codes.COROSYNC_CONF,
-+        ])
-+
-+class FixtureMixin():
-+    def _set_up(self, local_node_count=2):
-+        self.local_node_name_list = [
-+            f"node{i}" for i in range(1, local_node_count + 1)
-+        ]
-+        self.remote_node_name_list = ["recovery-node"]
-+        self.config.env.set_known_nodes(
-+            self.local_node_name_list + self.remote_node_name_list
-+        )
-+        self.local_status = "local cluster\nstatus\n"
-+        self.remote_status = "remote cluster\nstatus\n"
-+
-+    def _fixture_load_configs(self):
-+        (self.config
-+            .raw_file.exists(
-+                file_type_codes.PCS_DR_CONFIG,
-+                settings.pcsd_dr_config_location,
-+            )
-+            .raw_file.read(
-+                file_type_codes.PCS_DR_CONFIG,
-+                settings.pcsd_dr_config_location,
-+                content="""
-+                    {
-+                        "local": {
-+                            "role": "PRIMARY"
-+                        },
-+                        "remote_sites": [
-+                            {
-+                                "nodes": [
-+                                    {
-+                                        "name": "recovery-node"
-+                                    }
-+                                ],
-+                                "role": "RECOVERY"
-+                            }
-+                        ]
-+                    }
-+                """,
-+            )
-+            .corosync_conf.load(node_name_list=self.local_node_name_list)
-+        )
-+
-+    def _fixture_result(self, local_success=True, remote_success=True):
-+        return [
-+            {
-+                "local_site": True,
-+                "site_role": DrRole.PRIMARY,
-+                "status_plaintext": self.local_status if local_success else "",
-+                "status_successfully_obtained": local_success,
-+            },
-+            {
-+                "local_site": False,
-+                "site_role": DrRole.RECOVERY,
-+                "status_plaintext": (
-+                    self.remote_status if remote_success else ""
-+                ),
-+                "status_successfully_obtained": remote_success,
-+            }
-+        ]
-+
-+class Success(FixtureMixin, TestCase):
-+    def setUp(self):
-+        self.env_assist, self.config = get_env_tools(self)
-+        self._set_up()
-+
-+    def _assert_success(self, hide_inactive_resources, verbose):
-+        self._fixture_load_configs()
-+        (self.config
-+            .http.status.get_full_cluster_status_plaintext(
-+                name="http.status.get_full_cluster_status_plaintext.local",
-+                node_labels=self.local_node_name_list[:1],
-+                hide_inactive_resources=hide_inactive_resources,
-+                verbose=verbose,
-+                cluster_status_plaintext=self.local_status,
-+            )
-+            .http.status.get_full_cluster_status_plaintext(
-+                name="http.status.get_full_cluster_status_plaintext.remote",
-+                node_labels=self.remote_node_name_list[:1],
-+                hide_inactive_resources=hide_inactive_resources,
-+                verbose=verbose,
-+                cluster_status_plaintext=self.remote_status,
-+            )
-+        )
-+        result = dr.status_all_sites_plaintext(
-+            self.env_assist.get_env(),
-+            hide_inactive_resources=hide_inactive_resources,
-+            verbose=verbose,
-+        )
-+        self.assertEqual(result, self._fixture_result())
-+
-+    def test_success_minimal(self):
-+        self._assert_success(False, False)
-+
-+    def test_success_full(self):
-+        self._assert_success(False, True)
-+
-+    def test_success_hide_inactive(self):
-+        self._assert_success(True, False)
-+
-+    def test_success_all_flags(self):
-+        self._assert_success(True, True)
-+
-+    def test_local_not_running_first_node(self):
-+        self._fixture_load_configs()
-+        (self.config
-+            .http.status.get_full_cluster_status_plaintext(
-+                name="http.status.get_full_cluster_status_plaintext.local",
-+                cluster_status_plaintext=self.local_status,
-+                communication_list=[
-+                    [dict(
-+                        label=self.local_node_name_list[0],
-+                        output=json.dumps(dict(
-+                            status="error",
-+                            status_msg="",
-+                            data=None,
-+                            report_list=[
-+                                {
-+                                    "severity": "ERROR",
-+                                    "code": "CRM_MON_ERROR",
-+                                    "info": {
-+                                        "reason": REASON,
-+                                    },
-+                                    "forceable": None,
-+                                    "report_text": "translated report",
-+                                }
-+                            ]
-+                        )),
-+                    )],
-+                    [dict(
-+                        label=self.local_node_name_list[1],
-+                    )],
-+                ]
-+            )
-+            .http.status.get_full_cluster_status_plaintext(
-+                name="http.status.get_full_cluster_status_plaintext.remote",
-+                node_labels=self.remote_node_name_list[:1],
-+                cluster_status_plaintext=self.remote_status,
-+            )
-+        )
-+        result = dr.status_all_sites_plaintext(self.env_assist.get_env())
-+        self.assertEqual(result, self._fixture_result())
-+        self.env_assist.assert_reports([
-+            fixture.error(
-+                report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
-+                node=self.local_node_name_list[0],
-+                command="remote/cluster_status_plaintext",
-+                reason="translated report",
-+            ),
-+        ])
-+
-+    def test_local_not_running(self):
-+        self._fixture_load_configs()
-+        (self.config
-+            .http.status.get_full_cluster_status_plaintext(
-+                name="http.status.get_full_cluster_status_plaintext.local",
-+                cmd_status="error",
-+                cmd_status_msg="",
-+                cluster_status_plaintext="",
-+                report_list=[
-+                    {
-+                        "severity": "ERROR",
-+                        "code": "CRM_MON_ERROR",
-+                        "info": {
-+                            "reason": REASON,
-+                        },
-+                        "forceable": None,
-+                        "report_text": "translated report",
-+                    }
-+                ],
-+                communication_list=[
-+                    [dict(
-+                        label=self.local_node_name_list[0],
-+                    )],
-+                    [dict(
-+                        label=self.local_node_name_list[1],
-+                    )],
-+                ]
-+            )
-+            .http.status.get_full_cluster_status_plaintext(
-+                name="http.status.get_full_cluster_status_plaintext.remote",
-+                node_labels=self.remote_node_name_list[:1],
-+                cluster_status_plaintext=self.remote_status,
-+            )
-+        )
-+        result = dr.status_all_sites_plaintext(self.env_assist.get_env())
-+        self.assertEqual(result, self._fixture_result(local_success=False))
-+        self.env_assist.assert_reports(
-+            [
-+                fixture.error(
-+                    report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
-+                    node=node,
-+                    command="remote/cluster_status_plaintext",
-+                    reason="translated report",
-+                )
-+                for node in self.local_node_name_list
-+            ]
-+        )
-+
-+    def test_remote_not_running(self):
-+        self._fixture_load_configs()
-+        (self.config
-+            .http.status.get_full_cluster_status_plaintext(
-+                name="http.status.get_full_cluster_status_plaintext.local",
-+                node_labels=self.local_node_name_list[:1],
-+                cluster_status_plaintext=self.local_status,
-+            )
-+            .http.status.get_full_cluster_status_plaintext(
-+                name="http.status.get_full_cluster_status_plaintext.remote",
-+                node_labels=self.remote_node_name_list[:1],
-+                cmd_status="error",
-+                cmd_status_msg="",
-+                cluster_status_plaintext="",
-+                report_list=[
-+                    {
-+                        "severity": "ERROR",
-+                        "code": "CRM_MON_ERROR",
-+                        "info": {
-+                            "reason": REASON,
-+                        },
-+                        "forceable": None,
-+                        "report_text": "translated report",
-+                    }
-+                ],
-+            )
-+        )
-+        result = dr.status_all_sites_plaintext(self.env_assist.get_env())
-+        self.assertEqual(result, self._fixture_result(remote_success=False))
-+        self.env_assist.assert_reports(
-+            [
-+                fixture.error(
-+                    report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
-+                    node=node,
-+                    command="remote/cluster_status_plaintext",
-+                    reason="translated report",
-+                )
-+                for node in self.remote_node_name_list
-+            ]
-+        )
-+
-+    def test_both_not_running(self):
-+        self._fixture_load_configs()
-+        (self.config
-+            .http.status.get_full_cluster_status_plaintext(
-+                name="http.status.get_full_cluster_status_plaintext.local",
-+                cmd_status="error",
-+                cmd_status_msg="",
-+                cluster_status_plaintext="",
-+                report_list=[
-+                    {
-+                        "severity": "ERROR",
-+                        "code": "CRM_MON_ERROR",
-+                        "info": {
-+                            "reason": REASON,
-+                        },
-+                        "forceable": None,
-+                        "report_text": "translated report",
-+                    }
-+                ],
-+                communication_list=[
-+                    [dict(
-+                        label=self.local_node_name_list[0],
-+                    )],
-+                    [dict(
-+                        label=self.local_node_name_list[1],
-+                    )],
-+                ]
-+            )
-+            .http.status.get_full_cluster_status_plaintext(
-+                name="http.status.get_full_cluster_status_plaintext.remote",
-+                node_labels=self.remote_node_name_list[:1],
-+                cmd_status="error",
-+                cmd_status_msg="",
-+                cluster_status_plaintext="",
-+                report_list=[
-+                    {
-+                        "severity": "ERROR",
-+                        "code": "CRM_MON_ERROR",
-+                        "info": {
-+                            "reason": REASON,
-+                        },
-+                        "forceable": None,
-+                        "report_text": "translated report",
-+                    }
-+                ],
-+            )
-+        )
-+        result = dr.status_all_sites_plaintext(self.env_assist.get_env())
-+        self.assertEqual(result, self._fixture_result(
-+            local_success=False, remote_success=False
-+        ))
-+        self.env_assist.assert_reports(
-+            [
-+                fixture.error(
-+                    report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
-+                    node=node,
-+                    command="remote/cluster_status_plaintext",
-+                    reason="translated report",
-+                )
-+                for node in (
-+                    self.local_node_name_list + self.remote_node_name_list
-+                )
-+            ]
-+        )
-+
-+
-+class CommunicationIssue(FixtureMixin, TestCase):
-+    def setUp(self):
-+        self.env_assist, self.config = get_env_tools(self)
-+        self._set_up()
-+
-+    def test_unknown_node(self):
-+        self.config.env.set_known_nodes(
-+            self.local_node_name_list[1:] + self.remote_node_name_list
-+        )
-+        self._fixture_load_configs()
-+        (self.config
-+            .http.status.get_full_cluster_status_plaintext(
-+                name="http.status.get_full_cluster_status_plaintext.local",
-+                node_labels=self.local_node_name_list[1:],
-+                cluster_status_plaintext=self.local_status,
-+            )
-+            .http.status.get_full_cluster_status_plaintext(
-+                name="http.status.get_full_cluster_status_plaintext.remote",
-+                node_labels=self.remote_node_name_list[:1],
-+                cluster_status_plaintext=self.remote_status,
-+            )
-+        )
-+        result = dr.status_all_sites_plaintext(self.env_assist.get_env())
-+        self.assertEqual(result, self._fixture_result())
-+        self.env_assist.assert_reports([
-+            fixture.warn(
-+                report_codes.HOST_NOT_FOUND,
-+                host_list=["node1"],
-+            ),
-+        ])
-+
-+    def test_unknown_all_nodes_in_site(self):
-+        self.config.env.set_known_nodes(
-+            self.local_node_name_list
-+        )
-+        self._fixture_load_configs()
-+        self.env_assist.assert_raise_library_error(
-+            lambda: dr.status_all_sites_plaintext(self.env_assist.get_env()),
-+        )
-+        self.env_assist.assert_reports([
-+            fixture.warn(
-+                report_codes.HOST_NOT_FOUND,
-+                host_list=self.remote_node_name_list,
-+            ),
-+            fixture.error(
-+                report_codes.NONE_HOST_FOUND,
-+            ),
-+        ])
-+
-+    def test_missing_node_names(self):
-+        self._fixture_load_configs()
-+        coro_call = self.config.calls.get("corosync_conf.load")
-+        (self.config
-+            .http.status.get_full_cluster_status_plaintext(
-+                name="http.status.get_full_cluster_status_plaintext.local",
-+                node_labels=[],
-+            )
-+            .http.status.get_full_cluster_status_plaintext(
-+                name="http.status.get_full_cluster_status_plaintext.remote",
-+                node_labels=self.remote_node_name_list[:1],
-+                cluster_status_plaintext=self.remote_status,
-+            )
-+        )
-+        coro_call.content = re.sub(r"name: node\d", "", coro_call.content)
-+        result = dr.status_all_sites_plaintext(self.env_assist.get_env())
-+        self.assertEqual(result, self._fixture_result(local_success=False))
-+        self.env_assist.assert_reports([
-+            fixture.warn(
-+                report_codes.COROSYNC_CONFIG_MISSING_NAMES_OF_NODES,
-+                fatal=False,
-+            ),
-+        ])
-+
-+    def test_node_issues(self):
-+        self._set_up(local_node_count=7)
-+        self._fixture_load_configs()
-+        (self.config
-+            .http.status.get_full_cluster_status_plaintext(
-+                name="http.status.get_full_cluster_status_plaintext.local",
-+                cluster_status_plaintext=self.local_status,
-+                communication_list=[
-+                    [dict(
-+                        label=self.local_node_name_list[0],
-+                        was_connected=False,
-+                    )],
-+                    [dict(
-+                        label=self.local_node_name_list[1],
-+                        response_code=401,
-+                    )],
-+                    [dict(
-+                        label=self.local_node_name_list[2],
-+                        response_code=500,
-+                    )],
-+                    [dict(
-+                        label=self.local_node_name_list[3],
-+                        response_code=404,
-+                    )],
-+                    [dict(
-+                        label=self.local_node_name_list[4],
-+                        output="invalid data",
-+                    )],
-+                    [dict(
-+                        label=self.local_node_name_list[5],
-+                        output=json.dumps(dict(status="success"))
-+                    )],
-+                    [dict(
-+                        label=self.local_node_name_list[6],
-+                    )],
-+                ]
-+            )
-+            .http.status.get_full_cluster_status_plaintext(
-+                name="http.status.get_full_cluster_status_plaintext.remote",
-+                node_labels=self.remote_node_name_list[:1],
-+                cluster_status_plaintext=self.remote_status,
-+            )
-+        )
-+        result = dr.status_all_sites_plaintext(self.env_assist.get_env())
-+        self.assertEqual(result, self._fixture_result())
-+        self.env_assist.assert_reports([
-+            fixture.warn(
-+                report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
-+                command="remote/cluster_status_plaintext",
-+                node="node1",
-+                reason=None,
-+            ),
-+            fixture.warn(
-+                report_codes.NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED,
-+                command="remote/cluster_status_plaintext",
-+                node="node2",
-+                reason="HTTP error: 401",
-+            ),
-+            fixture.warn(
-+                report_codes.NODE_COMMUNICATION_ERROR,
-+                command="remote/cluster_status_plaintext",
-+                node="node3",
-+                reason="HTTP error: 500",
-+            ),
-+            fixture.warn(
-+                report_codes.NODE_COMMUNICATION_ERROR_UNSUPPORTED_COMMAND,
-+                command="remote/cluster_status_plaintext",
-+                node="node4",
-+                reason="HTTP error: 404",
-+            ),
-+            fixture.warn(
-+                report_codes.INVALID_RESPONSE_FORMAT,
-+                node="node5",
-+            ),
-+            fixture.warn(
-+                report_codes.INVALID_RESPONSE_FORMAT,
-+                node="node6",
-+            ),
-+        ])
-+
-+    def test_local_site_down(self):
-+        self._fixture_load_configs()
-+        (self.config
-+            .http.status.get_full_cluster_status_plaintext(
-+                name="http.status.get_full_cluster_status_plaintext.local",
-+                cluster_status_plaintext=self.local_status,
-+                communication_list=[
-+                    [dict(
-+                        label=self.local_node_name_list[0],
-+                        was_connected=False,
-+                    )],
-+                    [dict(
-+                        label=self.local_node_name_list[1],
-+                        was_connected=False,
-+                    )],
-+                ]
-+            )
-+            .http.status.get_full_cluster_status_plaintext(
-+                name="http.status.get_full_cluster_status_plaintext.remote",
-+                node_labels=self.remote_node_name_list[:1],
-+                cluster_status_plaintext=self.remote_status,
-+            )
-+        )
-+        result = dr.status_all_sites_plaintext(self.env_assist.get_env())
-+        self.assertEqual(result, self._fixture_result(local_success=False))
-+        self.env_assist.assert_reports([
-+            fixture.warn(
-+                report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
-+                command="remote/cluster_status_plaintext",
-+                node="node1",
-+                reason=None,
-+            ),
-+            fixture.warn(
-+                report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
-+                command="remote/cluster_status_plaintext",
-+                node="node2",
-+                reason=None,
-+            ),
-+        ])
-+
-+    def test_remote_site_down(self):
-+        self._fixture_load_configs()
-+        (self.config
-+            .http.status.get_full_cluster_status_plaintext(
-+                name="http.status.get_full_cluster_status_plaintext.local",
-+                node_labels=self.local_node_name_list[:1],
-+                cluster_status_plaintext=self.local_status,
-+            )
-+            .http.status.get_full_cluster_status_plaintext(
-+                name="http.status.get_full_cluster_status_plaintext.remote",
-+                cluster_status_plaintext=self.remote_status,
-+                communication_list=[
-+                    [dict(
-+                        label=self.remote_node_name_list[0],
-+                        was_connected=False,
-+                    )],
-+                ]
-+            )
-+        )
-+        result = dr.status_all_sites_plaintext(self.env_assist.get_env())
-+        self.assertEqual(result, self._fixture_result(remote_success=False))
-+        self.env_assist.assert_reports([
-+            fixture.warn(
-+                report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
-+                command="remote/cluster_status_plaintext",
-+                node="recovery-node",
-+                reason=None,
-+            ),
-+        ])
-+
-+    def test_both_sites_down(self):
-+        self._fixture_load_configs()
-+        (self.config
-+            .http.status.get_full_cluster_status_plaintext(
-+                name="http.status.get_full_cluster_status_plaintext.local",
-+                cluster_status_plaintext=self.local_status,
-+                communication_list=[
-+                    [dict(
-+                        label=self.local_node_name_list[0],
-+                        was_connected=False,
-+                    )],
-+                    [dict(
-+                        label=self.local_node_name_list[1],
-+                        was_connected=False,
-+                    )],
-+                ]
-+            )
-+            .http.status.get_full_cluster_status_plaintext(
-+                name="http.status.get_full_cluster_status_plaintext.remote",
-+                cluster_status_plaintext=self.remote_status,
-+                communication_list=[
-+                    [dict(
-+                        label=self.remote_node_name_list[0],
-+                        was_connected=False,
-+                    )],
-+                ]
-+            )
-+        )
-+        result = dr.status_all_sites_plaintext(self.env_assist.get_env())
-+        self.assertEqual(
-+            result,
-+            self._fixture_result(local_success=False, remote_success=False)
-+        )
-+        self.env_assist.assert_reports([
-+            fixture.warn(
-+                report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
-+                command="remote/cluster_status_plaintext",
-+                node="node1",
-+                reason=None,
-+            ),
-+            fixture.warn(
-+                report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
-+                command="remote/cluster_status_plaintext",
-+                node="node2",
-+                reason=None,
-+            ),
-+            fixture.warn(
-+                report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
-+                command="remote/cluster_status_plaintext",
-+                node="recovery-node",
-+                reason=None,
-+            ),
-+        ])
-+
-+
-+class FatalConfigIssue(TestCase):
-+    def setUp(self):
-+        self.env_assist, self.config = get_env_tools(self)
-+
-+    def test_config_missing(self):
-+        (self.config
-+            .raw_file.exists(
-+                file_type_codes.PCS_DR_CONFIG,
-+                settings.pcsd_dr_config_location,
-+                exists=False,
-+            )
-+        )
-+        self.env_assist.assert_raise_library_error(
-+            lambda: dr.status_all_sites_plaintext(self.env_assist.get_env()),
-+        )
-+        self.env_assist.assert_reports([
-+            fixture.error(
-+                report_codes.DR_CONFIG_DOES_NOT_EXIST,
-+            ),
-+        ])
-+
-+    def test_config_read_error(self):
-+        (self.config
-+            .raw_file.exists(
-+                file_type_codes.PCS_DR_CONFIG,
-+                settings.pcsd_dr_config_location,
-+            )
-+            .raw_file.read(
-+                file_type_codes.PCS_DR_CONFIG,
-+                settings.pcsd_dr_config_location,
-+                exception_msg=REASON,
-+            )
-+        )
-+        self.env_assist.assert_raise_library_error(
-+            lambda: dr.status_all_sites_plaintext(self.env_assist.get_env()),
-+        )
-+        self.env_assist.assert_reports([
-+            fixture.error(
-+                report_codes.FILE_IO_ERROR,
-+                file_type_code=file_type_codes.PCS_DR_CONFIG,
-+                file_path=settings.pcsd_dr_config_location,
-+                operation=RawFileError.ACTION_READ,
-+                reason=REASON,
-+            ),
-+        ])
-+
-+    def test_config_parse_error(self):
-+        (self.config
-+            .raw_file.exists(
-+                file_type_codes.PCS_DR_CONFIG,
-+                settings.pcsd_dr_config_location,
-+            )
-+            .raw_file.read(
-+                file_type_codes.PCS_DR_CONFIG,
-+                settings.pcsd_dr_config_location,
-+                content="bad content",
-+            )
-+        )
-+        self.env_assist.assert_raise_library_error(
-+            lambda: dr.status_all_sites_plaintext(self.env_assist.get_env()),
-+        )
-+        self.env_assist.assert_reports([
-+            fixture.error(
-+                report_codes.PARSE_ERROR_JSON_FILE,
-+                file_type_code=file_type_codes.PCS_DR_CONFIG,
-+                file_path=settings.pcsd_dr_config_location,
-+                line_number=1,
-+                column_number=1,
-+                position=0,
-+                reason="Expecting value",
-+                full_msg="Expecting value: line 1 column 1 (char 0)",
-+            ),
-+        ])
-+
-+    def test_corosync_conf_read_error(self):
-+        (self.config
-+            .raw_file.exists(
-+                file_type_codes.PCS_DR_CONFIG,
-+                settings.pcsd_dr_config_location,
-+            )
-+            .raw_file.read(
-+                file_type_codes.PCS_DR_CONFIG,
-+                settings.pcsd_dr_config_location,
-+                content="{}",
-+            )
-+            .corosync_conf.load_content("", exception_msg=REASON)
-+        )
-+        self.env_assist.assert_raise_library_error(
-+            lambda: dr.status_all_sites_plaintext(self.env_assist.get_env()),
-+            [
-+                fixture.error(
-+                    report_codes.UNABLE_TO_READ_COROSYNC_CONFIG,
-+                    path=settings.corosync_conf_file,
-+                    reason=REASON,
-+                ),
-+            ],
-+            expected_in_processor=False
-+        )
-+
-+    def test_corosync_conf_parse_error(self):
-+        (self.config
-+            .raw_file.exists(
-+                file_type_codes.PCS_DR_CONFIG,
-+                settings.pcsd_dr_config_location,
-+            )
-+            .raw_file.read(
-+                file_type_codes.PCS_DR_CONFIG,
-+                settings.pcsd_dr_config_location,
-+                content="{}",
-+            )
-+            .corosync_conf.load_content("wrong {\n  corosync")
-+        )
-+        self.env_assist.assert_raise_library_error(
-+            lambda: dr.status_all_sites_plaintext(self.env_assist.get_env()),
-+            [
-+                fixture.error(
-+                    report_codes
-+                    .PARSE_ERROR_COROSYNC_CONF_LINE_IS_NOT_SECTION_NOR_KEY_VALUE
-+                ),
-+            ],
-+            expected_in_processor=False
-+        )
-diff --git a/pcs_test/tier0/lib/communication/test_status.py b/pcs_test/tier0/lib/communication/test_status.py
-new file mode 100644
-index 00000000..b8db7a73
---- /dev/null
-+++ b/pcs_test/tier0/lib/communication/test_status.py
-@@ -0,0 +1,7 @@
-+from unittest import TestCase
-+
-+class GetFullClusterStatusPlaintext(TestCase):
-+    """
-+    tested in:
-+        pcs_test.tier0.lib.commands.dr.test_status
-+    """
-diff --git a/pcs_test/tier0/lib/dr/__init__.py b/pcs_test/tier0/lib/dr/__init__.py
-new file mode 100644
-index 00000000..e69de29b
-diff --git a/pcs_test/tier0/lib/dr/test_facade.py b/pcs_test/tier0/lib/dr/test_facade.py
-new file mode 100644
-index 00000000..baa17b1e
---- /dev/null
-+++ b/pcs_test/tier0/lib/dr/test_facade.py
-@@ -0,0 +1,138 @@
-+from unittest import TestCase
-+
-+from pcs.common.dr import DrRole
-+from pcs.lib.dr.config import facade
-+
-+
-+class Facade(TestCase):
-+    def test_create(self):
-+        for role in DrRole:
-+            with self.subTest(local_role=role.value):
-+                self.assertEqual(
-+                    dict(
-+                        local=dict(
-+                            role=role.value,
-+                        ),
-+                        remote_sites=[],
-+                    ),
-+                    facade.Facade.create(role).config,
-+                )
-+
-+    def test_local_role(self):
-+        for role in DrRole:
-+            with self.subTest(local_role=role.value):
-+                cfg = facade.Facade({
-+                    "local": {
-+                        "role": role.value,
-+                    },
-+                    "remote_sites": [
-+                    ],
-+                })
-+                self.assertEqual(cfg.local_role, role)
-+
-+    def test_add_site(self):
-+        node_list = [f"node{i}" for i in range(4)]
-+        cfg = facade.Facade.create(DrRole.PRIMARY)
-+        cfg.add_site(DrRole.RECOVERY, node_list)
-+        self.assertEqual(
-+            dict(
-+                local=dict(
-+                    role=DrRole.PRIMARY.value,
-+                ),
-+                remote_sites=[
-+                    dict(
-+                        role=DrRole.RECOVERY.value,
-+                        nodes=[dict(name=node) for node in node_list],
-+                    ),
-+                ]
-+            ),
-+            cfg.config
-+        )
-+
-+class GetRemoteSiteList(TestCase):
-+    def test_no_sites(self):
-+        cfg = facade.Facade({
-+            "local": {
-+                "role": DrRole.PRIMARY.value,
-+            },
-+            "remote_sites": [
-+            ],
-+        })
-+        self.assertEqual(
-+            cfg.get_remote_site_list(),
-+            []
-+        )
-+
-+    def test_one_site(self):
-+        cfg = facade.Facade({
-+            "local": {
-+                "role": DrRole.PRIMARY.value,
-+            },
-+            "remote_sites": [
-+                {
-+                    "role": DrRole.RECOVERY.value,
-+                    "nodes": [
-+                        {"name": "node1"},
-+                    ],
-+                },
-+            ],
-+        })
-+        self.assertEqual(
-+            cfg.get_remote_site_list(),
-+            [
-+                facade.DrSite(role=DrRole.RECOVERY, node_name_list=["node1"]),
-+            ]
-+        )
-+
-+    def test_more_sites(self):
-+        cfg = facade.Facade({
-+            "local": {
-+                "role": DrRole.RECOVERY.value,
-+            },
-+            "remote_sites": [
-+                {
-+                    "role": DrRole.PRIMARY.value,
-+                    "nodes": [
-+                        {"name": "nodeA1"},
-+                        {"name": "nodeA2"},
-+                    ],
-+                },
-+                {
-+                    "role": DrRole.RECOVERY.value,
-+                    "nodes": [
-+                        {"name": "nodeB1"},
-+                        {"name": "nodeB2"},
-+                    ],
-+                },
-+            ],
-+        })
-+        self.assertEqual(
-+            cfg.get_remote_site_list(),
-+            [
-+                facade.DrSite(
-+                    role=DrRole.PRIMARY, node_name_list=["nodeA1", "nodeA2"]
-+                ),
-+                facade.DrSite(
-+                    role=DrRole.RECOVERY, node_name_list=["nodeB1", "nodeB2"]
-+                ),
-+            ]
-+        )
-+
-+    def test_no_nodes(self):
-+        cfg = facade.Facade({
-+            "local": {
-+                "role": DrRole.PRIMARY.value,
-+            },
-+            "remote_sites": [
-+                {
-+                    "role": DrRole.RECOVERY.value,
-+                    "nodes": [],
-+                },
-+            ],
-+        })
-+        self.assertEqual(
-+            cfg.get_remote_site_list(),
-+            [
-+                facade.DrSite(role=DrRole.RECOVERY, node_name_list=[]),
-+            ]
-+        )
-diff --git a/pcs_test/tier0/lib/test_env.py b/pcs_test/tier0/lib/test_env.py
-index edab9dc6..5c1c6a39 100644
---- a/pcs_test/tier0/lib/test_env.py
-+++ b/pcs_test/tier0/lib/test_env.py
-@@ -9,7 +9,7 @@ from pcs_test.tools.misc import (
-     get_test_resource as rc,
- )
- 
--from pcs.common import report_codes
-+from pcs.common import file_type_codes, report_codes
- from pcs.lib.env import LibraryEnvironment
- from pcs.lib.errors import ReportItemSeverity as severity
- 
-@@ -57,6 +57,46 @@ class LibraryEnvironmentTest(TestCase):
-         env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
-         self.assertEqual([], env.user_groups)
- 
-+class GhostFileCodes(TestCase):
-+    def setUp(self):
-+        self.mock_logger = mock.MagicMock(logging.Logger)
-+        self.mock_reporter = MockLibraryReportProcessor()
-+
-+    def _fixture_get_env(self, cib_data=None, corosync_conf_data=None):
-+        return LibraryEnvironment(
-+            self.mock_logger,
-+            self.mock_reporter,
-+            cib_data=cib_data,
-+            corosync_conf_data=corosync_conf_data
-+        )
-+
-+    def test_nothing(self):
-+        self.assertEqual(
-+            self._fixture_get_env().ghost_file_codes,
-+            set()
-+        )
-+
-+    def test_corosync(self):
-+        self.assertEqual(
-+            self._fixture_get_env(corosync_conf_data="x").ghost_file_codes,
-+            set([file_type_codes.COROSYNC_CONF])
-+        )
-+
-+    def test_cib(self):
-+        self.assertEqual(
-+            self._fixture_get_env(cib_data="x").ghost_file_codes,
-+            set([file_type_codes.CIB])
-+        )
-+
-+    def test_all(self):
-+        self.assertEqual(
-+            self._fixture_get_env(
-+                cib_data="x",
-+                corosync_conf_data="x",
-+            ).ghost_file_codes,
-+            set([file_type_codes.COROSYNC_CONF, file_type_codes.CIB])
-+        )
-+
- @patch_env("CommandRunner")
- class CmdRunner(TestCase):
-     def setUp(self):
-diff --git a/pcs_test/tools/command_env/config_corosync_conf.py b/pcs_test/tools/command_env/config_corosync_conf.py
-index 3db57cee..a0bd9f33 100644
---- a/pcs_test/tools/command_env/config_corosync_conf.py
-+++ b/pcs_test/tools/command_env/config_corosync_conf.py
-@@ -9,9 +9,14 @@ class CorosyncConf:
-         self.__calls = call_collection
- 
-     def load_content(
--        self, content, name="corosync_conf.load_content", instead=None
-+        self, content, name="corosync_conf.load_content", instead=None,
-+        exception_msg=None
-     ):
--        self.__calls.place(name, Call(content), instead=instead)
-+        self.__calls.place(
-+            name,
-+            Call(content, exception_msg=exception_msg),
-+            instead=instead
-+        )
- 
-     def load(
-         self, node_name_list=None, name="corosync_conf.load",
-diff --git a/pcs_test/tools/command_env/config_http.py b/pcs_test/tools/command_env/config_http.py
-index 6827c2b1..911a82df 100644
---- a/pcs_test/tools/command_env/config_http.py
-+++ b/pcs_test/tools/command_env/config_http.py
-@@ -7,6 +7,7 @@ from pcs_test.tools.command_env.config_http_files import FilesShortcuts
- from pcs_test.tools.command_env.config_http_host import HostShortcuts
- from pcs_test.tools.command_env.config_http_pcmk import PcmkShortcuts
- from pcs_test.tools.command_env.config_http_sbd import SbdShortcuts
-+from pcs_test.tools.command_env.config_http_status import StatusShortcuts
- from pcs_test.tools.command_env.mock_node_communicator import(
-     place_communication,
-     place_requests,
-@@ -34,6 +35,7 @@ def _mutual_exclusive(param_names, **kwargs):
- 
- 
- class HttpConfig:
-+    # pylint: disable=too-many-instance-attributes
-     def __init__(self, call_collection, wrap_helper):
-         self.__calls = call_collection
- 
-@@ -43,6 +45,7 @@ class HttpConfig:
-         self.host = wrap_helper(HostShortcuts(self.__calls))
-         self.pcmk = wrap_helper(PcmkShortcuts(self.__calls))
-         self.sbd = wrap_helper(SbdShortcuts(self.__calls))
-+        self.status = wrap_helper(StatusShortcuts(self.__calls))
- 
-     def add_communication(self, name, communication_list, **kwargs):
-         """
-diff --git a/pcs_test/tools/command_env/config_http_corosync.py b/pcs_test/tools/command_env/config_http_corosync.py
-index f7df73c1..3d89e649 100644
---- a/pcs_test/tools/command_env/config_http_corosync.py
-+++ b/pcs_test/tools/command_env/config_http_corosync.py
-@@ -29,6 +29,30 @@ class CorosyncShortcuts:
-             output='{"corosync":false}'
-         )
- 
-+    def get_corosync_conf(
-+        self,
-+        corosync_conf="",
-+        node_labels=None,
-+        communication_list=None,
-+        name="http.corosync.get_corosync_conf",
-+    ):
-+        """
-+        Create a call for loading corosync.conf text from remote nodes
-+
-+        string corosync_conf -- corosync.conf text to be loaded
-+        list node_labels -- create success responses from these nodes
-+        list communication_list -- create custom responses
-+        string name -- the key of this call
-+        """
-+        place_multinode_call(
-+            self.__calls,
-+            name,
-+            node_labels,
-+            communication_list,
-+            action="remote/get_corosync_conf",
-+            output=corosync_conf,
-+        )
-+
-     def set_corosync_conf(
-         self, corosync_conf, node_labels=None, communication_list=None,
-         name="http.corosync.set_corosync_conf"
-diff --git a/pcs_test/tools/command_env/config_http_files.py b/pcs_test/tools/command_env/config_http_files.py
-index 8cc9b878..b4e93d64 100644
---- a/pcs_test/tools/command_env/config_http_files.py
-+++ b/pcs_test/tools/command_env/config_http_files.py
-@@ -11,9 +11,11 @@ class FilesShortcuts:
- 
-     def put_files(
-         self, node_labels=None, pcmk_authkey=None, corosync_authkey=None,
--        corosync_conf=None, pcs_settings_conf=None, communication_list=None,
-+        corosync_conf=None, pcs_disaster_recovery_conf=None,
-+        pcs_settings_conf=None, communication_list=None,
-         name="http.files.put_files",
-     ):
-+        # pylint: disable=too-many-arguments
-         """
-         Create a call for the files distribution to the nodes.
- 
-@@ -21,6 +23,7 @@ class FilesShortcuts:
-         pcmk_authkey bytes -- content of pacemaker authkey file
-         corosync_authkey bytes -- content of corosync authkey file
-         corosync_conf string -- content of corosync.conf
-+        pcs_disaster_recovery_conf string -- content of pcs DR config
-         pcs_settings_conf string -- content of pcs_settings.conf
-         communication_list list -- create custom responses
-         name string -- the key of this call
-@@ -58,6 +61,17 @@ class FilesShortcuts:
-             )
-             output_data[file_id] = written_output_dict
- 
-+        if pcs_disaster_recovery_conf:
-+            file_id = "disaster-recovery config"
-+            input_data[file_id] = dict(
-+                data=base64.b64encode(
-+                    pcs_disaster_recovery_conf
-+                ).decode("utf-8"),
-+                type="pcs_disaster_recovery_conf",
-+                rewrite_existing=True,
-+            )
-+            output_data[file_id] = written_output_dict
-+
-         if pcs_settings_conf:
-             file_id = "pcs_settings.conf"
-             input_data[file_id] = dict(
-@@ -78,7 +92,8 @@ class FilesShortcuts:
-         )
- 
-     def remove_files(
--        self, node_labels=None, pcsd_settings=False, communication_list=None,
-+        self, node_labels=None, pcsd_settings=False,
-+        pcs_disaster_recovery_conf=False, communication_list=None,
-         name="http.files.remove_files"
-     ):
-         """
-@@ -86,6 +101,7 @@ class FilesShortcuts:
- 
-         node_labels list -- create success responses from these nodes
-         pcsd_settings bool -- if True, remove file pcsd_settings
-+        pcs_disaster_recovery_conf bool -- if True, remove pcs DR config
-         communication_list list -- create custom responses
-         name string -- the key of this call
-         """
-@@ -100,6 +116,14 @@ class FilesShortcuts:
-                 message="",
-             )
- 
-+        if pcs_disaster_recovery_conf:
-+            file_id = "pcs disaster-recovery config"
-+            input_data[file_id] = dict(type="pcs_disaster_recovery_conf")
-+            output_data[file_id] = dict(
-+                code="deleted",
-+                message="",
-+            )
-+
-         place_multinode_call(
-             self.__calls,
-             name,
-diff --git a/pcs_test/tools/command_env/config_http_status.py b/pcs_test/tools/command_env/config_http_status.py
-new file mode 100644
-index 00000000..888b27bb
---- /dev/null
-+++ b/pcs_test/tools/command_env/config_http_status.py
-@@ -0,0 +1,52 @@
-+import json
-+
-+from pcs_test.tools.command_env.mock_node_communicator import (
-+    place_multinode_call,
-+)
-+
-+class StatusShortcuts:
-+    def __init__(self, calls):
-+        self.__calls = calls
-+
-+    def get_full_cluster_status_plaintext(
-+        self, node_labels=None, communication_list=None,
-+        name="http.status.get_full_cluster_status_plaintext",
-+        hide_inactive_resources=False, verbose=False,
-+        cmd_status="success", cmd_status_msg="", report_list=None,
-+        cluster_status_plaintext="",
-+    ):
-+        # pylint: disable=too-many-arguments
-+        """
-+        Create a call for getting cluster status in plaintext
-+
-+        node_labels list -- create success responses from these nodes
-+        communication_list list -- create custom responses
-+        name string -- the key of this call
-+        bool hide_inactive_resources -- input flag
-+        bool verbose -- input flag
-+        string cmd_status -- did the command succeed?
-+        string_cmd_status_msg -- details for cmd_status
-+        iterable report_list -- reports from a remote node
-+        string cluster_status_plaintext -- resulting cluster status
-+        """
-+        report_list = report_list or []
-+        place_multinode_call(
-+            self.__calls,
-+            name,
-+            node_labels,
-+            communication_list,
-+            action="remote/cluster_status_plaintext",
-+            param_list=[(
-+                "data_json",
-+                json.dumps(dict(
-+                    hide_inactive_resources=hide_inactive_resources,
-+                    verbose=verbose,
-+                ))
-+            )],
-+            output=json.dumps(dict(
-+                status=cmd_status,
-+                status_msg=cmd_status_msg,
-+                data=cluster_status_plaintext,
-+                report_list=report_list,
-+            )),
-+        )
-diff --git a/pcs_test/tools/command_env/mock_get_local_corosync_conf.py b/pcs_test/tools/command_env/mock_get_local_corosync_conf.py
-index 854cb8f0..01eca5f1 100644
---- a/pcs_test/tools/command_env/mock_get_local_corosync_conf.py
-+++ b/pcs_test/tools/command_env/mock_get_local_corosync_conf.py
-@@ -1,10 +1,15 @@
-+from pcs import settings
-+from pcs.lib import reports
-+from pcs.lib.errors import LibraryError
-+
- CALL_TYPE_GET_LOCAL_COROSYNC_CONF = "CALL_TYPE_GET_LOCAL_COROSYNC_CONF"
- 
- class Call:
-     type = CALL_TYPE_GET_LOCAL_COROSYNC_CONF
- 
--    def __init__(self, content):
-+    def __init__(self, content, exception_msg=None):
-         self.content = content
-+        self.exception_msg = exception_msg
- 
-     def __repr__(self):
-         return str("<GetLocalCorosyncConf>")
-@@ -13,5 +18,10 @@ class Call:
- def get_get_local_corosync_conf(call_queue):
-     def get_local_corosync_conf():
-         _, expected_call = call_queue.take(CALL_TYPE_GET_LOCAL_COROSYNC_CONF)
-+        if expected_call.exception_msg:
-+            raise LibraryError(reports.corosync_config_read_error(
-+                settings.corosync_conf_file,
-+                expected_call.exception_msg,
-+            ))
-         return expected_call.content
-     return get_local_corosync_conf
-diff --git a/pcsd/capabilities.xml b/pcsd/capabilities.xml
-index f9a76a22..1adb57ce 100644
---- a/pcsd/capabilities.xml
-+++ b/pcsd/capabilities.xml
-@@ -1696,6 +1696,18 @@
- 
- 
- 
-+    <capability id="pcs.disaster-recovery.essentials" in-pcs="1" in-pcsd="0">
-+      <description>
-+        Configure disaster-recovery with the local cluster as the primary site
-+        and one recovery site. Display local disaster-recovery config. Display
-+        status of all sites. Remove disaster-recovery config.
-+
-+        pcs commands: dr config, dr destroy, dr set-recovery-site, dr status
-+      </description>
-+    </capability>
-+
-+
-+
-     <capability id="resource-agents.describe" in-pcs="1" in-pcsd="1">
-       <description>
-         Describe a resource agent - present its metadata.
-diff --git a/pcsd/pcsd_file.rb b/pcsd/pcsd_file.rb
-index 486b764d..d82b55d2 100644
---- a/pcsd/pcsd_file.rb
-+++ b/pcsd/pcsd_file.rb
-@@ -198,6 +198,20 @@ module PcsdFile
-     end
-   end
- 
-+  class PutPcsDrConf < PutFile
-+    def full_file_name
-+      @full_file_name ||= PCSD_DR_CONFIG_LOCATION
-+    end
-+
-+    def binary?()
-+      return true
-+    end
-+
-+    def permissions()
-+      return 0600
-+    end
-+  end
-+
-   TYPES = {
-     "booth_authfile" => PutFileBoothAuthfile,
-     "booth_config" => PutFileBoothConfig,
-@@ -205,6 +219,7 @@ module PcsdFile
-     "corosync_authkey" => PutFileCorosyncAuthkey,
-     "corosync_conf" => PutFileCorosyncConf,
-     "pcs_settings_conf" => PutPcsSettingsConf,
-+    "pcs_disaster_recovery_conf" => PutPcsDrConf,
-   }
- end
- 
-diff --git a/pcsd/pcsd_remove_file.rb b/pcsd/pcsd_remove_file.rb
-index 1038402d..ffaed8e3 100644
---- a/pcsd/pcsd_remove_file.rb
-+++ b/pcsd/pcsd_remove_file.rb
-@@ -41,8 +41,15 @@ module PcsdRemoveFile
-     end
-   end
- 
-+  class RemovePcsDrConf < RemoveFile
-+    def full_file_name
-+      @full_file_name ||= PCSD_DR_CONFIG_LOCATION
-+    end
-+  end
-+
-   TYPES = {
-     "pcmk_remote_authkey" => RemovePcmkRemoteAuthkey,
-     "pcsd_settings" => RemovePcsdSettings,
-+    "pcs_disaster_recovery_conf" => RemovePcsDrConf,
-   }
- end
-diff --git a/pcsd/remote.rb b/pcsd/remote.rb
-index 6f454681..28b91382 100644
---- a/pcsd/remote.rb
-+++ b/pcsd/remote.rb
-@@ -27,6 +27,7 @@ def remote(params, request, auth_user)
-       :status => method(:node_status),
-       :status_all => method(:status_all),
-       :cluster_status => method(:cluster_status_remote),
-+      :cluster_status_plaintext => method(:cluster_status_plaintext),
-       :auth => method(:auth),
-       :check_auth => method(:check_auth),
-       :cluster_setup => method(:cluster_setup),
-@@ -219,6 +220,18 @@ def cluster_status_remote(params, request, auth_user)
-   return JSON.generate(status)
- end
- 
-+# get cluster status in plaintext (over-the-network version of 'pcs status')
-+def cluster_status_plaintext(params, request, auth_user)
-+  if not allowed_for_local_cluster(auth_user, Permissions::READ)
-+    return 403, 'Permission denied'
-+  end
-+  return pcs_internal_proxy(
-+    auth_user,
-+    params.fetch(:data_json, ""),
-+    "status.full_cluster_status_plaintext"
-+  )
-+end
-+
- def cluster_start(params, request, auth_user)
-   if params[:name]
-     code, response = send_request_with_token(
-@@ -444,7 +457,11 @@ def get_corosync_conf_remote(params, request, auth_user)
-   if not allowed_for_local_cluster(auth_user, Permissions::READ)
-     return 403, 'Permission denied'
-   end
--  return get_corosync_conf()
-+  begin
-+    return get_corosync_conf()
-+  rescue
-+    return 400, 'Unable to read corosync.conf'
-+  end
- end
- 
- # deprecated, use /remote/put_file (note that put_file doesn't support backup
-diff --git a/pcsd/settings.rb b/pcsd/settings.rb
-index a6fd0a26..e8dc0c96 100644
---- a/pcsd/settings.rb
-+++ b/pcsd/settings.rb
-@@ -9,6 +9,7 @@ KEY_FILE = PCSD_VAR_LOCATION + 'pcsd.key'
- KNOWN_HOSTS_FILE_NAME = 'known-hosts'
- PCSD_SETTINGS_CONF_LOCATION = PCSD_VAR_LOCATION + "pcs_settings.conf"
- PCSD_USERS_CONF_LOCATION = PCSD_VAR_LOCATION + "pcs_users.conf"
-+PCSD_DR_CONFIG_LOCATION = PCSD_VAR_LOCATION + "disaster-recovery"
- 
- CRM_MON = "/usr/sbin/crm_mon"
- CRM_NODE = "/usr/sbin/crm_node"
-diff --git a/pcsd/settings.rb.debian b/pcsd/settings.rb.debian
-index 5d830af9..daaae37b 100644
---- a/pcsd/settings.rb.debian
-+++ b/pcsd/settings.rb.debian
-@@ -9,6 +9,7 @@ KEY_FILE = PCSD_VAR_LOCATION + 'pcsd.key'
- KNOWN_HOSTS_FILE_NAME = 'known-hosts'
- PCSD_SETTINGS_CONF_LOCATION = PCSD_VAR_LOCATION + "pcs_settings.conf"
- PCSD_USERS_CONF_LOCATION = PCSD_VAR_LOCATION + "pcs_users.conf"
-+PCSD_DR_CONFIG_LOCATION = PCSD_VAR_LOCATION + "disaster-recovery"
- 
- CRM_MON = "/usr/sbin/crm_mon"
- CRM_NODE = "/usr/sbin/crm_node"
-diff --git a/pylintrc b/pylintrc
-index 5fc4c200..9255a804 100644
---- a/pylintrc
-+++ b/pylintrc
-@@ -19,7 +19,7 @@ max-parents=10
- min-public-methods=0
- 
- [BASIC]
--good-names=e, i, op, ip, el, maxDiff, cm, ok, T
-+good-names=e, i, op, ip, el, maxDiff, cm, ok, T, dr
- 
- [VARIABLES]
- # A regular expression matching the name of dummy variables (i.e. expectedly
--- 
-2.21.0
-
diff --git a/SOURCES/bz1743731-01-fix-error-msg-when-cluster-is-not-set-up.patch b/SOURCES/bz1743731-01-fix-error-msg-when-cluster-is-not-set-up.patch
deleted file mode 100644
index 06f551e..0000000
--- a/SOURCES/bz1743731-01-fix-error-msg-when-cluster-is-not-set-up.patch
+++ /dev/null
@@ -1,130 +0,0 @@
-From 8058591d0d79942bf6c61f105a180592bac7cf69 Mon Sep 17 00:00:00 2001
-From: Ondrej Mular <omular@redhat.com>
-Date: Thu, 28 Nov 2019 16:57:24 +0100
-Subject: [PATCH 2/3] fix error msg when cluster is not set up
-
----
- CHANGELOG.md                                |  4 +++
- pcs/cluster.py                              |  3 +++
- pcs/lib/commands/qdevice.py                 |  2 ++
- pcs_test/tier0/lib/commands/test_qdevice.py | 27 +++++++++++++++++++--
- 4 files changed, 34 insertions(+), 2 deletions(-)
-
-diff --git a/CHANGELOG.md b/CHANGELOG.md
-index 889436c3..5a7ec377 100644
---- a/CHANGELOG.md
-+++ b/CHANGELOG.md
-@@ -6,7 +6,11 @@
- - It is possible to configure a disaster-recovery site and display its status
-   ([rhbz#1676431])
- 
-+### Fixed
-+- Error messages in cases when cluster is not set up ([rhbz#1743731])
-+
- [rhbz#1676431]: https://bugzilla.redhat.com/show_bug.cgi?id=1676431
-+[rhbz#1743731]: https://bugzilla.redhat.com/show_bug.cgi?id=1743731
- 
- 
- ## [0.10.4] - 2019-11-28
-diff --git a/pcs/cluster.py b/pcs/cluster.py
-index 9473675f..0e9b3365 100644
---- a/pcs/cluster.py
-+++ b/pcs/cluster.py
-@@ -190,6 +190,9 @@ def start_cluster(argv):
-             wait_for_nodes_started(nodes, wait_timeout)
-         return
- 
-+    if not utils.hasCorosyncConf():
-+        utils.err("cluster is not currently configured on this node")
-+
-     print("Starting Cluster...")
-     service_list = ["corosync"]
-     if utils.need_to_handle_qdevice_service():
-diff --git a/pcs/lib/commands/qdevice.py b/pcs/lib/commands/qdevice.py
-index 3d7af234..41f7c296 100644
---- a/pcs/lib/commands/qdevice.py
-+++ b/pcs/lib/commands/qdevice.py
-@@ -81,6 +81,8 @@ def qdevice_start(lib_env, model):
-     start qdevice now on local host
-     """
-     _check_model(model)
-+    if not qdevice_net.qdevice_initialized():
-+        raise LibraryError(reports.qdevice_not_initialized(model))
-     _service_start(lib_env, qdevice_net.qdevice_start)
- 
- def qdevice_stop(lib_env, model, proceed_if_used=False):
-diff --git a/pcs_test/tier0/lib/commands/test_qdevice.py b/pcs_test/tier0/lib/commands/test_qdevice.py
-index b2c83ca4..af23db61 100644
---- a/pcs_test/tier0/lib/commands/test_qdevice.py
-+++ b/pcs_test/tier0/lib/commands/test_qdevice.py
-@@ -689,6 +689,7 @@ class QdeviceNetDisableTest(QdeviceTestCase):
-         )
- 
- 
-+@mock.patch("pcs.lib.corosync.qdevice_net.qdevice_initialized")
- @mock.patch("pcs.lib.external.start_service")
- @mock.patch.object(
-     LibraryEnvironment,
-@@ -696,9 +697,11 @@ class QdeviceNetDisableTest(QdeviceTestCase):
-     lambda self: "mock_runner"
- )
- class QdeviceNetStartTest(QdeviceTestCase):
--    def test_success(self, mock_net_start):
-+    def test_success(self, mock_net_start, mock_qdevice_initialized):
-+        mock_qdevice_initialized.return_value = True
-         lib.qdevice_start(self.lib_env, "net")
-         mock_net_start.assert_called_once_with("mock_runner", "corosync-qnetd")
-+        mock_qdevice_initialized.assert_called_once_with()
-         assert_report_item_list_equal(
-             self.mock_reporter.report_item_list,
-             [
-@@ -719,11 +722,12 @@ class QdeviceNetStartTest(QdeviceTestCase):
-             ]
-         )
- 
--    def test_failed(self, mock_net_start):
-+    def test_failed(self, mock_net_start, mock_qdevice_initialized):
-         mock_net_start.side_effect = StartServiceError(
-             "test service",
-             "test error"
-         )
-+        mock_qdevice_initialized.return_value = True
- 
-         assert_raise_library_error(
-             lambda: lib.qdevice_start(self.lib_env, "net"),
-@@ -737,6 +741,7 @@ class QdeviceNetStartTest(QdeviceTestCase):
-             )
-         )
-         mock_net_start.assert_called_once_with("mock_runner", "corosync-qnetd")
-+        mock_qdevice_initialized.assert_called_once_with()
-         assert_report_item_list_equal(
-             self.mock_reporter.report_item_list,
-             [
-@@ -750,6 +755,24 @@ class QdeviceNetStartTest(QdeviceTestCase):
-             ]
-         )
- 
-+    def test_qdevice_not_initialized(
-+        self, mock_net_start, mock_qdevice_initialized
-+    ):
-+        mock_qdevice_initialized.return_value = False
-+
-+        assert_raise_library_error(
-+            lambda: lib.qdevice_start(self.lib_env, "net"),
-+            (
-+                severity.ERROR,
-+                report_codes.QDEVICE_NOT_INITIALIZED,
-+                {
-+                    "model": "net",
-+                }
-+            )
-+        )
-+        mock_net_start.assert_not_called()
-+        mock_qdevice_initialized.assert_called_once_with()
-+
- 
- @mock.patch("pcs.lib.corosync.qdevice_net.qdevice_status_cluster_text")
- @mock.patch("pcs.lib.external.stop_service")
--- 
-2.21.0
-
diff --git a/SOURCES/bz1805082-01-fix-resource-stonith-refresh-documentation.patch b/SOURCES/bz1805082-01-fix-resource-stonith-refresh-documentation.patch
new file mode 100644
index 0000000..7703e96
--- /dev/null
+++ b/SOURCES/bz1805082-01-fix-resource-stonith-refresh-documentation.patch
@@ -0,0 +1,57 @@
+From be40fe494ddeb4f7132389ca0f3c1193de0e425d Mon Sep 17 00:00:00 2001
+From: Tomas Jelinek <tojeline@redhat.com>
+Date: Tue, 23 Jun 2020 12:57:05 +0200
+Subject: [PATCH 2/3] fix 'resource | stonith refresh' documentation
+
+---
+ pcs/pcs.8    | 4 ++--
+ pcs/usage.py | 4 ++--
+ 2 files changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/pcs/pcs.8 b/pcs/pcs.8
+index c887d332..3efc5bb2 100644
+--- a/pcs/pcs.8
++++ b/pcs/pcs.8
+@@ -325,7 +325,7 @@ If a node is not specified then resources / stonith devices on all nodes will be
+ refresh [<resource id>] [node=<node>] [\fB\-\-strict\fR]
+ Make the cluster forget the complete operation history (including failures) of the resource and re\-detect its current state. If you are interested in forgetting failed operations only, use the 'pcs resource cleanup' command.
+ .br
+-If the named resource is part of a group, or one numbered instance of a clone or bundled resource, the clean\-up applies to the whole collective resource unless \fB\-\-strict\fR is given.
++If the named resource is part of a group, or one numbered instance of a clone or bundled resource, the refresh applies to the whole collective resource unless \fB\-\-strict\fR is given.
+ .br
+ If a resource id is not specified then all resources / stonith devices will be refreshed.
+ .br
+@@ -613,7 +613,7 @@ If a node is not specified then resources / stonith devices on all nodes will be
+ refresh [<stonith id>] [\fB\-\-node\fR <node>] [\fB\-\-strict\fR]
+ Make the cluster forget the complete operation history (including failures) of the stonith device and re\-detect its current state. If you are interested in forgetting failed operations only, use the 'pcs stonith cleanup' command.
+ .br
+-If the named stonith device is part of a group, or one numbered instance of a clone or bundled resource, the clean\-up applies to the whole collective resource unless \fB\-\-strict\fR is given.
++If the named stonith device is part of a group, or one numbered instance of a clone or bundled resource, the refresh applies to the whole collective resource unless \fB\-\-strict\fR is given.
+ .br
+ If a stonith id is not specified then all resources / stonith devices will be refreshed.
+ .br
+diff --git a/pcs/usage.py b/pcs/usage.py
+index 8722bd7b..0f3c95a3 100644
+--- a/pcs/usage.py
++++ b/pcs/usage.py
+@@ -663,7 +663,7 @@ Commands:
+         interested in forgetting failed operations only, use the 'pcs resource
+         cleanup' command.
+         If the named resource is part of a group, or one numbered instance of a
+-        clone or bundled resource, the clean-up applies to the whole collective
++        clone or bundled resource, the refresh applies to the whole collective
+         resource unless --strict is given.
+         If a resource id is not specified then all resources / stonith devices
+         will be refreshed.
+@@ -1214,7 +1214,7 @@ Commands:
+         are interested in forgetting failed operations only, use the 'pcs
+         stonith cleanup' command.
+         If the named stonith device is part of a group, or one numbered
+-        instance of a clone or bundled resource, the clean-up applies to the
++        instance of a clone or bundled resource, the refresh applies to the
+         whole collective resource unless --strict is given.
+         If a stonith id is not specified then all resources / stonith devices
+         will be refreshed.
+-- 
+2.25.4
+
diff --git a/SOURCES/bz1817547-01-resource-and-operation-defaults.patch b/SOURCES/bz1817547-01-resource-and-operation-defaults.patch
new file mode 100644
index 0000000..34d1795
--- /dev/null
+++ b/SOURCES/bz1817547-01-resource-and-operation-defaults.patch
@@ -0,0 +1,7605 @@
+From ec4f8fc199891ad13235729272c0f115918cade9 Mon Sep 17 00:00:00 2001
+From: Tomas Jelinek <tojeline@redhat.com>
+Date: Thu, 21 May 2020 16:51:25 +0200
+Subject: [PATCH 1/3] squash bz1817547 Resource and operation defaults that
+ apply to specific resource/operation types
+
+add rule parser for rsc and op expressions
+
+improvements to rule parser
+
+make rule parts independent of the parser
+
+export parsed rules into cib
+
+add a command for adding new rsc and op defaults
+
+display rsc and op defaults with multiple nvsets
+
+fix parsing and processing of rsc_expression in rules
+
+improve syntax for creating a new nvset
+
+make the rule parser produce dataclasses
+
+fix for pyparsing-2.4.0
+
+add commands for removing rsc and op defaults sets
+
+add commands for updating rsc and op defaults sets
+
+update chagelog, capabilities
+
+add tier1 tests for rules
+
+various minor fixes
+
+fix routing, create 'defaults update' command
+
+better error messages for unallowed rule expressions
+---
+ .gitlab-ci.yml                                |   3 +
+ README.md                                     |   1 +
+ mypy.ini                                      |   9 +
+ pcs.spec.in                                   |   3 +
+ pcs/cli/common/lib_wrapper.py                 |  10 +-
+ pcs/cli/nvset.py                              |  53 ++
+ pcs/cli/reports/messages.py                   |  39 +
+ pcs/cli/routing/resource.py                   |  77 +-
+ pcs/cli/rule.py                               |  89 +++
+ pcs/common/interface/dto.py                   |   9 +-
+ pcs/common/pacemaker/nvset.py                 |  26 +
+ pcs/common/pacemaker/rule.py                  |  28 +
+ pcs/common/reports/codes.py                   |   3 +
+ pcs/common/reports/const.py                   |   6 +
+ pcs/common/reports/messages.py                |  73 ++
+ pcs/common/reports/types.py                   |   1 +
+ pcs/common/str_tools.py                       |  32 +
+ pcs/common/types.py                           |  13 +
+ pcs/config.py                                 |  20 +-
+ pcs/lib/cib/nvpair_multi.py                   | 323 +++++++++
+ pcs/lib/cib/rule/__init__.py                  |   8 +
+ pcs/lib/cib/rule/cib_to_dto.py                | 185 +++++
+ pcs/lib/cib/rule/expression_part.py           |  49 ++
+ pcs/lib/cib/rule/parsed_to_cib.py             | 103 +++
+ pcs/lib/cib/rule/parser.py                    | 232 ++++++
+ pcs/lib/cib/rule/validator.py                 |  62 ++
+ pcs/lib/cib/tools.py                          |   8 +-
+ pcs/lib/commands/cib_options.py               | 322 ++++++++-
+ pcs/lib/validate.py                           |  15 +
+ pcs/lib/xml_tools.py                          |   9 +-
+ pcs/pcs.8                                     |  86 ++-
+ pcs/resource.py                               | 258 ++++++-
+ pcs/usage.py                                  |  94 ++-
+ pcs_test/resources/cib-empty-3.1.xml          |   2 +-
+ pcs_test/resources/cib-empty-3.2.xml          |   2 +-
+ pcs_test/resources/cib-empty-3.3.xml          |  10 +
+ pcs_test/resources/cib-empty-3.4.xml          |  10 +
+ pcs_test/resources/cib-empty.xml              |   2 +-
+ pcs_test/tier0/cli/reports/test_messages.py   |  29 +
+ pcs_test/tier0/cli/resource/test_defaults.py  | 324 +++++++++
+ pcs_test/tier0/cli/test_nvset.py              |  92 +++
+ pcs_test/tier0/cli/test_rule.py               | 477 +++++++++++++
+ .../tier0/common/reports/test_messages.py     |  55 +-
+ pcs_test/tier0/common/test_str_tools.py       |  33 +
+ .../cib_options => cib/rule}/__init__.py      |   0
+ .../tier0/lib/cib/rule/test_cib_to_dto.py     | 593 ++++++++++++++++
+ .../tier0/lib/cib/rule/test_parsed_to_cib.py  | 214 ++++++
+ pcs_test/tier0/lib/cib/rule/test_parser.py    | 270 +++++++
+ pcs_test/tier0/lib/cib/rule/test_validator.py |  68 ++
+ pcs_test/tier0/lib/cib/test_nvpair_multi.py   | 513 ++++++++++++++
+ pcs_test/tier0/lib/cib/test_tools.py          |  13 +-
+ .../cib_options/test_operations_defaults.py   | 120 ----
+ .../cib_options/test_resources_defaults.py    | 120 ----
+ .../tier0/lib/commands/test_cib_options.py    | 669 ++++++++++++++++++
+ pcs_test/tier0/lib/test_validate.py           |  27 +
+ pcs_test/tier1/legacy/test_resource.py        |   8 +-
+ pcs_test/tier1/legacy/test_stonith.py         |   8 +-
+ pcs_test/tier1/test_cib_options.py            | 571 +++++++++++++++
+ pcs_test/tier1/test_tag.py                    |   4 +-
+ pcs_test/tools/fixture.py                     |   4 +-
+ pcs_test/tools/misc.py                        |  61 +-
+ pcsd/capabilities.xml                         |  30 +
+ test/centos8/Dockerfile                       |   1 +
+ test/fedora30/Dockerfile                      |   1 +
+ test/fedora31/Dockerfile                      |   1 +
+ test/fedora32/Dockerfile                      |   1 +
+ 66 files changed, 6216 insertions(+), 366 deletions(-)
+ create mode 100644 pcs/cli/nvset.py
+ create mode 100644 pcs/cli/rule.py
+ create mode 100644 pcs/common/pacemaker/nvset.py
+ create mode 100644 pcs/common/pacemaker/rule.py
+ create mode 100644 pcs/lib/cib/nvpair_multi.py
+ create mode 100644 pcs/lib/cib/rule/__init__.py
+ create mode 100644 pcs/lib/cib/rule/cib_to_dto.py
+ create mode 100644 pcs/lib/cib/rule/expression_part.py
+ create mode 100644 pcs/lib/cib/rule/parsed_to_cib.py
+ create mode 100644 pcs/lib/cib/rule/parser.py
+ create mode 100644 pcs/lib/cib/rule/validator.py
+ create mode 100644 pcs_test/resources/cib-empty-3.3.xml
+ create mode 100644 pcs_test/resources/cib-empty-3.4.xml
+ create mode 100644 pcs_test/tier0/cli/resource/test_defaults.py
+ create mode 100644 pcs_test/tier0/cli/test_nvset.py
+ create mode 100644 pcs_test/tier0/cli/test_rule.py
+ rename pcs_test/tier0/lib/{commands/cib_options => cib/rule}/__init__.py (100%)
+ create mode 100644 pcs_test/tier0/lib/cib/rule/test_cib_to_dto.py
+ create mode 100644 pcs_test/tier0/lib/cib/rule/test_parsed_to_cib.py
+ create mode 100644 pcs_test/tier0/lib/cib/rule/test_parser.py
+ create mode 100644 pcs_test/tier0/lib/cib/rule/test_validator.py
+ create mode 100644 pcs_test/tier0/lib/cib/test_nvpair_multi.py
+ delete mode 100644 pcs_test/tier0/lib/commands/cib_options/test_operations_defaults.py
+ delete mode 100644 pcs_test/tier0/lib/commands/cib_options/test_resources_defaults.py
+ create mode 100644 pcs_test/tier0/lib/commands/test_cib_options.py
+ create mode 100644 pcs_test/tier1/test_cib_options.py
+
+diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
+index 83eba12d..24444b72 100644
+--- a/.gitlab-ci.yml
++++ b/.gitlab-ci.yml
+@@ -51,6 +51,7 @@ pylint:
+         python3-pip
+         python3-pycurl
+         python3-pyOpenSSL
++        python3-pyparsing
+         findutils
+         make
+         time
+@@ -69,6 +70,7 @@ mypy:
+         python3-pip
+         python3-pycurl
+         python3-pyOpenSSL
++        python3-pyparsing
+         git
+         make
+         tar
+@@ -112,6 +114,7 @@ python_tier0_tests:
+         python3-pip
+         python3-pycurl
+         python3-pyOpenSSL
++        python3-pyparsing
+         which
+         "
+     - make install_pip
+diff --git a/README.md b/README.md
+index f888da68..efb4d0d5 100644
+--- a/README.md
++++ b/README.md
+@@ -30,6 +30,7 @@ These are the runtime dependencies of pcs and pcsd:
+ * python3-pycurl
+ * python3-setuptools
+ * python3-pyOpenSSL (python3-openssl)
++* python3-pyparsing
+ * python3-tornado 6.x
+ * python dataclasses (`pip install dataclasses`; required only for python 3.6,
+   already included in 3.7+)
+diff --git a/mypy.ini b/mypy.ini
+index ad3d1f18..ac6789a9 100644
+--- a/mypy.ini
++++ b/mypy.ini
+@@ -8,12 +8,18 @@ disallow_untyped_defs = True
+ [mypy-pcs.lib.cib.resource.relations]
+ disallow_untyped_defs = True
+ 
++[mypy-pcs.lib.cib.rule]
++disallow_untyped_defs = True
++
+ [mypy-pcs.lib.cib.tag]
+ disallow_untyped_defs = True
+ 
+ [mypy-pcs.lib.commands.tag]
+ disallow_untyped_defs = True
+ 
++[mypy-pcs.lib.commands.cib_options]
++disallow_untyped_defs = True
++
+ [mypy-pcs.lib.dr.*]
+ disallow_untyped_defs = True
+ disallow_untyped_calls = True
+@@ -84,3 +90,6 @@ ignore_missing_imports = True
+ 
+ [mypy-distro]
+ ignore_missing_imports = True
++
++[mypy-pyparsing]
++ignore_missing_imports = True
+diff --git a/pcs.spec.in b/pcs.spec.in
+index c52c2fe4..e292a708 100644
+--- a/pcs.spec.in
++++ b/pcs.spec.in
+@@ -122,6 +122,8 @@ BuildRequires: platform-python-setuptools
+ %endif
+ 
+ BuildRequires: python3-devel
++# for tier0 tests
++BuildRequires: python3-pyparsing
+ 
+ # gcc for compiling custom rubygems
+ BuildRequires: gcc
+@@ -155,6 +157,7 @@ Requires: platform-python-setuptools
+ 
+ Requires: python3-lxml
+ Requires: python3-pycurl
++Requires: python3-pyparsing
+ # clufter and its dependencies
+ Requires: python3-clufter => 0.70.0
+ %if "%{python3_version}" != "3.6" && "%{python3_version}" != "3.7"
+diff --git a/pcs/cli/common/lib_wrapper.py b/pcs/cli/common/lib_wrapper.py
+index 9fd05ac0..192a3dac 100644
+--- a/pcs/cli/common/lib_wrapper.py
++++ b/pcs/cli/common/lib_wrapper.py
+@@ -388,8 +388,14 @@ def load_module(env, middleware_factory, name):
+             env,
+             middleware.build(middleware_factory.cib,),
+             {
+-                "set_operations_defaults": cib_options.set_operations_defaults,
+-                "set_resources_defaults": cib_options.set_resources_defaults,
++                "operation_defaults_config": cib_options.operation_defaults_config,
++                "operation_defaults_create": cib_options.operation_defaults_create,
++                "operation_defaults_remove": cib_options.operation_defaults_remove,
++                "operation_defaults_update": cib_options.operation_defaults_update,
++                "resource_defaults_config": cib_options.resource_defaults_config,
++                "resource_defaults_create": cib_options.resource_defaults_create,
++                "resource_defaults_remove": cib_options.resource_defaults_remove,
++                "resource_defaults_update": cib_options.resource_defaults_update,
+             },
+         )
+ 
+diff --git a/pcs/cli/nvset.py b/pcs/cli/nvset.py
+new file mode 100644
+index 00000000..69442df3
+--- /dev/null
++++ b/pcs/cli/nvset.py
+@@ -0,0 +1,53 @@
++from typing import (
++    cast,
++    Iterable,
++    List,
++    Optional,
++)
++
++from pcs.cli.rule import rule_expression_dto_to_lines
++from pcs.common.pacemaker.nvset import CibNvsetDto
++from pcs.common.str_tools import (
++    format_name_value_list,
++    indent,
++)
++from pcs.common.types import CibNvsetType
++
++
++def nvset_dto_list_to_lines(
++    nvset_dto_list: Iterable[CibNvsetDto],
++    with_ids: bool = False,
++    text_if_empty: Optional[str] = None,
++) -> List[str]:
++    if not nvset_dto_list:
++        return [text_if_empty] if text_if_empty else []
++    return [
++        line
++        for nvset_dto in nvset_dto_list
++        for line in nvset_dto_to_lines(nvset_dto, with_ids=with_ids)
++    ]
++
++
++def nvset_dto_to_lines(nvset: CibNvsetDto, with_ids: bool = False) -> List[str]:
++    nvset_label = _nvset_type_to_label.get(nvset.type, "Options Set")
++    heading_parts = [f"{nvset_label}: {nvset.id}"]
++    if nvset.options:
++        heading_parts.append(
++            " ".join(format_name_value_list(sorted(nvset.options.items())))
++        )
++
++    lines = format_name_value_list(
++        sorted([(nvpair.name, nvpair.value) for nvpair in nvset.nvpairs])
++    )
++    if nvset.rule:
++        lines.extend(
++            rule_expression_dto_to_lines(nvset.rule, with_ids=with_ids)
++        )
++
++    return [" ".join(heading_parts)] + indent(lines)
++
++
++_nvset_type_to_label = {
++    cast(str, CibNvsetType.INSTANCE): "Attributes",
++    cast(str, CibNvsetType.META): "Meta Attrs",
++}
+diff --git a/pcs/cli/reports/messages.py b/pcs/cli/reports/messages.py
+index 36f00a9e..7ccc8ab0 100644
+--- a/pcs/cli/reports/messages.py
++++ b/pcs/cli/reports/messages.py
+@@ -402,6 +402,45 @@ class TagCannotRemoveReferencesWithoutRemovingTag(CliReportMessageCustom):
+         )
+ 
+ 
++class RuleExpressionParseError(CliReportMessageCustom):
++    _obj: messages.RuleExpressionParseError
++
++    @property
++    def message(self) -> str:
++        # Messages coming from the parser are not very useful and readable,
++        # they mostly contain one line grammar expression covering the whole
++        # rule. No user would be able to parse that. Therefore we omit the
++        # messages.
++        marker = "-" * (self._obj.column_number - 1) + "^"
++        return (
++            f"'{self._obj.rule_string}' is not a valid rule expression, parse "
++            f"error near or after line {self._obj.line_number} column "
++            f"{self._obj.column_number}\n"
++            f"  {self._obj.rule_line}\n"
++            f"  {marker}"
++        )
++
++
++class CibNvsetAmbiguousProvideNvsetId(CliReportMessageCustom):
++    _obj: messages.CibNvsetAmbiguousProvideNvsetId
++
++    @property
++    def message(self) -> str:
++        command_map = {
++            const.PCS_COMMAND_RESOURCE_DEFAULTS_UPDATE: (
++                "pcs resource defaults set update"
++            ),
++            const.PCS_COMMAND_OPERATION_DEFAULTS_UPDATE: (
++                "pcs resource op defaults set update"
++            ),
++        }
++        command = command_map.get(self._obj.pcs_command, "")
++        return (
++            f"Several options sets exist, please use the '{command}' command "
++            "and specify an option set ID"
++        )
++
++
+ def _create_report_msg_map() -> Dict[str, type]:
+     result: Dict[str, type] = {}
+     for report_msg_cls in get_all_subclasses(CliReportMessageCustom):
+diff --git a/pcs/cli/routing/resource.py b/pcs/cli/routing/resource.py
+index 28bb3d5e..0706f43b 100644
+--- a/pcs/cli/routing/resource.py
++++ b/pcs/cli/routing/resource.py
+@@ -1,15 +1,88 @@
+ from functools import partial
++from typing import (
++    Any,
++    List,
++)
+ 
+ from pcs import (
+     resource,
+     usage,
+ )
+ from pcs.cli.common.errors import raise_command_replaced
++from pcs.cli.common.parse_args import InputModifiers
+ from pcs.cli.common.routing import create_router
+ 
+ from pcs.cli.resource.relations import show_resource_relations_cmd
+ 
+ 
++def resource_defaults_cmd(
++    lib: Any, argv: List[str], modifiers: InputModifiers
++) -> None:
++    """
++    Options:
++      * -f - CIB file
++      * --force - allow unknown options
++    """
++    if argv and "=" in argv[0]:
++        # DEPRECATED legacy command
++        return resource.resource_defaults_legacy_cmd(
++            lib, argv, modifiers, deprecated_syntax_used=True
++        )
++
++    router = create_router(
++        {
++            "config": resource.resource_defaults_config_cmd,
++            "set": create_router(
++                {
++                    "create": resource.resource_defaults_set_create_cmd,
++                    "delete": resource.resource_defaults_set_remove_cmd,
++                    "remove": resource.resource_defaults_set_remove_cmd,
++                    "update": resource.resource_defaults_set_update_cmd,
++                },
++                ["resource", "defaults", "set"],
++            ),
++            "update": resource.resource_defaults_legacy_cmd,
++        },
++        ["resource", "defaults"],
++        default_cmd="config",
++    )
++    return router(lib, argv, modifiers)
++
++
++def resource_op_defaults_cmd(
++    lib: Any, argv: List[str], modifiers: InputModifiers
++) -> None:
++    """
++    Options:
++      * -f - CIB file
++      * --force - allow unknown options
++    """
++    if argv and "=" in argv[0]:
++        # DEPRECATED legacy command
++        return resource.resource_op_defaults_legacy_cmd(
++            lib, argv, modifiers, deprecated_syntax_used=True
++        )
++
++    router = create_router(
++        {
++            "config": resource.resource_op_defaults_config_cmd,
++            "set": create_router(
++                {
++                    "create": resource.resource_op_defaults_set_create_cmd,
++                    "delete": resource.resource_op_defaults_set_remove_cmd,
++                    "remove": resource.resource_op_defaults_set_remove_cmd,
++                    "update": resource.resource_op_defaults_set_update_cmd,
++                },
++                ["resource", "op", "defaults", "set"],
++            ),
++            "update": resource.resource_op_defaults_legacy_cmd,
++        },
++        ["resource", "op", "defaults"],
++        default_cmd="config",
++    )
++    return router(lib, argv, modifiers)
++
++
+ resource_cmd = create_router(
+     {
+         "help": lambda lib, argv, modifiers: usage.resource(argv),
+@@ -68,14 +141,14 @@ resource_cmd = create_router(
+         "failcount": resource.resource_failcount,
+         "op": create_router(
+             {
+-                "defaults": resource.resource_op_defaults_cmd,
++                "defaults": resource_op_defaults_cmd,
+                 "add": resource.resource_op_add_cmd,
+                 "remove": resource.resource_op_delete_cmd,
+                 "delete": resource.resource_op_delete_cmd,
+             },
+             ["resource", "op"],
+         ),
+-        "defaults": resource.resource_defaults_cmd,
++        "defaults": resource_defaults_cmd,
+         "cleanup": resource.resource_cleanup,
+         "refresh": resource.resource_refresh,
+         "relocate": create_router(
+diff --git a/pcs/cli/rule.py b/pcs/cli/rule.py
+new file mode 100644
+index 00000000..c1149fff
+--- /dev/null
++++ b/pcs/cli/rule.py
+@@ -0,0 +1,89 @@
++from typing import List
++
++from pcs.common.pacemaker.rule import CibRuleExpressionDto
++from pcs.common.str_tools import (
++    format_name_value_list,
++    indent,
++)
++from pcs.common.types import CibRuleExpressionType
++
++
++def rule_expression_dto_to_lines(
++    rule_expr: CibRuleExpressionDto, with_ids: bool = False
++) -> List[str]:
++    if rule_expr.type == CibRuleExpressionType.RULE:
++        return _rule_dto_to_lines(rule_expr, with_ids)
++    if rule_expr.type == CibRuleExpressionType.DATE_EXPRESSION:
++        return _date_dto_to_lines(rule_expr, with_ids)
++    return _simple_expr_to_lines(rule_expr, with_ids)
++
++
++def _rule_dto_to_lines(
++    rule_expr: CibRuleExpressionDto, with_ids: bool = False
++) -> List[str]:
++    heading_parts = [
++        "Rule{0}:".format(" (expired)" if rule_expr.is_expired else "")
++    ]
++    heading_parts.extend(
++        format_name_value_list(sorted(rule_expr.options.items()))
++    )
++    if with_ids:
++        heading_parts.append(f"(id:{rule_expr.id})")
++
++    lines = []
++    for child in rule_expr.expressions:
++        lines.extend(rule_expression_dto_to_lines(child, with_ids))
++
++    return [" ".join(heading_parts)] + indent(lines)
++
++
++def _date_dto_to_lines(
++    rule_expr: CibRuleExpressionDto, with_ids: bool = False
++) -> List[str]:
++    # pylint: disable=too-many-branches
++    operation = rule_expr.options.get("operation", None)
++
++    if operation == "date_spec":
++        heading_parts = ["Expression:"]
++        if with_ids:
++            heading_parts.append(f"(id:{rule_expr.id})")
++        line_parts = ["Date Spec:"]
++        if rule_expr.date_spec:
++            line_parts.extend(
++                format_name_value_list(
++                    sorted(rule_expr.date_spec.options.items())
++                )
++            )
++            if with_ids:
++                line_parts.append(f"(id:{rule_expr.date_spec.id})")
++        return [" ".join(heading_parts)] + indent([" ".join(line_parts)])
++
++    if operation == "in_range" and rule_expr.duration:
++        heading_parts = ["Expression:", "date", "in_range"]
++        if "start" in rule_expr.options:
++            heading_parts.append(rule_expr.options["start"])
++        heading_parts.extend(["to", "duration"])
++        if with_ids:
++            heading_parts.append(f"(id:{rule_expr.id})")
++        lines = [" ".join(heading_parts)]
++
++        line_parts = ["Duration:"]
++        line_parts.extend(
++            format_name_value_list(sorted(rule_expr.duration.options.items()))
++        )
++        if with_ids:
++            line_parts.append(f"(id:{rule_expr.duration.id})")
++        lines.extend(indent([" ".join(line_parts)]))
++
++        return lines
++
++    return _simple_expr_to_lines(rule_expr, with_ids=with_ids)
++
++
++def _simple_expr_to_lines(
++    rule_expr: CibRuleExpressionDto, with_ids: bool = False
++) -> List[str]:
++    parts = ["Expression:", rule_expr.as_string]
++    if with_ids:
++        parts.append(f"(id:{rule_expr.id})")
++    return [" ".join(parts)]
+diff --git a/pcs/common/interface/dto.py b/pcs/common/interface/dto.py
+index fb40fc5e..768156d6 100644
+--- a/pcs/common/interface/dto.py
++++ b/pcs/common/interface/dto.py
+@@ -42,7 +42,14 @@ def from_dict(cls: Type[DtoType], data: DtoPayload) -> DtoType:
+         data=data,
+         # NOTE: all enum types has to be listed here in key cast
+         # see: https://github.com/konradhalas/dacite#casting
+-        config=dacite.Config(cast=[types.DrRole, types.ResourceRelationType,],),
++        config=dacite.Config(
++            cast=[
++                types.CibNvsetType,
++                types.CibRuleExpressionType,
++                types.DrRole,
++                types.ResourceRelationType,
++            ]
++        ),
+     )
+ 
+ 
+diff --git a/pcs/common/pacemaker/nvset.py b/pcs/common/pacemaker/nvset.py
+new file mode 100644
+index 00000000..6d72c787
+--- /dev/null
++++ b/pcs/common/pacemaker/nvset.py
+@@ -0,0 +1,26 @@
++from dataclasses import dataclass
++from typing import (
++    Mapping,
++    Optional,
++    Sequence,
++)
++
++from pcs.common.interface.dto import DataTransferObject
++from pcs.common.pacemaker.rule import CibRuleExpressionDto
++from pcs.common.types import CibNvsetType
++
++
++@dataclass(frozen=True)
++class CibNvpairDto(DataTransferObject):
++    id: str  # pylint: disable=invalid-name
++    name: str
++    value: str
++
++
++@dataclass(frozen=True)
++class CibNvsetDto(DataTransferObject):
++    id: str  # pylint: disable=invalid-name
++    type: CibNvsetType
++    options: Mapping[str, str]
++    rule: Optional[CibRuleExpressionDto]
++    nvpairs: Sequence[CibNvpairDto]
+diff --git a/pcs/common/pacemaker/rule.py b/pcs/common/pacemaker/rule.py
+new file mode 100644
+index 00000000..306e65e6
+--- /dev/null
++++ b/pcs/common/pacemaker/rule.py
+@@ -0,0 +1,28 @@
++from dataclasses import dataclass
++from typing import (
++    Mapping,
++    Optional,
++    Sequence,
++)
++
++from pcs.common.interface.dto import DataTransferObject
++from pcs.common.types import CibRuleExpressionType
++
++
++@dataclass(frozen=True)
++class CibRuleDateCommonDto(DataTransferObject):
++    id: str  # pylint: disable=invalid-name
++    options: Mapping[str, str]
++
++
++@dataclass(frozen=True)
++class CibRuleExpressionDto(DataTransferObject):
++    # pylint: disable=too-many-instance-attributes
++    id: str  # pylint: disable=invalid-name
++    type: CibRuleExpressionType
++    is_expired: bool  # only valid for type==rule
++    options: Mapping[str, str]
++    date_spec: Optional[CibRuleDateCommonDto]
++    duration: Optional[CibRuleDateCommonDto]
++    expressions: Sequence["CibRuleExpressionDto"]
++    as_string: str
+diff --git a/pcs/common/reports/codes.py b/pcs/common/reports/codes.py
+index 26eb8b51..8bcabfab 100644
+--- a/pcs/common/reports/codes.py
++++ b/pcs/common/reports/codes.py
+@@ -123,6 +123,7 @@ CIB_LOAD_ERROR = M("CIB_LOAD_ERROR")
+ CIB_LOAD_ERROR_GET_NODES_FOR_VALIDATION = M(
+     "CIB_LOAD_ERROR_GET_NODES_FOR_VALIDATION"
+ )
++CIB_NVSET_AMBIGUOUS_PROVIDE_NVSET_ID = M("CIB_NVSET_AMBIGUOUS_PROVIDE_NVSET_ID")
+ CIB_LOAD_ERROR_SCOPE_MISSING = M("CIB_LOAD_ERROR_SCOPE_MISSING")
+ CIB_PUSH_FORCED_FULL_DUE_TO_CRM_FEATURE_SET = M(
+     "CIB_PUSH_FORCED_FULL_DUE_TO_CRM_FEATURE_SET"
+@@ -405,6 +406,8 @@ RESOURCE_UNMOVE_UNBAN_PCMK_SUCCESS = M("RESOURCE_UNMOVE_UNBAN_PCMK_SUCCESS")
+ RESOURCE_UNMOVE_UNBAN_PCMK_EXPIRED_NOT_SUPPORTED = M(
+     "RESOURCE_UNMOVE_UNBAN_PCMK_EXPIRED_NOT_SUPPORTED"
+ )
++RULE_EXPRESSION_PARSE_ERROR = M("RULE_EXPRESSION_PARSE_ERROR")
++RULE_EXPRESSION_NOT_ALLOWED = M("RULE_EXPRESSION_NOT_ALLOWED")
+ RUN_EXTERNAL_PROCESS_ERROR = M("RUN_EXTERNAL_PROCESS_ERROR")
+ RUN_EXTERNAL_PROCESS_FINISHED = M("RUN_EXTERNAL_PROCESS_FINISHED")
+ RUN_EXTERNAL_PROCESS_STARTED = M("RUN_EXTERNAL_PROCESS_STARTED")
+diff --git a/pcs/common/reports/const.py b/pcs/common/reports/const.py
+index aeb593ee..fa2122d0 100644
+--- a/pcs/common/reports/const.py
++++ b/pcs/common/reports/const.py
+@@ -1,9 +1,15 @@
+ from .types import (
+     DefaultAddressSource,
++    PcsCommand,
+     ReasonType,
+     ServiceAction,
+ )
+ 
++PCS_COMMAND_OPERATION_DEFAULTS_UPDATE = PcsCommand(
++    "resource op defaults update"
++)
++PCS_COMMAND_RESOURCE_DEFAULTS_UPDATE = PcsCommand("resource defaults update")
++
+ SERVICE_ACTION_START = ServiceAction("START")
+ SERVICE_ACTION_STOP = ServiceAction("STOP")
+ SERVICE_ACTION_ENABLE = ServiceAction("ENABLE")
+diff --git a/pcs/common/reports/messages.py b/pcs/common/reports/messages.py
+index 540e8c69..f04d8632 100644
+--- a/pcs/common/reports/messages.py
++++ b/pcs/common/reports/messages.py
+@@ -27,6 +27,7 @@ from pcs.common.str_tools import (
+     indent,
+     is_iterable_not_str,
+ )
++from pcs.common.types import CibRuleExpressionType
+ 
+ from . import (
+     codes,
+@@ -120,6 +121,7 @@ _type_articles = {
+     "ACL user": "an",
+     "ACL role": "an",
+     "ACL permission": "an",
++    "options set": "an",
+ }
+ 
+ 
+@@ -6399,3 +6401,74 @@ class TagIdsNotInTheTag(ReportItemMessage):
+             ids=format_plural(self.id_list, "id"),
+             id_list=format_list(self.id_list),
+         )
++
++
++@dataclass(frozen=True)
++class RuleExpressionParseError(ReportItemMessage):
++    """
++    Unable to parse pacemaker cib rule expression string
++
++    rule_string -- the whole rule expression string
++    reason -- error message from rule parser
++    rule_line -- part of rule_string - the line where the error occurred
++    line_number -- the line where parsing failed
++    column_number -- the column where parsing failed
++    position -- the start index where parsing failed
++    """
++
++    rule_string: str
++    reason: str
++    rule_line: str
++    line_number: int
++    column_number: int
++    position: int
++    _code = codes.RULE_EXPRESSION_PARSE_ERROR
++
++    @property
++    def message(self) -> str:
++        # Messages coming from the parser are not very useful and readable,
++        # they mostly contain one line grammar expression covering the whole
++        # rule. No user would be able to parse that. Therefore we omit the
++        # messages.
++        return (
++            f"'{self.rule_string}' is not a valid rule expression, parse error "
++            f"near or after line {self.line_number} column {self.column_number}"
++        )
++
++
++@dataclass(frozen=True)
++class RuleExpressionNotAllowed(ReportItemMessage):
++    """
++    Used rule expression is not allowed in current context
++
++    expression_type -- disallowed expression type
++    """
++
++    expression_type: CibRuleExpressionType
++    _code = codes.RULE_EXPRESSION_NOT_ALLOWED
++
++    @property
++    def message(self) -> str:
++        type_map = {
++            CibRuleExpressionType.OP_EXPRESSION: "op",
++            CibRuleExpressionType.RSC_EXPRESSION: "resource",
++        }
++        return (
++            f"Keyword '{type_map[self.expression_type]}' cannot be used "
++            "in a rule in this command"
++        )
++
++
++@dataclass(frozen=True)
++class CibNvsetAmbiguousProvideNvsetId(ReportItemMessage):
++    """
++    An old command supporting only one nvset have been used when several nvsets
++    exist. We require an nvset ID the command should work with to be specified.
++    """
++
++    pcs_command: types.PcsCommand
++    _code = codes.CIB_NVSET_AMBIGUOUS_PROVIDE_NVSET_ID
++
++    @property
++    def message(self) -> str:
++        return "Several options sets exist, please specify an option set ID"
+diff --git a/pcs/common/reports/types.py b/pcs/common/reports/types.py
+index 5973279e..541046ea 100644
+--- a/pcs/common/reports/types.py
++++ b/pcs/common/reports/types.py
+@@ -3,6 +3,7 @@ from typing import NewType
+ DefaultAddressSource = NewType("DefaultAddressSource", str)
+ ForceCode = NewType("ForceCode", str)
+ MessageCode = NewType("MessageCode", str)
++PcsCommand = NewType("PcsCommand", str)
+ ReasonType = NewType("ReasonType", str)
+ ServiceAction = NewType("ServiceAction", str)
+ SeverityLevel = NewType("SeverityLevel", str)
+diff --git a/pcs/common/str_tools.py b/pcs/common/str_tools.py
+index deb38799..80864b50 100644
+--- a/pcs/common/str_tools.py
++++ b/pcs/common/str_tools.py
+@@ -3,6 +3,8 @@ from typing import (
+     Any,
+     List,
+     Mapping,
++    Sequence,
++    Tuple,
+     TypeVar,
+ )
+ 
+@@ -49,6 +51,36 @@ def format_list_custom_last_separator(
+     )
+ 
+ 
++# For now, Tuple[str, str] is sufficient. Feel free to change it if needed,
++# e.g. when values can be integers.
++def format_name_value_list(item_list: Sequence[Tuple[str, str]]) -> List[str]:
++    """
++    Turn 2-tuples to 'name=value' strings with standard quoting
++    """
++    output = []
++    for name, value in item_list:
++        name = quote(name, "= ")
++        value = quote(value, "= ")
++        output.append(f"{name}={value}")
++    return output
++
++
++def quote(string: str, chars_to_quote: str) -> str:
++    """
++    Quote a string if it contains specified characters
++
++    string -- the string to be processed
++    chars_to_quote -- the characters causing quoting
++    """
++    if not frozenset(chars_to_quote) & frozenset(string):
++        return string
++    if '"' not in string:
++        return f'"{string}"'
++    if "'" not in string:
++        return f"'{string}'"
++    return '"{string}"'.format(string=string.replace('"', '\\"'))
++
++
+ def join_multilines(strings):
+     return "\n".join([a.strip() for a in strings if a.strip()])
+ 
+diff --git a/pcs/common/types.py b/pcs/common/types.py
+index dace6f6d..0b656cc0 100644
+--- a/pcs/common/types.py
++++ b/pcs/common/types.py
+@@ -3,6 +3,19 @@ from enum import auto
+ from pcs.common.tools import AutoNameEnum
+ 
+ 
++class CibNvsetType(AutoNameEnum):
++    INSTANCE = auto()
++    META = auto()
++
++
++class CibRuleExpressionType(AutoNameEnum):
++    RULE = auto()
++    EXPRESSION = auto()
++    DATE_EXPRESSION = auto()
++    OP_EXPRESSION = auto()
++    RSC_EXPRESSION = auto()
++
++
+ class ResourceRelationType(AutoNameEnum):
+     ORDER = auto()
+     ORDER_SET = auto()
+diff --git a/pcs/config.py b/pcs/config.py
+index 058ec55a..67aa6e0e 100644
+--- a/pcs/config.py
++++ b/pcs/config.py
+@@ -48,6 +48,7 @@ from pcs import (
+ from pcs.cli.common import middleware
+ from pcs.cli.common.errors import CmdLineInputError
+ from pcs.cli.constraint import command as constraint_command
++from pcs.cli.nvset import nvset_dto_list_to_lines
+ from pcs.cli.reports import process_library_reports
+ from pcs.common.reports import constraints as constraints_reports
+ from pcs.common.str_tools import indent
+@@ -96,7 +97,8 @@ def _config_show_cib_lines(lib):
+     Commandline options:
+       * -f - CIB file
+     """
+-    # update of pcs_options will change output of constraint show
++    # update of pcs_options will change output of constraint show and
++    # displaying resources and operations defaults
+     utils.pcs_options["--full"] = 1
+     # get latest modifiers object after updating pcs_options
+     modifiers = utils.get_input_modifiers()
+@@ -172,11 +174,23 @@ def _config_show_cib_lines(lib):
+     all_lines.append("")
+     all_lines.append("Resources Defaults:")
+     all_lines.extend(
+-        indent(resource.show_defaults(cib_dom, "rsc_defaults"), indent_step=1)
++        indent(
++            nvset_dto_list_to_lines(
++                lib.cib_options.resource_defaults_config(),
++                with_ids=modifiers.get("--full"),
++                text_if_empty="No defaults set",
++            )
++        )
+     )
+     all_lines.append("Operations Defaults:")
+     all_lines.extend(
+-        indent(resource.show_defaults(cib_dom, "op_defaults"), indent_step=1)
++        indent(
++            nvset_dto_list_to_lines(
++                lib.cib_options.operation_defaults_config(),
++                with_ids=modifiers.get("--full"),
++                text_if_empty="No defaults set",
++            )
++        )
+     )
+ 
+     all_lines.append("")
+diff --git a/pcs/lib/cib/nvpair_multi.py b/pcs/lib/cib/nvpair_multi.py
+new file mode 100644
+index 00000000..7bdc2f55
+--- /dev/null
++++ b/pcs/lib/cib/nvpair_multi.py
+@@ -0,0 +1,323 @@
++from typing import (
++    cast,
++    Iterable,
++    List,
++    Mapping,
++    NewType,
++    Optional,
++    Tuple,
++)
++from xml.etree.ElementTree import Element
++
++from lxml import etree
++from lxml.etree import _Element
++
++from pcs.common import reports
++from pcs.common.pacemaker.nvset import (
++    CibNvpairDto,
++    CibNvsetDto,
++)
++from pcs.common.reports import ReportItemList
++from pcs.common.types import CibNvsetType
++from pcs.lib import validate
++from pcs.lib.cib.rule import (
++    RuleParseError,
++    RuleRoot,
++    RuleValidator,
++    parse_rule,
++    rule_element_to_dto,
++    rule_to_cib,
++)
++from pcs.lib.cib.tools import (
++    ElementSearcher,
++    IdProvider,
++    create_subelement_id,
++)
++from pcs.lib.xml_tools import (
++    export_attributes,
++    remove_one_element,
++)
++
++
++NvsetTag = NewType("NvsetTag", str)
++NVSET_INSTANCE = NvsetTag("instance_attributes")
++NVSET_META = NvsetTag("meta_attributes")
++
++_tag_to_type = {
++    str(NVSET_META): CibNvsetType.META,
++    str(NVSET_INSTANCE): CibNvsetType.INSTANCE,
++}
++
++
++def nvpair_element_to_dto(nvpair_el: Element) -> CibNvpairDto:
++    """
++    Export an nvpair xml element to its DTO
++    """
++    return CibNvpairDto(
++        nvpair_el.get("id", ""),
++        nvpair_el.get("name", ""),
++        nvpair_el.get("value", ""),
++    )
++
++
++def nvset_element_to_dto(nvset_el: Element) -> CibNvsetDto:
++    """
++    Export an nvset xml element to its DTO
++    """
++    rule_el = nvset_el.find("./rule")
++    return CibNvsetDto(
++        nvset_el.get("id", ""),
++        _tag_to_type[nvset_el.tag],
++        export_attributes(nvset_el, with_id=False),
++        None if rule_el is None else rule_element_to_dto(rule_el),
++        [
++            nvpair_element_to_dto(nvpair_el)
++            for nvpair_el in nvset_el.iterfind("./nvpair")
++        ],
++    )
++
++
++def find_nvsets(parent_element: Element) -> List[Element]:
++    """
++    Get all nvset xml elements in the given parent element
++
++    parent_element -- an element to look for nvsets in
++    """
++    return cast(
++        # The xpath method has a complicated return value, but we know our xpath
++        # expression returns only elements.
++        List[Element],
++        cast(_Element, parent_element).xpath(
++            "./*[{nvset_tags}]".format(
++                nvset_tags=" or ".join(f"self::{tag}" for tag in _tag_to_type)
++            )
++        ),
++    )
++
++
++def find_nvsets_by_ids(
++    parent_element: Element, id_list: Iterable[str]
++) -> Tuple[List[Element], ReportItemList]:
++    """
++    Find nvset elements by their IDs and return them with non-empty report
++    list in case of errors.
++
++    parent_element -- an element to look for nvsets in
++    id_list -- nvset IDs to be looked for
++    """
++    element_list = []
++    report_list: ReportItemList = []
++    for nvset_id in id_list:
++        searcher = ElementSearcher(
++            _tag_to_type.keys(),
++            nvset_id,
++            parent_element,
++            element_type_desc="options set",
++        )
++        if searcher.element_found():
++            element_list.append(searcher.get_element())
++        else:
++            report_list.extend(searcher.get_errors())
++    return element_list, report_list
++
++
++class ValidateNvsetAppendNew:
++    """
++    Validator for creating new nvset and appending it to CIB
++    """
++
++    def __init__(
++        self,
++        id_provider: IdProvider,
++        nvpair_dict: Mapping[str, str],
++        nvset_options: Mapping[str, str],
++        nvset_rule: Optional[str] = None,
++        rule_allows_rsc_expr: bool = False,
++        rule_allows_op_expr: bool = False,
++    ):
++        """
++        id_provider -- elements' ids generator
++        nvpair_dict -- nvpairs to be put into the new nvset
++        nvset_options -- additional attributes of the created nvset
++        nvset_rule -- optional rule describing when the created nvset applies
++        rule_allows_rsc_expr -- is rsc_expression element allowed in nvset_rule?
++        rule_allows_op_expr -- is op_expression element allowed in nvset_rule?
++        """
++        self._id_provider = id_provider
++        self._nvpair_dict = nvpair_dict
++        self._nvset_options = nvset_options
++        self._nvset_rule = nvset_rule
++        self._allow_rsc_expr = rule_allows_rsc_expr
++        self._allow_op_expr = rule_allows_op_expr
++        self._nvset_rule_parsed: Optional[RuleRoot] = None
++
++    def validate(self, force_options: bool = False) -> reports.ReportItemList:
++        report_list: reports.ReportItemList = []
++
++        # Nvpair dict is intentionally not validated: it may contain any keys
++        # and values. This can change in the future and then we add a
++        # validation. Until then there is really nothing to validate there.
++
++        # validate nvset options
++        validators = [
++            validate.NamesIn(
++                ("id", "score"),
++                **validate.set_warning(
++                    reports.codes.FORCE_OPTIONS, force_options
++                ),
++            ),
++            # with id_provider it validates that the id is available as well
++            validate.ValueId(
++                "id", option_name_for_report="id", id_provider=self._id_provider
++            ),
++            validate.ValueScore("score"),
++        ]
++        report_list.extend(
++            validate.ValidatorAll(validators).validate(self._nvset_options)
++        )
++
++        # parse and validate rule
++        # TODO write and call parsed rule validation and cleanup and tests
++        if self._nvset_rule:
++            try:
++                # Allow flags are set to True always, the parsed rule tree is
++                # checked in the validator instead. That gives us better error
++                # messages, such as "op expression cannot be used in this
++                # context" instead of a universal "parse error".
++                self._nvset_rule_parsed = parse_rule(
++                    self._nvset_rule, allow_rsc_expr=True, allow_op_expr=True
++                )
++                report_list.extend(
++                    RuleValidator(
++                        self._nvset_rule_parsed,
++                        allow_rsc_expr=self._allow_rsc_expr,
++                        allow_op_expr=self._allow_op_expr,
++                    ).get_reports()
++                )
++            except RuleParseError as e:
++                report_list.append(
++                    reports.ReportItem.error(
++                        reports.messages.RuleExpressionParseError(
++                            e.rule_string,
++                            e.msg,
++                            e.rule_line,
++                            e.lineno,
++                            e.colno,
++                            e.pos,
++                        )
++                    )
++                )
++
++        return report_list
++
++    def get_parsed_rule(self) -> Optional[RuleRoot]:
++        return self._nvset_rule_parsed
++
++
++def nvset_append_new(
++    parent_element: Element,
++    id_provider: IdProvider,
++    nvset_tag: NvsetTag,
++    nvpair_dict: Mapping[str, str],
++    nvset_options: Mapping[str, str],
++    nvset_rule: Optional[RuleRoot] = None,
++) -> Element:
++    """
++    Create new nvset and append it to CIB
++
++    parent_element -- the created nvset will be appended into this element
++    id_provider -- elements' ids generator
++    nvset_tag -- type and actual tag of the nvset
++    nvpair_dict -- nvpairs to be put into the new nvset
++    nvset_options -- additional attributes of the created nvset
++    nvset_rule -- optional rule describing when the created nvset applies
++    """
++    nvset_options = dict(nvset_options)  # make a copy which we can modify
++    if "id" not in nvset_options or not nvset_options["id"]:
++        nvset_options["id"] = create_subelement_id(
++            parent_element, nvset_tag, id_provider
++        )
++
++    nvset_el = etree.SubElement(cast(_Element, parent_element), nvset_tag)
++    for name, value in nvset_options.items():
++        if value != "":
++            nvset_el.attrib[name] = value
++    if nvset_rule:
++        rule_to_cib(cast(Element, nvset_el), id_provider, nvset_rule)
++    for name, value in nvpair_dict.items():
++        _set_nvpair(cast(Element, nvset_el), id_provider, name, value)
++    return cast(Element, nvset_el)
++
++
++def nvset_remove(nvset_el_list: Iterable[Element]) -> None:
++    """
++    Remove given nvset elements from CIB
++
++    nvset_el_list -- nvset elements to be removed
++    """
++    for nvset_el in nvset_el_list:
++        remove_one_element(nvset_el)
++
++
++def nvset_update(
++    nvset_el: Element, id_provider: IdProvider, nvpair_dict: Mapping[str, str],
++) -> None:
++    """
++    Update an existing nvset
++
++    nvset_el -- nvset to be updated
++    id_provider -- elements' ids generator
++    nvpair_dict -- nvpairs to be put into the nvset
++    """
++    # Do not ever remove the nvset element, even if it is empty. There may be
++    # ACLs set in pacemaker which allow "write" for nvpairs (adding, changing
++    # and removing) but not nvsets. In such a case, removing the nvset would
++    # cause the whole change to be rejected by pacemaker with a "permission
++    # denied" message.
++    # https://bugzilla.redhat.com/show_bug.cgi?id=1642514
++    for name, value in nvpair_dict.items():
++        _set_nvpair(nvset_el, id_provider, name, value)
++
++
++def _set_nvpair(
++    nvset_element: Element, id_provider: IdProvider, name: str, value: str
++):
++    """
++    Ensure name-value pair is set / removed in specified nvset
++
++    nvset_element -- container for nvpair elements to update
++    id_provider -- elements' ids generator
++    name -- name of the nvpair to be set
++    value -- value of the nvpair to be set, if "" the nvpair will be removed
++    """
++    nvpair_el_list = cast(
++        # The xpath method has a complicated return value, but we know our xpath
++        # expression returns only elements.
++        List[Element],
++        cast(_Element, nvset_element).xpath("./nvpair[@name=$name]", name=name),
++    )
++
++    if not nvpair_el_list:
++        if value != "":
++            etree.SubElement(
++                cast(_Element, nvset_element),
++                "nvpair",
++                {
++                    "id": create_subelement_id(
++                        nvset_element,
++                        # limit id length to prevent excessively long ids
++                        name[:20],
++                        id_provider,
++                    ),
++                    "name": name,
++                    "value": value,
++                },
++            )
++        return
++
++    if value != "":
++        nvpair_el_list[0].set("value", value)
++    else:
++        nvset_element.remove(nvpair_el_list[0])
++    for nvpair_el in nvpair_el_list[1:]:
++        nvset_element.remove(nvpair_el)
+diff --git a/pcs/lib/cib/rule/__init__.py b/pcs/lib/cib/rule/__init__.py
+new file mode 100644
+index 00000000..94228572
+--- /dev/null
++++ b/pcs/lib/cib/rule/__init__.py
+@@ -0,0 +1,8 @@
++from .cib_to_dto import rule_element_to_dto
++from .expression_part import BoolExpr as RuleRoot
++from .parser import (
++    parse_rule,
++    RuleParseError,
++)
++from .parsed_to_cib import export as rule_to_cib
++from .validator import Validator as RuleValidator
+diff --git a/pcs/lib/cib/rule/cib_to_dto.py b/pcs/lib/cib/rule/cib_to_dto.py
+new file mode 100644
+index 00000000..d8198e0c
+--- /dev/null
++++ b/pcs/lib/cib/rule/cib_to_dto.py
+@@ -0,0 +1,185 @@
++from typing import cast
++from xml.etree.ElementTree import Element
++
++from lxml.etree import _Element
++
++from pcs.common.pacemaker.rule import (
++    CibRuleDateCommonDto,
++    CibRuleExpressionDto,
++)
++from pcs.common.str_tools import (
++    format_name_value_list,
++    quote,
++)
++from pcs.common.types import CibRuleExpressionType
++from pcs.lib.xml_tools import export_attributes
++
++
++def rule_element_to_dto(rule_el: Element) -> CibRuleExpressionDto:
++    """
++    Export a rule xml element including its children to their DTOs
++    """
++    return _tag_to_export[rule_el.tag](rule_el)
++
++
++def _attrs_to_str(el: Element) -> str:
++    return " ".join(
++        format_name_value_list(
++            sorted(export_attributes(el, with_id=False).items())
++        )
++    )
++
++
++def _rule_to_dto(rule_el: Element) -> CibRuleExpressionDto:
++    children_dto_list = [
++        _tag_to_export[child.tag](child)
++        # The xpath method has a complicated return value, but we know our xpath
++        # expression only returns elements.
++        for child in cast(
++            Element, cast(_Element, rule_el).xpath(_xpath_for_export)
++        )
++    ]
++    # "and" is a documented pacemaker default
++    # https://clusterlabs.org/pacemaker/doc/en-US/Pacemaker/2.0/html-single/Pacemaker_Explained/index.html#_rule_properties
++    boolean_op = rule_el.get("boolean-op", "and")
++    string_parts = []
++    for child_dto in children_dto_list:
++        if child_dto.type == CibRuleExpressionType.RULE:
++            string_parts.append(f"({child_dto.as_string})")
++        else:
++            string_parts.append(child_dto.as_string)
++    return CibRuleExpressionDto(
++        rule_el.get("id", ""),
++        _tag_to_type[rule_el.tag],
++        False,  # TODO implement is_expired
++        export_attributes(rule_el, with_id=False),
++        None,
++        None,
++        children_dto_list,
++        f" {boolean_op} ".join(string_parts),
++    )
++
++
++def _common_expr_to_dto(
++    expr_el: Element, as_string: str
++) -> CibRuleExpressionDto:
++    return CibRuleExpressionDto(
++        expr_el.get("id", ""),
++        _tag_to_type[expr_el.tag],
++        False,
++        export_attributes(expr_el, with_id=False),
++        None,
++        None,
++        [],
++        as_string,
++    )
++
++
++def _simple_expr_to_dto(expr_el: Element) -> CibRuleExpressionDto:
++    string_parts = []
++    if "value" in expr_el.attrib:
++        # "attribute" and "operation" are defined as mandatory in CIB schema
++        string_parts.extend(
++            [expr_el.get("attribute", ""), expr_el.get("operation", "")]
++        )
++        if "type" in expr_el.attrib:
++            string_parts.append(expr_el.get("type", ""))
++        string_parts.append(quote(expr_el.get("value", ""), " "))
++    else:
++        # "attribute" and "operation" are defined as mandatory in CIB schema
++        string_parts.extend(
++            [expr_el.get("operation", ""), expr_el.get("attribute", "")]
++        )
++    return _common_expr_to_dto(expr_el, " ".join(string_parts))
++
++
++def _date_common_to_dto(expr_el: Element) -> CibRuleDateCommonDto:
++    return CibRuleDateCommonDto(
++        expr_el.get("id", ""), export_attributes(expr_el, with_id=False),
++    )
++
++
++def _date_expr_to_dto(expr_el: Element) -> CibRuleExpressionDto:
++    date_spec = expr_el.find("./date_spec")
++    duration = expr_el.find("./duration")
++
++    string_parts = []
++    # "operation" is defined as mandatory in CIB schema
++    operation = expr_el.get("operation", "")
++    if operation == "date_spec":
++        string_parts.append("date-spec")
++        if date_spec is not None:
++            string_parts.append(_attrs_to_str(date_spec))
++    elif operation == "in_range":
++        string_parts.extend(["date", "in_range"])
++        # CIB schema allows "start" + "duration" or optional "start" + "end"
++        if "start" in expr_el.attrib:
++            string_parts.extend([expr_el.get("start", ""), "to"])
++        if "end" in expr_el.attrib:
++            string_parts.append(expr_el.get("end", ""))
++        if duration is not None:
++            string_parts.append("duration")
++            string_parts.append(_attrs_to_str(duration))
++    else:
++        # CIB schema allows operation=="gt" + "start" or operation=="lt" + "end"
++        string_parts.extend(["date", expr_el.get("operation", "")])
++        if "start" in expr_el.attrib:
++            string_parts.append(expr_el.get("start", ""))
++        if "end" in expr_el.attrib:
++            string_parts.append(expr_el.get("end", ""))
++
++    return CibRuleExpressionDto(
++        expr_el.get("id", ""),
++        _tag_to_type[expr_el.tag],
++        False,
++        export_attributes(expr_el, with_id=False),
++        None if date_spec is None else _date_common_to_dto(date_spec),
++        None if duration is None else _date_common_to_dto(duration),
++        [],
++        " ".join(string_parts),
++    )
++
++
++def _op_expr_to_dto(expr_el: Element) -> CibRuleExpressionDto:
++    string_parts = ["op"]
++    string_parts.append(expr_el.get("name", ""))
++    if "interval" in expr_el.attrib:
++        string_parts.append(
++            "interval={interval}".format(interval=expr_el.get("interval", ""))
++        )
++    return _common_expr_to_dto(expr_el, " ".join(string_parts))
++
++
++def _rsc_expr_to_dto(expr_el: Element) -> CibRuleExpressionDto:
++    return _common_expr_to_dto(
++        expr_el,
++        (
++            "resource "
++            + ":".join(
++                [
++                    expr_el.get(attr, "")
++                    for attr in ["class", "provider", "type"]
++                ]
++            )
++        ),
++    )
++
++
++_tag_to_type = {
++    "rule": CibRuleExpressionType.RULE,
++    "expression": CibRuleExpressionType.EXPRESSION,
++    "date_expression": CibRuleExpressionType.DATE_EXPRESSION,
++    "op_expression": CibRuleExpressionType.OP_EXPRESSION,
++    "rsc_expression": CibRuleExpressionType.RSC_EXPRESSION,
++}
++
++_tag_to_export = {
++    "rule": _rule_to_dto,
++    "expression": _simple_expr_to_dto,
++    "date_expression": _date_expr_to_dto,
++    "op_expression": _op_expr_to_dto,
++    "rsc_expression": _rsc_expr_to_dto,
++}
++_xpath_for_export = "./*[{export_tags}]".format(
++    export_tags=" or ".join(f"self::{tag}" for tag in _tag_to_export)
++)
+diff --git a/pcs/lib/cib/rule/expression_part.py b/pcs/lib/cib/rule/expression_part.py
+new file mode 100644
+index 00000000..3ba63aa2
+--- /dev/null
++++ b/pcs/lib/cib/rule/expression_part.py
+@@ -0,0 +1,49 @@
++"""
++Provides classes used as nodes of a semantic tree of a parsed rule expression.
++"""
++from dataclasses import dataclass
++from typing import (
++    NewType,
++    Optional,
++    Sequence,
++)
++
++
++class RuleExprPart:
++    pass
++
++
++BoolOperator = NewType("BoolOperator", str)
++BOOL_AND = BoolOperator("AND")
++BOOL_OR = BoolOperator("OR")
++
++
++@dataclass(frozen=True)
++class BoolExpr(RuleExprPart):
++    """
++    Represents a rule combining RuleExprPart objects by AND or OR operation.
++    """
++
++    operator: BoolOperator
++    children: Sequence[RuleExprPart]
++
++
++@dataclass(frozen=True)
++class RscExpr(RuleExprPart):
++    """
++    Represents a resource expression in a rule.
++    """
++
++    standard: Optional[str]
++    provider: Optional[str]
++    type: Optional[str]
++
++
++@dataclass(frozen=True)
++class OpExpr(RuleExprPart):
++    """
++    Represents an op expression in a rule.
++    """
++
++    name: str
++    interval: Optional[str]
+diff --git a/pcs/lib/cib/rule/parsed_to_cib.py b/pcs/lib/cib/rule/parsed_to_cib.py
+new file mode 100644
+index 00000000..0fcae4f1
+--- /dev/null
++++ b/pcs/lib/cib/rule/parsed_to_cib.py
+@@ -0,0 +1,103 @@
++from typing import cast
++from xml.etree.ElementTree import Element
++
++from lxml import etree
++from lxml.etree import _Element
++
++from pcs.lib.cib.tools import (
++    IdProvider,
++    create_subelement_id,
++)
++
++from .expression_part import (
++    BoolExpr,
++    OpExpr,
++    RscExpr,
++    RuleExprPart,
++)
++
++
++def export(
++    parent_el: Element, id_provider: IdProvider, expr_tree: BoolExpr,
++) -> Element:
++    """
++    Export parsed rule to a CIB element
++
++    parent_el -- element to place the rule into
++    id_provider -- elements' ids generator
++    expr_tree -- parsed rule tree root
++    """
++    element = __export_part(parent_el, expr_tree, id_provider)
++    # Add score only to the top level rule element (which is represented by
++    # BoolExpr class). This is achieved by this function not being called for
++    # child nodes.
++    # TODO This was implemented originaly only for rules in resource and
++    # operation defaults. In those cases, score is the only rule attribute and
++    # it is always INFINITY. Once this code is used for other rules, modify
++    # this behavior as needed.
++    if isinstance(expr_tree, BoolExpr):
++        element.set("score", "INFINITY")
++    return element
++
++
++def __export_part(
++    parent_el: Element, expr_tree: RuleExprPart, id_provider: IdProvider
++) -> Element:
++    part_export_map = {
++        BoolExpr: __export_bool,
++        OpExpr: __export_op,
++        RscExpr: __export_rsc,
++    }
++    func = part_export_map[type(expr_tree)]
++    # mypy doesn't handle this dynamic call
++    return func(parent_el, expr_tree, id_provider)  # type: ignore
++
++
++def __export_bool(
++    parent_el: Element, boolean: BoolExpr, id_provider: IdProvider
++) -> Element:
++    element = etree.SubElement(
++        cast(_Element, parent_el),
++        "rule",
++        {
++            "id": create_subelement_id(parent_el, "rule", id_provider),
++            "boolean-op": boolean.operator.lower(),
++        },
++    )
++    for child in boolean.children:
++        __export_part(cast(Element, element), child, id_provider)
++    return cast(Element, element)
++
++
++def __export_op(
++    parent_el: Element, op: OpExpr, id_provider: IdProvider
++) -> Element:
++    element = etree.SubElement(
++        cast(_Element, parent_el),
++        "op_expression",
++        {
++            "id": create_subelement_id(parent_el, f"op-{op.name}", id_provider),
++            "name": op.name,
++        },
++    )
++    if op.interval:
++        element.attrib["interval"] = op.interval
++    return cast(Element, element)
++
++
++def __export_rsc(
++    parent_el: Element, rsc: RscExpr, id_provider: IdProvider
++) -> Element:
++    id_part = "-".join(filter(None, [rsc.standard, rsc.provider, rsc.type]))
++    element = etree.SubElement(
++        cast(_Element, parent_el),
++        "rsc_expression",
++        {"id": create_subelement_id(parent_el, f"rsc-{id_part}", id_provider)},
++    )
++    if rsc.standard:
++        element.attrib["class"] = rsc.standard
++    if rsc.provider:
++        element.attrib["provider"] = rsc.provider
++    if rsc.type:
++        element.attrib["type"] = rsc.type
++    return cast(Element, element)
+diff --git a/pcs/lib/cib/rule/parser.py b/pcs/lib/cib/rule/parser.py
+new file mode 100644
+index 00000000..2215c524
+--- /dev/null
++++ b/pcs/lib/cib/rule/parser.py
+@@ -0,0 +1,232 @@
++from typing import (
++    Any,
++    Iterator,
++    Optional,
++    Tuple,
++)
++
++import pyparsing
++
++from .expression_part import (
++    BOOL_AND,
++    BOOL_OR,
++    BoolExpr,
++    OpExpr,
++    RscExpr,
++    RuleExprPart,
++)
++
++pyparsing.ParserElement.enablePackrat()
++
++
++class RuleParseError(Exception):
++    def __init__(
++        self,
++        rule_string: str,
++        rule_line: str,
++        lineno: int,
++        colno: int,
++        pos: int,
++        msg: str,
++    ):
++        super().__init__()
++        self.rule_string = rule_string
++        self.rule_line = rule_line
++        self.lineno = lineno
++        self.colno = colno
++        self.pos = pos
++        self.msg = msg
++
++
++def parse_rule(
++    rule_string: str, allow_rsc_expr: bool = False, allow_op_expr: bool = False
++) -> BoolExpr:
++    """
++    Parse a rule string and return a corresponding semantic tree
++
++    rule_string -- the whole rule expression
++    allow_rsc_expr -- allow resource expressions in the rule
++    allow_op_expr -- allow resource operation expressions in the rule
++    """
++    if not rule_string:
++        return BoolExpr(BOOL_AND, [])
++
++    try:
++        parsed = __get_rule_parser(
++            allow_rsc_expr=allow_rsc_expr, allow_op_expr=allow_op_expr
++        ).parseString(rule_string, parseAll=True)[0]
++    except pyparsing.ParseException as e:
++        raise RuleParseError(
++            rule_string, e.line, e.lineno, e.col, e.loc, e.args[2],
++        )
++
++    if not isinstance(parsed, BoolExpr):
++        # If we only got a representation on an inner rule element instead of a
++        # rule element itself, wrap the result in a default AND-rule. (There is
++        # only one expression so "and" vs. "or" doesn't really matter.)
++        parsed = BoolExpr(BOOL_AND, [parsed])
++
++    return parsed
++
++
++def __operator_operands(
++    token_list: pyparsing.ParseResults,
++) -> Iterator[Tuple[Any, Any]]:
++    # See pyparsing examples
++    # https://github.com/pyparsing/pyparsing/blob/master/examples/eval_arith.py
++    token_iterator = iter(token_list)
++    while True:
++        try:
++            yield (next(token_iterator), next(token_iterator))
++        except StopIteration:
++            break
++
++
++def __build_bool_tree(token_list: pyparsing.ParseResults) -> RuleExprPart:
++    # See pyparsing examples
++    # https://github.com/pyparsing/pyparsing/blob/master/examples/eval_arith.py
++    token_to_operator = {
++        "and": BOOL_AND,
++        "or": BOOL_OR,
++    }
++    operand_left = token_list[0][0]
++    last_operator: Optional[str] = None
++    operand_list = []
++    for operator, operand_right in __operator_operands(token_list[0][1:]):
++        # In each iteration, we get a bool_op ("and" or "or") and the right
++        # operand.
++        if last_operator == operator or last_operator is None:
++            # If we got the same operator as last time (or this is the first
++            # one), stack all the operads so we can put them all into one
++            # BoolExpr class.
++            operand_list.append(operand_right)
++        else:
++            # The operator has changed. Put all the stacked operands into the
++            # correct BoolExpr class and start the stacking again. The created
++            # class is the left operand of the current operator.
++            operand_left = BoolExpr(
++                token_to_operator[last_operator], [operand_left] + operand_list
++            )
++            operand_list = [operand_right]
++        last_operator = operator
++    if operand_list and last_operator:
++        # Use any of the remaining stacked operands.
++        operand_left = BoolExpr(
++            token_to_operator[last_operator], [operand_left] + operand_list
++        )
++    return operand_left
++
++
++def __build_op_expr(parse_result: pyparsing.ParseResults) -> RuleExprPart:
++    # Those attr are defined by setResultsName in op_expr grammar rule
++    return OpExpr(
++        parse_result.name,
++        # pyparsing-2.1.0 puts "interval_value" into parse_result.interval as
++        # defined in the grammar AND it also puts "interval_value" into
++        # parse_result. pyparsing-2.4.0 only puts "interval_value" into
++        # parse_result. Not sure why, maybe it's a bug, maybe it's intentional.
++        parse_result.interval_value if parse_result.interval_value else None,
++    )
++
++
++def __build_rsc_expr(parse_result: pyparsing.ParseResults) -> RuleExprPart:
++    # Those attrs are defined by the regexp in rsc_expr grammar rule
++    return RscExpr(
++        parse_result.standard, parse_result.provider, parse_result.type
++    )
++
++
++def __get_rule_parser(
++    allow_rsc_expr: bool = False, allow_op_expr: bool = False
++) -> pyparsing.ParserElement:
++    # This function defines the rule grammar
++
++    # It was created for 'pcs resource [op] defaults' commands to be able to
++    # set defaults for specified resources and/or operation using rules. When
++    # implementing that feature, there was no time to reimplement all the other
++    # rule expressions from old code. The plan is to move old rule parser code
++    # here once there is time / need to do it.
++    # How to add other rule expressions:
++    #   1 Create new grammar rules in a way similar to existing rsc_expr and
++    #     op_expr. Use setName for better description of a grammar when printed.
++    #     Use setResultsName for an easy access to parsed parts.
++    #   2 Create new classes in expression_part module, probably one for each
++    #     type of expression. Those are data containers holding the parsed data
++    #     independent of the parser.
++    #   3 Create builders for the new classes and connect them to created
++    #     grammar rules using setParseAction.
++    #   4 Add the new expressions into simple_expr_list.
++    #   5 Test and debug the whole thing.
++
++    rsc_expr = pyparsing.And(
++        [
++            pyparsing.CaselessKeyword("resource"),
++            # resource name
++            # Up to three parts separated by ":". The parts can contain any
++            # characters except whitespace (token separator), ":" (parts
++            # separator) and "()" (brackets).
++            pyparsing.Regex(
++                r"(?P<standard>[^\s:()]+)?:(?P<provider>[^\s:()]+)?:(?P<type>[^\s:()]+)?"
++            ).setName("<resource name>"),
++        ]
++    )
++    rsc_expr.setParseAction(__build_rsc_expr)
++
++    op_interval = pyparsing.And(
++        [
++            pyparsing.CaselessKeyword("interval"),
++            # no spaces allowed around the "="
++            pyparsing.Literal("=").leaveWhitespace(),
++            # interval value: number followed by a time unit, no spaces allowed
++            # between the number and the unit thanks to Combine being used
++            pyparsing.Combine(
++                pyparsing.And(
++                    [
++                        pyparsing.Word(pyparsing.nums),
++                        pyparsing.Optional(pyparsing.Word(pyparsing.alphas)),
++                    ]
++                )
++            )
++            .setName("<integer>[<time unit>]")
++            .setResultsName("interval_value"),
++        ]
++    )
++    op_expr = pyparsing.And(
++        [
++            pyparsing.CaselessKeyword("op"),
++            # operation name
++            # It can be any string containing any characters except whitespace
++            # (token separator) and "()" (brackets). Operations are defined in
++            # agents' metadata which we do not have access to (e.g. when the
++            # user sets operation "my_check" and doesn't even specify agent's
++            # name).
++            pyparsing.Regex(r"[^\s()]+")
++            .setName("<operation name>")
++            .setResultsName("name"),
++            pyparsing.Optional(op_interval).setResultsName("interval"),
++        ]
++    )
++    op_expr.setParseAction(__build_op_expr)
++
++    simple_expr_list = []
++    if allow_rsc_expr:
++        simple_expr_list.append(rsc_expr)
++    if allow_op_expr:
++        simple_expr_list.append(op_expr)
++    simple_expr = pyparsing.Or(simple_expr_list)
++
++    # See pyparsing examples
++    # https://github.com/pyparsing/pyparsing/blob/master/examples/simpleBool.py
++    # https://github.com/pyparsing/pyparsing/blob/master/examples/eval_arith.py
++    bool_operator = pyparsing.Or(
++        [pyparsing.CaselessKeyword("and"), pyparsing.CaselessKeyword("or")]
++    )
++    bool_expr = pyparsing.infixNotation(
++        simple_expr,
++        # By putting both "and" and "or" in one tuple we say they have the same
++        # priority. This is consistent with legacy pcs parsers. And it is how
++        # it should be, they work as a glue between "simple_expr"s.
++        [(bool_operator, 2, pyparsing.opAssoc.LEFT, __build_bool_tree)],
++    )
++
++    return pyparsing.Or([bool_expr, simple_expr])
+diff --git a/pcs/lib/cib/rule/validator.py b/pcs/lib/cib/rule/validator.py
+new file mode 100644
+index 00000000..c733ad96
+--- /dev/null
++++ b/pcs/lib/cib/rule/validator.py
+@@ -0,0 +1,62 @@
++from typing import Set
++
++from pcs.common import reports
++from pcs.common.types import CibRuleExpressionType
++
++from .expression_part import (
++    BoolExpr,
++    OpExpr,
++    RscExpr,
++)
++
++
++class Validator:
++    # TODO For now we only check allowed expressions. Other checks and
++    # validations can be added if needed.
++    def __init__(
++        self,
++        parsed_rule: BoolExpr,
++        allow_rsc_expr: bool = False,
++        allow_op_expr: bool = False,
++    ):
++        """
++        parsed_rule -- a rule to be validated
++        allow_op_expr -- are op expressions allowed in the rule?
++        allow_rsc_expr -- are resource expressions allowed in the rule?
++        """
++        self._rule = parsed_rule
++        self._allow_op_expr = allow_op_expr
++        self._allow_rsc_expr = allow_rsc_expr
++        self._disallowed_expr_list: Set[CibRuleExpressionType] = set()
++
++        self._method_map = {
++            BoolExpr: self._validate_bool_expr,
++            OpExpr: self._validate_op_expr,
++            RscExpr: self._validate_rsc_expr,
++        }
++
++    def get_reports(self) -> reports.ReportItemList:
++        self._method_map[type(self._rule)](self._rule)
++        report_list = []
++        for expr_type in self._disallowed_expr_list:
++            report_list.append(
++                reports.ReportItem.error(
++                    reports.messages.RuleExpressionNotAllowed(expr_type)
++                )
++            )
++        return report_list
++
++    def _validate_bool_expr(self, expr: BoolExpr):
++        for child in expr.children:
++            if type(child) in self._method_map:
++                self._method_map[type(child)](child)
++
++    def _validate_op_expr(self, expr):
++        del expr
++        if not self._allow_op_expr:
++            self._disallowed_expr_list.add(CibRuleExpressionType.OP_EXPRESSION)
++
++    def _validate_rsc_expr(self, expr):
++        del expr
++        if not self._allow_rsc_expr:
++            self._disallowed_expr_list.add(CibRuleExpressionType.RSC_EXPRESSION)
+diff --git a/pcs/lib/cib/tools.py b/pcs/lib/cib/tools.py
+index 920b7442..cfc5ba59 100644
+--- a/pcs/lib/cib/tools.py
++++ b/pcs/lib/cib/tools.py
+@@ -28,7 +28,7 @@ class IdProvider:
+         self._cib = get_root(cib_element)
+         self._booked_ids = set()
+ 
+-    def allocate_id(self, proposed_id):
++    def allocate_id(self, proposed_id: str) -> str:
+         """
+         Generate a new unique id based on the proposal and keep track of it
+         string proposed_id -- requested id
+@@ -294,9 +294,11 @@ def find_element_by_tag_and_id(
+     return None
+ 
+ 
+-def create_subelement_id(context_element, suffix, id_provider):
++def create_subelement_id(
++    context_element: Element, suffix: str, id_provider: IdProvider
++) -> str:
+     proposed_id = sanitize_id(
+-        "{0}-{1}".format(context_element.get("id"), suffix)
++        "{0}-{1}".format(context_element.get("id", context_element.tag), suffix)
+     )
+     return id_provider.allocate_id(proposed_id)
+ 
+diff --git a/pcs/lib/commands/cib_options.py b/pcs/lib/commands/cib_options.py
+index 713644ca..368ce409 100644
+--- a/pcs/lib/commands/cib_options.py
++++ b/pcs/lib/commands/cib_options.py
+@@ -1,54 +1,312 @@
+-from functools import partial
++from typing import (
++    Any,
++    Container,
++    Iterable,
++    List,
++    Mapping,
++    Optional,
++)
+ 
+ from pcs.common import reports
++from pcs.common.pacemaker.nvset import CibNvsetDto
+ from pcs.common.reports.item import ReportItem
+-from pcs.lib.cib import sections
+-from pcs.lib.cib.nvpair import arrange_first_meta_attributes
++from pcs.common.tools import Version
++from pcs.lib.cib import (
++    nvpair_multi,
++    sections,
++)
+ from pcs.lib.cib.tools import IdProvider
+ from pcs.lib.env import LibraryEnvironment
++from pcs.lib.errors import LibraryError
+ 
+ 
+-def _set_any_defaults(section_name, env: LibraryEnvironment, options):
++def resource_defaults_create(
++    env: LibraryEnvironment,
++    nvpairs: Mapping[str, str],
++    nvset_options: Mapping[str, str],
++    nvset_rule: Optional[str] = None,
++    force_flags: Optional[Container] = None,
++) -> None:
+     """
+-    string section_name -- determine the section of defaults
+-    env -- provides access to outside environment
+-    dict options -- are desired options with its values; when value is empty the
+-        option have to be removed
++    Create new resource defaults nvset
++
++    env --
++    nvpairs -- name-value pairs to be put into the new nvset
++    nvset_options -- additional attributes of the created nvset
++    nvset_rule -- optional rule describing when the created nvset applies
++    force_flags -- list of flags codes
++    """
++    return _defaults_create(
++        env,
++        sections.RSC_DEFAULTS,
++        dict(rule_allows_rsc_expr=True, rule_allows_op_expr=False),
++        nvpairs,
++        nvset_options,
++        nvset_rule=nvset_rule,
++        force_flags=force_flags,
++    )
++
++
++def operation_defaults_create(
++    env: LibraryEnvironment,
++    nvpairs: Mapping[str, str],
++    nvset_options: Mapping[str, str],
++    nvset_rule: Optional[str] = None,
++    force_flags: Optional[Container] = None,
++) -> None:
++    """
++    Create new operation defaults nvset
++
++    env --
++    nvpairs -- name-value pairs to be put into the new nvset
++    nvset_options -- additional attributes of the created nvset
++    nvset_rule -- optional rule describing when the created nvset applies
++    force_flags -- list of flags codes
+     """
+-    # Do not ever remove the nvset element, even if it is empty. There may be
+-    # ACLs set in pacemaker which allow "write" for nvpairs (adding, changing
+-    # and removing) but not nvsets. In such a case, removing the nvset would
+-    # cause the whole change to be rejected by pacemaker with a "permission
+-    # denied" message.
+-    # https://bugzilla.redhat.com/show_bug.cgi?id=1642514
++    return _defaults_create(
++        env,
++        sections.OP_DEFAULTS,
++        dict(rule_allows_rsc_expr=True, rule_allows_op_expr=True),
++        nvpairs,
++        nvset_options,
++        nvset_rule=nvset_rule,
++        force_flags=force_flags,
++    )
++
++
++def _defaults_create(
++    env: LibraryEnvironment,
++    cib_section_name: str,
++    validator_options: Mapping[str, Any],
++    nvpairs: Mapping[str, str],
++    nvset_options: Mapping[str, str],
++    nvset_rule: Optional[str] = None,
++    force_flags: Optional[Container] = None,
++) -> None:
++    if force_flags is None:
++        force_flags = set()
++    force = (reports.codes.FORCE in force_flags) or (
++        reports.codes.FORCE_OPTIONS in force_flags
++    )
++
++    required_cib_version = None
++    if nvset_rule:
++        required_cib_version = Version(3, 4, 0)
++    cib = env.get_cib(required_cib_version)
++    id_provider = IdProvider(cib)
++
++    validator = nvpair_multi.ValidateNvsetAppendNew(
++        id_provider,
++        nvpairs,
++        nvset_options,
++        nvset_rule=nvset_rule,
++        **validator_options,
++    )
++    if env.report_processor.report_list(
++        validator.validate(force_options=force)
++    ).has_errors:
++        raise LibraryError()
++
++    nvpair_multi.nvset_append_new(
++        sections.get(cib, cib_section_name),
++        id_provider,
++        nvpair_multi.NVSET_META,
++        nvpairs,
++        nvset_options,
++        nvset_rule=validator.get_parsed_rule(),
++    )
++
+     env.report_processor.report(
+         ReportItem.warning(reports.messages.DefaultsCanBeOverriden())
+     )
++    env.push_cib()
++
++
++def resource_defaults_config(env: LibraryEnvironment) -> List[CibNvsetDto]:
++    """
++    List all resource defaults nvsets
++    """
++    return _defaults_config(env, sections.RSC_DEFAULTS)
++
++
++def operation_defaults_config(env: LibraryEnvironment) -> List[CibNvsetDto]:
++    """
++    List all operation defaults nvsets
++    """
++    return _defaults_config(env, sections.OP_DEFAULTS)
++
++
++def _defaults_config(
++    env: LibraryEnvironment, cib_section_name: str,
++) -> List[CibNvsetDto]:
++    return [
++        nvpair_multi.nvset_element_to_dto(nvset_el)
++        for nvset_el in nvpair_multi.find_nvsets(
++            sections.get(env.get_cib(), cib_section_name)
++        )
++    ]
++
++
++def resource_defaults_remove(
++    env: LibraryEnvironment, nvset_id_list: Iterable[str]
++) -> None:
++    """
++    Remove specified resource defaults nvsets
++
++    env --
++    nvset_id_list -- nvset IDs to be removed
++    """
++    return _defaults_remove(env, sections.RSC_DEFAULTS, nvset_id_list)
++
++
++def operation_defaults_remove(
++    env: LibraryEnvironment, nvset_id_list: Iterable[str]
++) -> None:
++    """
++    Remove specified operation defaults nvsets
+ 
+-    if not options:
++    env --
++    nvset_id_list -- nvset IDs to be removed
++    """
++    return _defaults_remove(env, sections.OP_DEFAULTS, nvset_id_list)
++
++
++def _defaults_remove(
++    env: LibraryEnvironment, cib_section_name: str, nvset_id_list: Iterable[str]
++) -> None:
++    if not nvset_id_list:
+         return
++    nvset_elements, report_list = nvpair_multi.find_nvsets_by_ids(
++        sections.get(env.get_cib(), cib_section_name), nvset_id_list
++    )
++    if env.report_processor.report_list(report_list).has_errors:
++        raise LibraryError()
++    nvpair_multi.nvset_remove(nvset_elements)
++    env.push_cib()
++
++
++def resource_defaults_update(
++    env: LibraryEnvironment,
++    nvset_id: Optional[str],
++    nvpairs: Mapping[str, str],
++) -> None:
++    """
++    Update specified resource defaults nvset
++
++    env --
++    nvset_id -- nvset ID to be updated; if None, update an existing nvset if
++        there is only one
++    nvpairs -- name-value pairs to be put into the nvset
++    """
++    return _defaults_update(
++        env,
++        sections.RSC_DEFAULTS,
++        nvset_id,
++        nvpairs,
++        reports.const.PCS_COMMAND_RESOURCE_DEFAULTS_UPDATE,
++    )
++
+ 
++def operation_defaults_update(
++    env: LibraryEnvironment,
++    nvset_id: Optional[str],
++    nvpairs: Mapping[str, str],
++) -> None:
++    """
++    Update specified operation defaults nvset
++
++    env --
++    nvset_id -- nvset ID to be updated; if None, update an existing nvset if
++        there is only one
++    nvpairs -- name-value pairs to be put into the nvset
++    """
++    return _defaults_update(
++        env,
++        sections.OP_DEFAULTS,
++        nvset_id,
++        nvpairs,
++        reports.const.PCS_COMMAND_OPERATION_DEFAULTS_UPDATE,
++    )
++
++
++def _defaults_update(
++    env: LibraryEnvironment,
++    cib_section_name: str,
++    nvset_id: Optional[str],
++    nvpairs: Mapping[str, str],
++    pcs_command: reports.types.PcsCommand,
++) -> None:
+     cib = env.get_cib()
++    id_provider = IdProvider(cib)
++
++    if nvset_id is None:
++        # Backward compatibility code to support an old use case where no id
++        # was requested and provided and the first meta_attributes nvset was
++        # created / updated. However, we check that there is only one nvset
++        # present in the CIB to prevent breaking the configuration with
++        # multiple nvsets in place.
++
++        # This is to be supported as it provides means of easily managing
++        # defaults if only one set of defaults is needed.
+ 
+-    # Do not create new defaults element if we are only removing values from it.
+-    only_removing = True
+-    for value in options.values():
+-        if value != "":
+-            only_removing = False
+-            break
+-    if only_removing and not sections.exists(cib, section_name):
++        # TODO move this to a separate lib command.
++
++        if not nvpairs:
++            return
++
++        # Do not create new defaults element if we are only removing values
++        # from it.
++        only_removing = True
++        for value in nvpairs.values():
++            if value != "":
++                only_removing = False
++                break
++        if only_removing and not sections.exists(cib, cib_section_name):
++            env.report_processor.report(
++                ReportItem.warning(reports.messages.DefaultsCanBeOverriden())
++            )
++            return
++
++        nvset_elements = nvpair_multi.find_nvsets(
++            sections.get(cib, cib_section_name)
++        )
++        if len(nvset_elements) > 1:
++            env.report_processor.report(
++                reports.item.ReportItem.error(
++                    reports.messages.CibNvsetAmbiguousProvideNvsetId(
++                        pcs_command
++                    )
++                )
++            )
++            raise LibraryError()
++        env.report_processor.report(
++            ReportItem.warning(reports.messages.DefaultsCanBeOverriden())
++        )
++        if len(nvset_elements) == 1:
++            nvpair_multi.nvset_update(nvset_elements[0], id_provider, nvpairs)
++        elif only_removing:
++            # do not create new nvset if there is none and we are only removing
++            # nvpairs
++            return
++        else:
++            nvpair_multi.nvset_append_new(
++                sections.get(cib, cib_section_name),
++                id_provider,
++                nvpair_multi.NVSET_META,
++                nvpairs,
++                {},
++            )
++        env.push_cib()
+         return
+ 
+-    defaults_section = sections.get(cib, section_name)
+-    arrange_first_meta_attributes(
+-        defaults_section,
+-        options,
+-        IdProvider(cib),
+-        new_id="{0}-options".format(section_name),
++    nvset_elements, report_list = nvpair_multi.find_nvsets_by_ids(
++        sections.get(cib, cib_section_name), [nvset_id]
+     )
++    if env.report_processor.report_list(report_list).has_errors:
++        raise LibraryError()
+ 
++    nvpair_multi.nvset_update(nvset_elements[0], id_provider, nvpairs)
++    env.report_processor.report(
++        ReportItem.warning(reports.messages.DefaultsCanBeOverriden())
++    )
+     env.push_cib()
+-
+-
+-set_operations_defaults = partial(_set_any_defaults, sections.OP_DEFAULTS)
+-set_resources_defaults = partial(_set_any_defaults, sections.RSC_DEFAULTS)
+diff --git a/pcs/lib/validate.py b/pcs/lib/validate.py
+index 2edf8b31..7890585a 100644
+--- a/pcs/lib/validate.py
++++ b/pcs/lib/validate.py
+@@ -39,6 +39,7 @@ from pcs.common.reports import (
+ )
+ from pcs.lib.corosync import constants as corosync_constants
+ from pcs.lib.pacemaker.values import (
++    is_score,
+     timeout_to_seconds,
+     validate_id,
+ )
+@@ -676,6 +677,20 @@ class ValuePositiveInteger(ValuePredicateBase):
+         return "a positive integer"
+ 
+ 
++class ValueScore(ValueValidator):
++    """
++    Report INVALID_SCORE if the value is not a valid CIB score
++    """
++
++    def _validate_value(self, value):
++        report_list = []
++        if not is_score(value.normalized):
++            report_list.append(
++                ReportItem.error(reports.messages.InvalidScore(value.original))
++            )
++        return report_list
++
++
+ class ValueTimeInterval(ValuePredicateBase):
+     """
+     Report INVALID_OPTION_VALUE when the value is not a time interval
+diff --git a/pcs/lib/xml_tools.py b/pcs/lib/xml_tools.py
+index a463c418..b7d778a3 100644
+--- a/pcs/lib/xml_tools.py
++++ b/pcs/lib/xml_tools.py
+@@ -1,4 +1,4 @@
+-from typing import cast, Iterable
++from typing import cast, Dict, Iterable
+ from xml.etree.ElementTree import Element
+ 
+ from lxml import etree
+@@ -56,8 +56,11 @@ def get_sub_element(
+     return sub_element
+ 
+ 
+-def export_attributes(element):
+-    return dict((key, value) for key, value in element.attrib.items())
++def export_attributes(element: Element, with_id: bool = True) -> Dict[str, str]:
++    result = dict((key, value) for key, value in element.attrib.items())
++    if not with_id:
++        result.pop("id", None)
++    return result
+ 
+ 
+ def update_attribute_remove_empty(element, name, value):
+diff --git a/pcs/pcs.8 b/pcs/pcs.8
+index 85c6adb1..c887d332 100644
+--- a/pcs/pcs.8
++++ b/pcs/pcs.8
+@@ -185,8 +185,48 @@ Remove specified operation (note: you must specify the exact operation propertie
+ op remove <operation id>
+ Remove the specified operation id.
+ .TP
+-op defaults [options]
+-Set default values for operations, if no options are passed, lists currently configured defaults. Defaults do not apply to resources which override them with their own defined operations.
++op defaults [config] [\fB\-\-full\fR]
++List currently configured default values for operations. If \fB\-\-full\fR is specified, also list ids.
++.TP
++op defaults <name>=<value>
++Set default values for operations.
++.br
++NOTE: Defaults do not apply to resources which override them with their own defined values.
++.TP
++op defaults set create [<set options>] [meta [<name>=<value>]...] [rule [<expression>]]
++Create a new set of default values for resource operations. You may specify a rule describing resources and / or operations to which the set applies.
++.br
++Set options are: id, score
++.br
++Expression looks like one of the following:
++.br
++  op <operation name> [interval=<interval>]
++.br
++  resource [<standard>]:[<provider>]:[<type>]
++.br
++  <expression> and|or <expression>
++.br
++  ( <expression> )
++.br
++You may specify all or any of 'standard', 'provider' and 'type' in a resource expression. For example: 'resource ocf::' matches all resources of 'ocf' standard, while 'resource ::Dummy' matches all resources of 'Dummy' type regardless of their standard and provider.
++.br
++NOTE: Defaults do not apply to resources which override them with their own defined values.
++.TP
++op defaults set delete [<set id>]...
++Delete specified options sets.
++.TP
++op defaults set remove [<set id>]...
++Delete specified options sets.
++.TP
++op defaults set update <set id> [meta [<name>=<value>]...]
++Add, remove or change values in specified set of default values for resource operations.
++.br
++NOTE: Defaults do not apply to resources which override them with their own defined values.
++.TP
++op defaults update <name>=<value>...
++Set default values for operations. This is a simplified command useful for cases when you only manage one set of default values.
++.br
++NOTE: Defaults do not apply to resources which override them with their own defined values.
+ .TP
+ meta <resource id | group id | clone id> <meta options> [\fB\-\-wait\fR[=n]]
+ Add specified options to the specified resource, group or clone. Meta options should be in the format of name=value, options may be removed by setting an option without a value. If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the changes to take effect and then return 0 if the changes have been processed or 1 otherwise. If 'n' is not specified it defaults to 60 minutes.
+@@ -232,8 +272,46 @@ Set resources listed to managed mode (default). If \fB\-\-monitor\fR is specifie
+ unmanage <resource id | tag id>... [\fB\-\-monitor\fR]
+ Set resources listed to unmanaged mode. When a resource is in unmanaged mode, the cluster is not allowed to start nor stop the resource. If \fB\-\-monitor\fR is specified, disable all monitor operations of the resources.
+ .TP
+-defaults [options]
+-Set default values for resources, if no options are passed, lists currently configured defaults. Defaults do not apply to resources which override them with their own defined values.
++defaults [config] [\fB\-\-full\fR]
++List currently configured default values for resources. If \fB\-\-full\fR is specified, also list ids.
++.TP
++defaults <name>=<value>
++Set default values for resources.
++.br
++NOTE: Defaults do not apply to resources which override them with their own defined values.
++.TP
++defaults set create [<set options>] [meta [<name>=<value>]...] [rule [<expression>]]
++Create a new set of default values for resources. You may specify a rule describing resources to which the set applies.
++.br
++Set options are: id, score
++.br
++Expression looks like one of the following:
++.br
++  resource [<standard>]:[<provider>]:[<type>]
++.br
++  <expression> and|or <expression>
++.br
++  ( <expression> )
++.br
++You may specify all or any of 'standard', 'provider' and 'type' in a resource expression. For example: 'resource ocf::' matches all resources of 'ocf' standard, while 'resource ::Dummy' matches all resources of 'Dummy' type regardless of their standard and provider.
++.br
++NOTE: Defaults do not apply to resources which override them with their own defined values.
++.TP
++defaults set delete [<set id>]...
++Delete specified options sets.
++.TP
++defaults set remove [<set id>]...
++Delete specified options sets.
++.TP
++defaults set update <set id> [meta [<name>=<value>]...]
++Add, remove or change values in specified set of default values for resources.
++.br
++NOTE: Defaults do not apply to resources which override them with their own defined values.
++.TP
++defaults update <name>=<value>...
++Set default values for resources. This is a simplified command useful for cases when you only manage one set of default values.
++.br
++NOTE: Defaults do not apply to resources which override them with their own defined values.
+ .TP
+ cleanup [<resource id>] [node=<node>] [operation=<operation> [interval=<interval>]] [\fB\-\-strict\fR]
+ Make the cluster forget failed operations from history of the resource and re\-detect its current state. This can be useful to purge knowledge of past failures that have since been resolved.
+diff --git a/pcs/resource.py b/pcs/resource.py
+index dd199bea..e835fc99 100644
+--- a/pcs/resource.py
++++ b/pcs/resource.py
+@@ -6,7 +6,12 @@ import textwrap
+ import time
+ import json
+ 
+-from typing import Any, List
++from typing import (
++    Any,
++    Callable,
++    List,
++    Sequence,
++)
+ 
+ from pcs import (
+     usage,
+@@ -19,10 +24,12 @@ from pcs.settings import (
+ )
+ from pcs.cli.common.errors import CmdLineInputError, raise_command_replaced
+ from pcs.cli.common.parse_args import (
++    group_by_keywords,
+     prepare_options,
+     prepare_options_allowed,
+     InputModifiers,
+ )
++from pcs.cli.nvset import nvset_dto_list_to_lines
+ from pcs.cli.reports import process_library_reports
+ from pcs.cli.reports.output import error, warn
+ from pcs.cli.resource.parse_args import (
+@@ -31,8 +38,8 @@ from pcs.cli.resource.parse_args import (
+     parse_bundle_update_options,
+     parse_create as parse_create_args,
+ )
++from pcs.common import reports
+ from pcs.common.str_tools import indent
+-from pcs.common.reports import ReportItemSeverity
+ import pcs.lib.cib.acl as lib_acl
+ from pcs.lib.cib.resource import (
+     bundle,
+@@ -113,28 +120,228 @@ def resource_utilization_cmd(lib, argv, modifiers):
+         set_resource_utilization(argv.pop(0), argv)
+ 
+ 
+-def resource_defaults_cmd(lib, argv, modifiers):
++def _defaults_set_create_cmd(
++    lib_command: Callable[..., Any],
++    argv: Sequence[str],
++    modifiers: InputModifiers,
++):
++    modifiers.ensure_only_supported("-f", "--force")
++
++    groups = group_by_keywords(
++        argv,
++        set(["meta", "rule"]),
++        implicit_first_group_key="options",
++        keyword_repeat_allowed=False,
++    )
++    force_flags = set()
++    if modifiers.get("--force"):
++        force_flags.add(reports.codes.FORCE)
++
++    lib_command(
++        prepare_options(groups["meta"]),
++        prepare_options(groups["options"]),
++        nvset_rule=(" ".join(groups["rule"]) if groups["rule"] else None),
++        force_flags=force_flags,
++    )
++
++
++def resource_defaults_set_create_cmd(
++    lib: Any, argv: Sequence[str], modifiers: InputModifiers,
++) -> None:
++    """
++    Options:
++      * -f - CIB file
++      * --force - allow unknown options
++    """
++    return _defaults_set_create_cmd(
++        lib.cib_options.resource_defaults_create, argv, modifiers
++    )
++
++
++def resource_op_defaults_set_create_cmd(
++    lib: Any, argv: Sequence[str], modifiers: InputModifiers,
++) -> None:
++    """
++    Options:
++      * -f - CIB file
++      * --force - allow unknown options
++    """
++    return _defaults_set_create_cmd(
++        lib.cib_options.operation_defaults_create, argv, modifiers
++    )
++
++
++def _defaults_config_cmd(
++    lib_command: Callable[..., Any],
++    argv: Sequence[str],
++    modifiers: InputModifiers,
++) -> None:
++    """
++    Options:
++      * -f - CIB file
++      * --full - verbose output
++    """
++    if argv:
++        raise CmdLineInputError()
++    modifiers.ensure_only_supported("-f", "--full")
++    print(
++        "\n".join(
++            nvset_dto_list_to_lines(
++                lib_command(),
++                with_ids=modifiers.get("--full"),
++                text_if_empty="No defaults set",
++            )
++        )
++    )
++
++
++def resource_defaults_config_cmd(
++    lib: Any, argv: Sequence[str], modifiers: InputModifiers,
++) -> None:
++    """
++    Options:
++      * -f - CIB file
++      * --full - verbose output
++    """
++    return _defaults_config_cmd(
++        lib.cib_options.resource_defaults_config, argv, modifiers
++    )
++
++
++def resource_op_defaults_config_cmd(
++    lib: Any, argv: Sequence[str], modifiers: InputModifiers,
++) -> None:
++    """
++    Options:
++      * -f - CIB file
++      * --full - verbose output
++    """
++    return _defaults_config_cmd(
++        lib.cib_options.operation_defaults_config, argv, modifiers
++    )
++
++
++def _defaults_set_remove_cmd(
++    lib_command: Callable[..., Any],
++    argv: Sequence[str],
++    modifiers: InputModifiers,
++) -> None:
+     """
+     Options:
+       * -f - CIB file
+     """
+     modifiers.ensure_only_supported("-f")
+-    if not argv:
+-        print("\n".join(show_defaults(utils.get_cib_dom(), "rsc_defaults")))
+-    else:
+-        lib.cib_options.set_resources_defaults(prepare_options(argv))
++    lib_command(argv)
+ 
+ 
+-def resource_op_defaults_cmd(lib, argv, modifiers):
++def resource_defaults_set_remove_cmd(
++    lib: Any, argv: Sequence[str], modifiers: InputModifiers,
++) -> None:
++    """
++    Options:
++      * -f - CIB file
++    """
++    return _defaults_set_remove_cmd(
++        lib.cib_options.resource_defaults_remove, argv, modifiers
++    )
++
++
++def resource_op_defaults_set_remove_cmd(
++    lib: Any, argv: Sequence[str], modifiers: InputModifiers,
++) -> None:
++    """
++    Options:
++      * -f - CIB file
++    """
++    return _defaults_set_remove_cmd(
++        lib.cib_options.operation_defaults_remove, argv, modifiers
++    )
++
++
++def _defaults_set_update_cmd(
++    lib_command: Callable[..., Any],
++    argv: Sequence[str],
++    modifiers: InputModifiers,
++) -> None:
+     """
+     Options:
+       * -f - CIB file
+     """
+     modifiers.ensure_only_supported("-f")
+     if not argv:
+-        print("\n".join(show_defaults(utils.get_cib_dom(), "op_defaults")))
+-    else:
+-        lib.cib_options.set_operations_defaults(prepare_options(argv))
++        raise CmdLineInputError()
++
++    set_id = argv[0]
++    groups = group_by_keywords(
++        argv[1:], set(["meta"]), keyword_repeat_allowed=False,
++    )
++    lib_command(
++        set_id, prepare_options(groups["meta"]),
++    )
++
++
++def resource_defaults_set_update_cmd(
++    lib: Any, argv: Sequence[str], modifiers: InputModifiers,
++) -> None:
++    """
++    Options:
++      * -f - CIB file
++    """
++    return _defaults_set_update_cmd(
++        lib.cib_options.resource_defaults_update, argv, modifiers
++    )
++
++
++def resource_op_defaults_set_update_cmd(
++    lib: Any, argv: Sequence[str], modifiers: InputModifiers,
++) -> None:
++    """
++    Options:
++      * -f - CIB file
++    """
++    return _defaults_set_update_cmd(
++        lib.cib_options.operation_defaults_update, argv, modifiers
++    )
++
++
++def resource_defaults_legacy_cmd(
++    lib: Any,
++    argv: Sequence[str],
++    modifiers: InputModifiers,
++    deprecated_syntax_used: bool = False,
++) -> None:
++    """
++    Options:
++      * -f - CIB file
++    """
++    del modifiers
++    if deprecated_syntax_used:
++        warn(
++            "This command is deprecated and will be removed. "
++            "Please use 'pcs resource defaults update' instead."
++        )
++    return lib.cib_options.resource_defaults_update(None, prepare_options(argv))
++
++
++def resource_op_defaults_legacy_cmd(
++    lib: Any,
++    argv: Sequence[str],
++    modifiers: InputModifiers,
++    deprecated_syntax_used: bool = False,
++) -> None:
++    """
++    Options:
++      * -f - CIB file
++    """
++    del modifiers
++    if deprecated_syntax_used:
++        warn(
++            "This command is deprecated and will be removed. "
++            "Please use 'pcs resource op defaults update' instead."
++        )
++    return lib.cib_options.operation_defaults_update(
++        None, prepare_options(argv)
++    )
+ 
+ 
+ def resource_op_add_cmd(lib, argv, modifiers):
+@@ -741,9 +948,9 @@ def resource_update(lib, args, modifiers, deal_with_guest_change=True):
+             process_library_reports(report_list)
+     except lib_ra.ResourceAgentError as e:
+         severity = (
+-            ReportItemSeverity.WARNING
++            reports.ReportItemSeverity.WARNING
+             if modifiers.get("--force")
+-            else ReportItemSeverity.ERROR
++            else reports.ReportItemSeverity.ERROR
+         )
+         process_library_reports(
+             [lib_ra.resource_agent_error_to_report_item(e, severity)]
+@@ -2543,30 +2750,6 @@ def resource_failcount_show(lib, resource, node, operation, interval, full):
+     return "\n".join(result_lines)
+ 
+ 
+-def show_defaults(cib_dom, def_type):
+-    """
+-    Commandline options: no options
+-    """
+-    defs = cib_dom.getElementsByTagName(def_type)
+-    if not defs:
+-        return ["No defaults set"]
+-    defs = defs[0]
+-
+-    # TODO duplicite to _nvpairs_strings
+-    key_val = {
+-        nvpair.getAttribute("name"): nvpair.getAttribute("value")
+-        for nvpair in defs.getElementsByTagName("nvpair")
+-    }
+-    if not key_val:
+-        return ["No defaults set"]
+-    strings = []
+-    for name, value in sorted(key_val.items()):
+-        if " " in value:
+-            value = f'"{value}"'
+-        strings.append(f"{name}={value}")
+-    return strings
+-
+-
+ def resource_node_lines(node):
+     """
+     Commandline options: no options
+@@ -2677,6 +2860,7 @@ def _nvpairs_strings(node, parent_tag, extra_vars_dict=None):
+     """
+     Commandline options: no options
+     """
++    # In the new architecture, this is implemented in pcs.cli.nvset.
+     key_val = {
+         nvpair.attrib["name"]: nvpair.attrib["value"]
+         for nvpair in node.findall(f"{parent_tag}/nvpair")
+diff --git a/pcs/usage.py b/pcs/usage.py
+index 2cab7a6c..8722bd7b 100644
+--- a/pcs/usage.py
++++ b/pcs/usage.py
+@@ -442,10 +442,50 @@ Commands:
+     op remove <operation id>
+         Remove the specified operation id.
+ 
+-    op defaults [options]
+-        Set default values for operations, if no options are passed, lists
+-        currently configured defaults. Defaults do not apply to resources which
+-        override them with their own defined operations.
++    op defaults [config] [--full]
++        List currently configured default values for operations. If --full is
++        specified, also list ids.
++
++    op defaults <name>=<value>...
++        Set default values for operations.
++        NOTE: Defaults do not apply to resources which override them with their
++        own defined values.
++
++    op defaults set create [<set options>] [meta [<name>=<value>]...]
++            [rule [<expression>]]
++        Create a new set of default values for resource operations. You may
++        specify a rule describing resources and / or operations to which the set
++        applies.
++        Set options are: id, score
++        Expression looks like one of the following:
++          op <operation name> [interval=<interval>]
++          resource [<standard>]:[<provider>]:[<type>]
++          <expression> and|or <expression>
++          ( <expression> )
++        You may specify all or any of 'standard', 'provider' and 'type' in
++        a resource expression. For example: 'resource ocf::' matches all
++        resources of 'ocf' standard, while 'resource ::Dummy' matches all
++        resources of 'Dummy' type regardless of their standard and provider.
++        NOTE: Defaults do not apply to resources which override them with their
++        own defined values.
++
++    op defaults set delete [<set id>]...
++        Delete specified options sets.
++
++    op defaults set remove [<set id>]...
++        Delete specified options sets.
++
++    op defaults set update <set id> [meta [<name>=<value>]...]
++        Add, remove or change values in specified set of default values for
++        resource operations.
++        NOTE: Defaults do not apply to resources which override them with their
++        own defined values.
++
++    op defaults update <name>=<value>...
++        Set default values for operations. This is a simplified command useful
++        for cases when you only manage one set of default values.
++        NOTE: Defaults do not apply to resources which override them with their
++        own defined values.
+ 
+     meta <resource id | group id | clone id> <meta options>
+          [--wait[=n]]
+@@ -561,10 +601,48 @@ Commands:
+         --monitor is specified, disable all monitor operations of the
+         resources.
+ 
+-    defaults [options]
+-        Set default values for resources, if no options are passed, lists
+-        currently configured defaults. Defaults do not apply to resources which
+-        override them with their own defined values.
++    defaults [config] [--full]
++        List currently configured default values for resources. If --full is
++        specified, also list ids.
++
++    defaults <name>=<value>...
++        Set default values for resources.
++        NOTE: Defaults do not apply to resources which override them with their
++        own defined values.
++
++    defaults set create [<set options>] [meta [<name>=<value>]...]
++            [rule [<expression>]]
++        Create a new set of default values for resources. You may specify a rule
++        describing resources to which the set applies.
++        Set options are: id, score
++        Expression looks like one of the following:
++          resource [<standard>]:[<provider>]:[<type>]
++          <expression> and|or <expression>
++          ( <expression> )
++        You may specify all or any of 'standard', 'provider' and 'type' in
++        a resource expression. For example: 'resource ocf::' matches all
++        resources of 'ocf' standard, while 'resource ::Dummy' matches all
++        resources of 'Dummy' type regardless of their standard and provider.
++        NOTE: Defaults do not apply to resources which override them with their
++        own defined values.
++
++    defaults set delete [<set id>]...
++        Delete specified options sets.
++
++    defaults set remove [<set id>]...
++        Delete specified options sets.
++
++    defaults set update <set id> [meta [<name>=<value>]...]
++        Add, remove or change values in specified set of default values for
++        resources.
++        NOTE: Defaults do not apply to resources which override them with their
++        own defined values.
++
++    defaults update <name>=<value>...
++        Set default values for resources. This is a simplified command useful
++        for cases when you only manage one set of default values.
++        NOTE: Defaults do not apply to resources which override them with their
++        own defined values.
+ 
+     cleanup [<resource id>] [node=<node>] [operation=<operation>
+             [interval=<interval>]] [--strict]
+diff --git a/pcs_test/resources/cib-empty-3.1.xml b/pcs_test/resources/cib-empty-3.1.xml
+index 75bbb26d..88f5c414 100644
+--- a/pcs_test/resources/cib-empty-3.1.xml
++++ b/pcs_test/resources/cib-empty-3.1.xml
+@@ -1,4 +1,4 @@
+-<cib epoch="557" num_updates="122" admin_epoch="0" validate-with="pacemaker-3.1" crm_feature_set="3.0.9" update-origin="rh7-3" update-client="crmd" cib-last-written="Thu Aug 23 16:49:17 2012" have-quorum="0" dc-uuid="2">
++<cib epoch="557" num_updates="122" admin_epoch="0" validate-with="pacemaker-3.1" crm_feature_set="3.1.0" update-origin="rh7-3" update-client="crmd" cib-last-written="Thu Aug 23 16:49:17 2012" have-quorum="0" dc-uuid="2">
+   <configuration>
+     <crm_config/>
+     <nodes>
+diff --git a/pcs_test/resources/cib-empty-3.2.xml b/pcs_test/resources/cib-empty-3.2.xml
+index 0b0b04b8..7ffaccb1 100644
+--- a/pcs_test/resources/cib-empty-3.2.xml
++++ b/pcs_test/resources/cib-empty-3.2.xml
+@@ -1,4 +1,4 @@
+-<cib epoch="557" num_updates="122" admin_epoch="0" validate-with="pacemaker-3.2" crm_feature_set="3.0.9" update-origin="rh7-3" update-client="crmd" cib-last-written="Thu Aug 23 16:49:17 2012" have-quorum="0" dc-uuid="2">
++<cib epoch="557" num_updates="122" admin_epoch="0" validate-with="pacemaker-3.2" crm_feature_set="3.1.0" update-origin="rh7-3" update-client="crmd" cib-last-written="Thu Aug 23 16:49:17 2012" have-quorum="0" dc-uuid="2">
+   <configuration>
+     <crm_config/>
+     <nodes>
+diff --git a/pcs_test/resources/cib-empty-3.3.xml b/pcs_test/resources/cib-empty-3.3.xml
+new file mode 100644
+index 00000000..3a44fe08
+--- /dev/null
++++ b/pcs_test/resources/cib-empty-3.3.xml
+@@ -0,0 +1,10 @@
++<cib epoch="557" num_updates="122" admin_epoch="0" validate-with="pacemaker-3.3" crm_feature_set="3.1.0" update-origin="rh7-3" update-client="crmd" cib-last-written="Thu Aug 23 16:49:17 2012" have-quorum="0" dc-uuid="2">
++  <configuration>
++    <crm_config/>
++    <nodes>
++    </nodes>
++    <resources/>
++    <constraints/>
++  </configuration>
++  <status/>
++</cib>
+diff --git a/pcs_test/resources/cib-empty-3.4.xml b/pcs_test/resources/cib-empty-3.4.xml
+new file mode 100644
+index 00000000..dcd4ff44
+--- /dev/null
++++ b/pcs_test/resources/cib-empty-3.4.xml
+@@ -0,0 +1,10 @@
++<cib epoch="557" num_updates="122" admin_epoch="0" validate-with="pacemaker-3.4" crm_feature_set="3.1.0" update-origin="rh7-3" update-client="crmd" cib-last-written="Thu Aug 23 16:49:17 2012" have-quorum="0" dc-uuid="2">
++  <configuration>
++    <crm_config/>
++    <nodes>
++    </nodes>
++    <resources/>
++    <constraints/>
++  </configuration>
++  <status/>
++</cib>
+diff --git a/pcs_test/resources/cib-empty.xml b/pcs_test/resources/cib-empty.xml
+index 75bbb26d..7ffaccb1 100644
+--- a/pcs_test/resources/cib-empty.xml
++++ b/pcs_test/resources/cib-empty.xml
+@@ -1,4 +1,4 @@
+-<cib epoch="557" num_updates="122" admin_epoch="0" validate-with="pacemaker-3.1" crm_feature_set="3.0.9" update-origin="rh7-3" update-client="crmd" cib-last-written="Thu Aug 23 16:49:17 2012" have-quorum="0" dc-uuid="2">
++<cib epoch="557" num_updates="122" admin_epoch="0" validate-with="pacemaker-3.2" crm_feature_set="3.1.0" update-origin="rh7-3" update-client="crmd" cib-last-written="Thu Aug 23 16:49:17 2012" have-quorum="0" dc-uuid="2">
+   <configuration>
+     <crm_config/>
+     <nodes>
+diff --git a/pcs_test/tier0/cli/reports/test_messages.py b/pcs_test/tier0/cli/reports/test_messages.py
+index 06f32e68..47aabd63 100644
+--- a/pcs_test/tier0/cli/reports/test_messages.py
++++ b/pcs_test/tier0/cli/reports/test_messages.py
+@@ -481,6 +481,35 @@ class TagCannotRemoveReferencesWithoutRemovingTag(CliReportMessageTestBase):
+         )
+ 
+ 
++class RuleExpressionParseError(CliReportMessageTestBase):
++    def test_success(self):
++        self.assert_message(
++            messages.RuleExpressionParseError(
++                "resource dummy op monitor",
++                "Expected end of text",
++                "resource dummy op monitor",
++                1,
++                16,
++                15,
++            ),
++            "'resource dummy op monitor' is not a valid rule expression, "
++            "parse error near or after line 1 column 16\n"
++            "  resource dummy op monitor\n"
++            "  ---------------^",
++        )
++
++
++class CibNvsetAmbiguousProvideNvsetId(CliReportMessageTestBase):
++    def test_success(self):
++        self.assert_message(
++            messages.CibNvsetAmbiguousProvideNvsetId(
++                const.PCS_COMMAND_RESOURCE_DEFAULTS_UPDATE
++            ),
++            "Several options sets exist, please use the 'pcs resource defaults "
++            "set update' command and specify an option set ID",
++        )
++
++
+ # TODO: create test/check that all subclasses of
+ # pcs.cli.reports.messages.CliReportMessageCustom have their test class with
+ # the same name in this file
+diff --git a/pcs_test/tier0/cli/resource/test_defaults.py b/pcs_test/tier0/cli/resource/test_defaults.py
+new file mode 100644
+index 00000000..0582c664
+--- /dev/null
++++ b/pcs_test/tier0/cli/resource/test_defaults.py
+@@ -0,0 +1,324 @@
++from textwrap import dedent
++from unittest import mock, TestCase
++
++from pcs_test.tools.misc import dict_to_modifiers
++
++from pcs import resource
++from pcs.cli.common.errors import CmdLineInputError
++from pcs.common.pacemaker.nvset import (
++    CibNvpairDto,
++    CibNvsetDto,
++)
++from pcs.common.pacemaker.rule import CibRuleExpressionDto
++from pcs.common.reports import codes as report_codes
++from pcs.common.types import (
++    CibNvsetType,
++    CibRuleExpressionType,
++)
++
++
++class DefaultsBaseMixin:
++    cli_command_name = ""
++    lib_command_name = ""
++
++    def setUp(self):
++        # pylint: disable=invalid-name
++        self.lib = mock.Mock(spec_set=["cib_options"])
++        self.cib_options = mock.Mock(spec_set=[self.lib_command_name])
++        self.lib.cib_options = self.cib_options
++        self.lib_command = getattr(self.cib_options, self.lib_command_name)
++        self.cli_command = getattr(resource, self.cli_command_name)
++
++    def _call_cmd(self, argv, modifiers=None):
++        modifiers = modifiers or dict()
++        self.cli_command(self.lib, argv, dict_to_modifiers(modifiers))
++
++
++@mock.patch("pcs.resource.print")
++class DefaultsConfigMixin(DefaultsBaseMixin):
++    dto_list = [
++        CibNvsetDto(
++            "my-meta_attributes",
++            CibNvsetType.META,
++            {},
++            CibRuleExpressionDto(
++                "my-meta-rule",
++                CibRuleExpressionType.RULE,
++                False,
++                {"boolean-op": "and", "score": "INFINITY"},
++                None,
++                None,
++                [
++                    CibRuleExpressionDto(
++                        "my-meta-rule-rsc",
++                        CibRuleExpressionType.RSC_EXPRESSION,
++                        False,
++                        {
++                            "class": "ocf",
++                            "provider": "pacemaker",
++                            "type": "Dummy",
++                        },
++                        None,
++                        None,
++                        [],
++                        "resource ocf:pacemaker:Dummy",
++                    ),
++                ],
++                "resource ocf:pacemaker:Dummy",
++            ),
++            [
++                CibNvpairDto("my-id-pair1", "name1", "value1"),
++                CibNvpairDto("my-id-pair2", "name2", "value2"),
++            ],
++        ),
++        CibNvsetDto(
++            "instance",
++            CibNvsetType.INSTANCE,
++            {},
++            None,
++            [CibNvpairDto("instance-pair", "inst", "ance")],
++        ),
++        CibNvsetDto(
++            "meta-plain",
++            CibNvsetType.META,
++            {"score": "123"},
++            None,
++            [CibNvpairDto("my-id-pair3", "name 1", "value 1")],
++        ),
++    ]
++
++    def test_no_args(self, mock_print):
++        self.lib_command.return_value = []
++        self._call_cmd([])
++        self.lib_command.assert_called_once_with()
++        mock_print.assert_called_once_with("No defaults set")
++
++    def test_usage(self, mock_print):
++        with self.assertRaises(CmdLineInputError) as cm:
++            self._call_cmd(["arg"])
++        self.assertIsNone(cm.exception.message)
++        self.lib_command.assert_not_called()
++        mock_print.assert_not_called()
++
++    def test_full(self, mock_print):
++        self.lib_command.return_value = []
++        self._call_cmd([], {"full": True})
++        self.lib_command.assert_called_once_with()
++        mock_print.assert_called_once_with("No defaults set")
++
++    def test_print(self, mock_print):
++        self.lib_command.return_value = self.dto_list
++        self._call_cmd([])
++        self.lib_command.assert_called_once_with()
++        mock_print.assert_called_once_with(
++            dedent(
++                '''\
++                Meta Attrs: my-meta_attributes
++                  name1=value1
++                  name2=value2
++                  Rule: boolean-op=and score=INFINITY
++                    Expression: resource ocf:pacemaker:Dummy
++                Attributes: instance
++                  inst=ance
++                Meta Attrs: meta-plain score=123
++                  "name 1"="value 1"'''
++            )
++        )
++
++    def test_print_full(self, mock_print):
++        self.lib_command.return_value = self.dto_list
++        self._call_cmd([], {"full": True})
++        self.lib_command.assert_called_once_with()
++        mock_print.assert_called_once_with(
++            dedent(
++                '''\
++                Meta Attrs: my-meta_attributes
++                  name1=value1
++                  name2=value2
++                  Rule: boolean-op=and score=INFINITY (id:my-meta-rule)
++                    Expression: resource ocf:pacemaker:Dummy (id:my-meta-rule-rsc)
++                Attributes: instance
++                  inst=ance
++                Meta Attrs: meta-plain score=123
++                  "name 1"="value 1"'''
++            )
++        )
++
++
++class RscDefaultsConfig(DefaultsConfigMixin, TestCase):
++    cli_command_name = "resource_defaults_config_cmd"
++    lib_command_name = "resource_defaults_config"
++
++
++class OpDefaultsConfig(DefaultsConfigMixin, TestCase):
++    cli_command_name = "resource_op_defaults_config_cmd"
++    lib_command_name = "operation_defaults_config"
++
++
++class DefaultsSetCreateMixin(DefaultsBaseMixin):
++    def test_no_args(self):
++        self._call_cmd([])
++        self.lib_command.assert_called_once_with(
++            {}, {}, nvset_rule=None, force_flags=set()
++        )
++
++    def test_no_values(self):
++        self._call_cmd(["meta", "rule"])
++        self.lib_command.assert_called_once_with(
++            {}, {}, nvset_rule=None, force_flags=set()
++        )
++
++    def test_bad_options_or_keyword(self):
++        with self.assertRaises(CmdLineInputError) as cm:
++            self._call_cmd(["aaa"])
++        self.assertEqual(
++            cm.exception.message, "missing value of 'aaa' option",
++        )
++        self.lib_command.assert_not_called()
++
++    def test_bad_values(self):
++        with self.assertRaises(CmdLineInputError) as cm:
++            self._call_cmd(["meta", "aaa"])
++        self.assertEqual(
++            cm.exception.message, "missing value of 'aaa' option",
++        )
++        self.lib_command.assert_not_called()
++
++    def test_options(self):
++        self._call_cmd(["id=custom-id", "score=10"])
++        self.lib_command.assert_called_once_with(
++            {},
++            {"id": "custom-id", "score": "10"},
++            nvset_rule=None,
++            force_flags=set(),
++        )
++
++    def test_nvpairs(self):
++        self._call_cmd(["meta", "name1=value1", "name2=value2"])
++        self.lib_command.assert_called_once_with(
++            {"name1": "value1", "name2": "value2"},
++            {},
++            nvset_rule=None,
++            force_flags=set(),
++        )
++
++    def test_rule(self):
++        self._call_cmd(["rule", "resource", "dummy", "or", "op", "monitor"])
++        self.lib_command.assert_called_once_with(
++            {},
++            {},
++            nvset_rule="resource dummy or op monitor",
++            force_flags=set(),
++        )
++
++    def test_force(self):
++        self._call_cmd([], {"force": True})
++        self.lib_command.assert_called_once_with(
++            {}, {}, nvset_rule=None, force_flags=set([report_codes.FORCE])
++        )
++
++    def test_all(self):
++        self._call_cmd(
++            [
++                "id=custom-id",
++                "score=10",
++                "meta",
++                "name1=value1",
++                "name2=value2",
++                "rule",
++                "resource",
++                "dummy",
++                "or",
++                "op",
++                "monitor",
++            ],
++            {"force": True},
++        )
++        self.lib_command.assert_called_once_with(
++            {"name1": "value1", "name2": "value2"},
++            {"id": "custom-id", "score": "10"},
++            nvset_rule="resource dummy or op monitor",
++            force_flags=set([report_codes.FORCE]),
++        )
++
++
++class RscDefaultsSetCreate(DefaultsSetCreateMixin, TestCase):
++    cli_command_name = "resource_defaults_set_create_cmd"
++    lib_command_name = "resource_defaults_create"
++
++
++class OpDefaultsSetCreate(DefaultsSetCreateMixin, TestCase):
++    cli_command_name = "resource_op_defaults_set_create_cmd"
++    lib_command_name = "operation_defaults_create"
++
++
++class DefaultsSetRemoveMixin(DefaultsBaseMixin):
++    def test_no_args(self):
++        self._call_cmd([])
++        self.lib_command.assert_called_once_with([])
++
++    def test_some_args(self):
++        self._call_cmd(["set1", "set2"])
++        self.lib_command.assert_called_once_with(["set1", "set2"])
++
++
++class RscDefaultsSetRemove(DefaultsSetRemoveMixin, TestCase):
++    cli_command_name = "resource_defaults_set_remove_cmd"
++    lib_command_name = "resource_defaults_remove"
++
++
++class OpDefaultsSetRemove(DefaultsSetRemoveMixin, TestCase):
++    cli_command_name = "resource_op_defaults_set_remove_cmd"
++    lib_command_name = "operation_defaults_remove"
++
++
++class DefaultsSetUpdateMixin(DefaultsBaseMixin):
++    def test_no_args(self):
++        with self.assertRaises(CmdLineInputError) as cm:
++            self._call_cmd([])
++        self.assertIsNone(cm.exception.message)
++        self.lib_command.assert_not_called()
++
++    def test_no_meta(self):
++        self._call_cmd(["nvset-id"])
++        self.lib_command.assert_called_once_with("nvset-id", {})
++
++    def test_no_meta_values(self):
++        self._call_cmd(["nvset-id", "meta"])
++        self.lib_command.assert_called_once_with("nvset-id", {})
++
++    def test_meta_values(self):
++        self._call_cmd(["nvset-id", "meta", "a=b", "c=d"])
++        self.lib_command.assert_called_once_with(
++            "nvset-id", {"a": "b", "c": "d"}
++        )
++
++
++class RscDefaultsSetUpdate(DefaultsSetUpdateMixin, TestCase):
++    cli_command_name = "resource_defaults_set_update_cmd"
++    lib_command_name = "resource_defaults_update"
++
++
++class OpDefaultsSetUpdate(DefaultsSetUpdateMixin, TestCase):
++    cli_command_name = "resource_op_defaults_set_update_cmd"
++    lib_command_name = "operation_defaults_update"
++
++
++class DefaultsUpdateMixin(DefaultsBaseMixin):
++    def test_no_args(self):
++        self._call_cmd([])
++        self.lib_command.assert_called_once_with(None, {})
++
++    def test_args(self):
++        self._call_cmd(["a=b", "c="])
++        self.lib_command.assert_called_once_with(None, {"a": "b", "c": ""})
++
++
++class RscDefaultsUpdate(DefaultsUpdateMixin, TestCase):
++    cli_command_name = "resource_defaults_legacy_cmd"
++    lib_command_name = "resource_defaults_update"
++
++
++class OpDefaultsUpdate(DefaultsUpdateMixin, TestCase):
++    cli_command_name = "resource_op_defaults_legacy_cmd"
++    lib_command_name = "operation_defaults_update"
+diff --git a/pcs_test/tier0/cli/test_nvset.py b/pcs_test/tier0/cli/test_nvset.py
+new file mode 100644
+index 00000000..675d2899
+--- /dev/null
++++ b/pcs_test/tier0/cli/test_nvset.py
+@@ -0,0 +1,92 @@
++import re
++from textwrap import dedent
++from unittest import TestCase
++
++from pcs.cli import nvset
++from pcs.common.pacemaker.nvset import (
++    CibNvpairDto,
++    CibNvsetDto,
++)
++from pcs.common.pacemaker.rule import CibRuleExpressionDto
++from pcs.common.types import (
++    CibNvsetType,
++    CibRuleExpressionType,
++)
++
++
++class NvsetDtoToLines(TestCase):
++    type_to_label = (
++        (CibNvsetType.META, "Meta Attrs"),
++        (CibNvsetType.INSTANCE, "Attributes"),
++    )
++
++    @staticmethod
++    def _export(dto, with_ids):
++        return (
++            "\n".join(nvset.nvset_dto_to_lines(dto, with_ids=with_ids)) + "\n"
++        )
++
++    def assert_lines(self, dto, lines):
++        self.assertEqual(
++            self._export(dto, True), lines,
++        )
++        self.assertEqual(
++            self._export(dto, False), re.sub(r" +\(id:.*\)", "", lines),
++        )
++
++    def test_minimal(self):
++        for nvtype, label in self.type_to_label:
++            with self.subTest(nvset_type=nvtype, lanel=label):
++                dto = CibNvsetDto("my-id", nvtype, {}, None, [])
++                output = dedent(
++                    f"""\
++                      {label}: my-id
++                    """
++                )
++                self.assert_lines(dto, output)
++
++    def test_full(self):
++        for nvtype, label in self.type_to_label:
++            with self.subTest(nvset_type=nvtype, lanel=label):
++                dto = CibNvsetDto(
++                    "my-id",
++                    nvtype,
++                    {"score": "150"},
++                    CibRuleExpressionDto(
++                        "my-id-rule",
++                        CibRuleExpressionType.RULE,
++                        False,
++                        {"boolean-op": "or"},
++                        None,
++                        None,
++                        [
++                            CibRuleExpressionDto(
++                                "my-id-rule-op",
++                                CibRuleExpressionType.OP_EXPRESSION,
++                                False,
++                                {"name": "monitor"},
++                                None,
++                                None,
++                                [],
++                                "op monitor",
++                            ),
++                        ],
++                        "op monitor",
++                    ),
++                    [
++                        CibNvpairDto("my-id-pair1", "name1", "value1"),
++                        CibNvpairDto("my-id-pair2", "name 2", "value 2"),
++                        CibNvpairDto("my-id-pair3", "name=3", "value=3"),
++                    ],
++                )
++                output = dedent(
++                    f"""\
++                    {label}: my-id score=150
++                      "name 2"="value 2"
++                      name1=value1
++                      "name=3"="value=3"
++                      Rule: boolean-op=or (id:my-id-rule)
++                        Expression: op monitor (id:my-id-rule-op)
++                    """
++                )
++                self.assert_lines(dto, output)
+diff --git a/pcs_test/tier0/cli/test_rule.py b/pcs_test/tier0/cli/test_rule.py
+new file mode 100644
+index 00000000..c3f6ddc4
+--- /dev/null
++++ b/pcs_test/tier0/cli/test_rule.py
+@@ -0,0 +1,477 @@
++import re
++from textwrap import dedent
++from unittest import TestCase
++
++from pcs.cli import rule
++from pcs.common.pacemaker.rule import (
++    CibRuleDateCommonDto,
++    CibRuleExpressionDto,
++)
++from pcs.common.types import CibRuleExpressionType
++
++
++class RuleDtoToLinesMixin:
++    @staticmethod
++    def _export(dto, with_ids):
++        return (
++            "\n".join(rule.rule_expression_dto_to_lines(dto, with_ids=with_ids))
++            + "\n"
++        )
++
++    def assert_lines(self, dto, lines):
++        self.assertEqual(
++            self._export(dto, True), lines,
++        )
++        self.assertEqual(
++            self._export(dto, False), re.sub(r" +\(id:.*\)", "", lines),
++        )
++
++
++class ExpressionDtoToLines(RuleDtoToLinesMixin, TestCase):
++    def test_defined(self):
++        dto = CibRuleExpressionDto(
++            "my-id",
++            CibRuleExpressionType.RULE,
++            False,
++            {},
++            None,
++            None,
++            [
++                CibRuleExpressionDto(
++                    "my-id-expr",
++                    CibRuleExpressionType.EXPRESSION,
++                    False,
++                    {"attribute": "pingd", "operation": "defined"},
++                    None,
++                    None,
++                    [],
++                    "defined pingd",
++                ),
++            ],
++            "defined pingd",
++        )
++        output = dedent(
++            """\
++              Rule: (id:my-id)
++                Expression: defined pingd (id:my-id-expr)
++            """
++        )
++        self.assert_lines(dto, output)
++
++    def test_value_comparison(self):
++        dto = CibRuleExpressionDto(
++            "my-id",
++            CibRuleExpressionType.RULE,
++            False,
++            {},
++            None,
++            None,
++            [
++                CibRuleExpressionDto(
++                    "my-id-expr",
++                    CibRuleExpressionType.EXPRESSION,
++                    False,
++                    {
++                        "attribute": "my-attr",
++                        "operation": "eq",
++                        "value": "my value",
++                    },
++                    None,
++                    None,
++                    [],
++                    "my-attr eq 'my value'",
++                ),
++            ],
++            "my-attr eq 'my value'",
++        )
++        output = dedent(
++            """\
++              Rule: (id:my-id)
++                Expression: my-attr eq 'my value' (id:my-id-expr)
++            """
++        )
++        self.assert_lines(dto, output)
++
++    def test_value_comparison_with_type(self):
++        dto = CibRuleExpressionDto(
++            "my-id",
++            CibRuleExpressionType.RULE,
++            False,
++            {},
++            None,
++            None,
++            [
++                CibRuleExpressionDto(
++                    "my-id-expr",
++                    CibRuleExpressionType.EXPRESSION,
++                    False,
++                    {
++                        "attribute": "foo",
++                        "operation": "gt",
++                        "type": "version",
++                        "value": "1.2.3",
++                    },
++                    None,
++                    None,
++                    [],
++                    "foo gt version 1.2.3",
++                ),
++            ],
++            "foo gt version 1.2.3",
++        )
++        output = dedent(
++            """\
++              Rule: (id:my-id)
++                Expression: foo gt version 1.2.3 (id:my-id-expr)
++            """
++        )
++        self.assert_lines(dto, output)
++
++
++class DateExpressionDtoToLines(RuleDtoToLinesMixin, TestCase):
++    def test_simple(self):
++        dto = CibRuleExpressionDto(
++            "rule",
++            CibRuleExpressionType.RULE,
++            False,
++            {},
++            None,
++            None,
++            [
++                CibRuleExpressionDto(
++                    "rule-expr",
++                    CibRuleExpressionType.DATE_EXPRESSION,
++                    False,
++                    {"operation": "gt", "start": "2014-06-26"},
++                    None,
++                    None,
++                    [],
++                    "date gt 2014-06-26",
++                ),
++            ],
++            "date gt 2014-06-26",
++        )
++        output = dedent(
++            """\
++              Rule: (id:rule)
++                Expression: date gt 2014-06-26 (id:rule-expr)
++            """
++        )
++        self.assert_lines(dto, output)
++
++    def test_datespec(self):
++        dto = CibRuleExpressionDto(
++            "rule",
++            CibRuleExpressionType.RULE,
++            False,
++            {},
++            None,
++            None,
++            [
++                CibRuleExpressionDto(
++                    "rule-expr",
++                    CibRuleExpressionType.DATE_EXPRESSION,
++                    False,
++                    {"operation": "date_spec"},
++                    CibRuleDateCommonDto(
++                        "rule-expr-datespec",
++                        {"hours": "1-14", "monthdays": "20-30", "months": "1"},
++                    ),
++                    None,
++                    [],
++                    "date-spec hours=1-14 monthdays=20-30 months=1",
++                ),
++            ],
++            "date-spec hours=1-14 monthdays=20-30 months=1",
++        )
++        output = dedent(
++            """\
++              Rule: (id:rule)
++                Expression: (id:rule-expr)
++                  Date Spec: hours=1-14 monthdays=20-30 months=1 (id:rule-expr-datespec)
++            """
++        )
++        self.assert_lines(dto, output)
++
++    def test_inrange(self):
++        dto = CibRuleExpressionDto(
++            "rule",
++            CibRuleExpressionType.RULE,
++            False,
++            {},
++            None,
++            None,
++            [
++                CibRuleExpressionDto(
++                    "rule-expr",
++                    CibRuleExpressionType.DATE_EXPRESSION,
++                    False,
++                    {
++                        "operation": "in_range",
++                        "start": "2014-06-26",
++                        "end": "2014-07-26",
++                    },
++                    None,
++                    None,
++                    [],
++                    "date in_range 2014-06-26 to 2014-07-26",
++                ),
++            ],
++            "date in_range 2014-06-26 to 2014-07-26",
++        )
++        output = dedent(
++            """\
++              Rule: (id:rule)
++                Expression: date in_range 2014-06-26 to 2014-07-26 (id:rule-expr)
++            """
++        )
++        self.assert_lines(dto, output)
++
++    def test_inrange_duration(self):
++        dto = CibRuleExpressionDto(
++            "rule",
++            CibRuleExpressionType.RULE,
++            False,
++            {},
++            None,
++            None,
++            [
++                CibRuleExpressionDto(
++                    "rule-expr",
++                    CibRuleExpressionType.DATE_EXPRESSION,
++                    False,
++                    {"operation": "in_range", "start": "2014-06-26",},
++                    None,
++                    CibRuleDateCommonDto("rule-expr-duration", {"years": "1"}),
++                    [],
++                    "date in_range 2014-06-26 to duration years=1",
++                ),
++            ],
++            "date in_range 2014-06-26 to duration years=1",
++        )
++        output = dedent(
++            """\
++              Rule: (id:rule)
++                Expression: date in_range 2014-06-26 to duration (id:rule-expr)
++                  Duration: years=1 (id:rule-expr-duration)
++            """
++        )
++        self.assert_lines(dto, output)
++
++
++class OpExpressionDtoToLines(RuleDtoToLinesMixin, TestCase):
++    def test_minimal(self):
++        dto = CibRuleExpressionDto(
++            "my-id",
++            CibRuleExpressionType.RULE,
++            False,
++            {},
++            None,
++            None,
++            [
++                CibRuleExpressionDto(
++                    "my-id-op",
++                    CibRuleExpressionType.OP_EXPRESSION,
++                    False,
++                    {"name": "start"},
++                    None,
++                    None,
++                    [],
++                    "op start",
++                ),
++            ],
++            "op start",
++        )
++        output = dedent(
++            """\
++              Rule: (id:my-id)
++                Expression: op start (id:my-id-op)
++            """
++        )
++        self.assert_lines(dto, output)
++
++    def test_interval(self):
++        dto = CibRuleExpressionDto(
++            "my-id",
++            CibRuleExpressionType.RULE,
++            False,
++            {},
++            None,
++            None,
++            [
++                CibRuleExpressionDto(
++                    "my-id-op",
++                    CibRuleExpressionType.OP_EXPRESSION,
++                    False,
++                    {"name": "start", "interval": "2min"},
++                    None,
++                    None,
++                    [],
++                    "op start interval=2min",
++                ),
++            ],
++            "op start interval=2min",
++        )
++        output = dedent(
++            """\
++              Rule: (id:my-id)
++                Expression: op start interval=2min (id:my-id-op)
++            """
++        )
++        self.assert_lines(dto, output)
++
++
++class ResourceExpressionDtoToLines(RuleDtoToLinesMixin, TestCase):
++    def test_success(self):
++        dto = CibRuleExpressionDto(
++            "my-id",
++            CibRuleExpressionType.RULE,
++            False,
++            {},
++            None,
++            None,
++            [
++                CibRuleExpressionDto(
++                    "my-id-expr",
++                    CibRuleExpressionType.RSC_EXPRESSION,
++                    False,
++                    {"class": "ocf", "provider": "pacemaker", "type": "Dummy"},
++                    None,
++                    None,
++                    [],
++                    "resource ocf:pacemaker:Dummy",
++                ),
++            ],
++            "resource ocf:pacemaker:Dummy",
++        )
++        output = dedent(
++            """\
++              Rule: (id:my-id)
++                Expression: resource ocf:pacemaker:Dummy (id:my-id-expr)
++            """
++        )
++        self.assert_lines(dto, output)
++
++
++class RuleDtoToLines(RuleDtoToLinesMixin, TestCase):
++    def test_complex_rule(self):
++        dto = CibRuleExpressionDto(
++            "complex",
++            CibRuleExpressionType.RULE,
++            False,
++            {"boolean-op": "or", "score": "INFINITY"},
++            None,
++            None,
++            [
++                CibRuleExpressionDto(
++                    "complex-rule-1",
++                    CibRuleExpressionType.RULE,
++                    False,
++                    {"boolean-op": "and", "score": "0"},
++                    None,
++                    None,
++                    [
++                        CibRuleExpressionDto(
++                            "complex-rule-1-expr",
++                            CibRuleExpressionType.DATE_EXPRESSION,
++                            False,
++                            {"operation": "date_spec"},
++                            CibRuleDateCommonDto(
++                                "complex-rule-1-expr-datespec",
++                                {"hours": "12-23", "weekdays": "1-5"},
++                            ),
++                            None,
++                            [],
++                            "date-spec hours=12-23 weekdays=1-5",
++                        ),
++                        CibRuleExpressionDto(
++                            "complex-rule-1-expr-1",
++                            CibRuleExpressionType.DATE_EXPRESSION,
++                            False,
++                            {"operation": "in_range", "start": "2014-07-26",},
++                            None,
++                            CibRuleDateCommonDto(
++                                "complex-rule-1-expr-1-durat", {"months": "1"},
++                            ),
++                            [],
++                            "date in_range 2014-07-26 to duration months=1",
++                        ),
++                    ],
++                    "date-spec hours=12-23 weekdays=1-5 and date in_range "
++                    "2014-07-26 to duration months=1",
++                ),
++                CibRuleExpressionDto(
++                    "complex-rule",
++                    CibRuleExpressionType.RULE,
++                    False,
++                    {"boolean-op": "and", "score": "0"},
++                    None,
++                    None,
++                    [
++                        CibRuleExpressionDto(
++                            "complex-rule-expr-1",
++                            CibRuleExpressionType.EXPRESSION,
++                            False,
++                            {
++                                "attribute": "foo",
++                                "operation": "gt",
++                                "type": "version",
++                                "value": "1.2",
++                            },
++                            None,
++                            None,
++                            [],
++                            "foo gt version 1.2",
++                        ),
++                        CibRuleExpressionDto(
++                            "complex-rule-expr",
++                            CibRuleExpressionType.EXPRESSION,
++                            False,
++                            {
++                                "attribute": "#uname",
++                                "operation": "eq",
++                                "value": "node3 4",
++                            },
++                            None,
++                            None,
++                            [],
++                            "#uname eq 'node3 4'",
++                        ),
++                        CibRuleExpressionDto(
++                            "complex-rule-expr-2",
++                            CibRuleExpressionType.EXPRESSION,
++                            False,
++                            {
++                                "attribute": "#uname",
++                                "operation": "eq",
++                                "value": "nodeA",
++                            },
++                            None,
++                            None,
++                            [],
++                            "#uname eq nodeA",
++                        ),
++                    ],
++                    "foo gt version 1.2 and #uname eq 'node3 4' and #uname "
++                    "eq nodeA",
++                ),
++            ],
++            "(date-spec hours=12-23 weekdays=1-5 and date in_range "
++            "2014-07-26 to duration months=1) or (foo gt version 1.2 and "
++            "#uname eq 'node3 4' and #uname eq nodeA)",
++        )
++        output = dedent(
++            """\
++            Rule: boolean-op=or score=INFINITY (id:complex)
++              Rule: boolean-op=and score=0 (id:complex-rule-1)
++                Expression: (id:complex-rule-1-expr)
++                  Date Spec: hours=12-23 weekdays=1-5 (id:complex-rule-1-expr-datespec)
++                Expression: date in_range 2014-07-26 to duration (id:complex-rule-1-expr-1)
++                  Duration: months=1 (id:complex-rule-1-expr-1-durat)
++              Rule: boolean-op=and score=0 (id:complex-rule)
++                Expression: foo gt version 1.2 (id:complex-rule-expr-1)
++                Expression: #uname eq 'node3 4' (id:complex-rule-expr)
++                Expression: #uname eq nodeA (id:complex-rule-expr-2)
++            """
++        )
++        self.assert_lines(dto, output)
+diff --git a/pcs_test/tier0/common/reports/test_messages.py b/pcs_test/tier0/common/reports/test_messages.py
+index 2592bd40..fd217ffb 100644
+--- a/pcs_test/tier0/common/reports/test_messages.py
++++ b/pcs_test/tier0/common/reports/test_messages.py
+@@ -1,16 +1,17 @@
+ from unittest import TestCase
+ 
+ from pcs.common import file_type_codes
+-from pcs.common.file import RawFileError
+-from pcs.common.reports import (
+-    const,
+-    messages as reports,
+-)
+ from pcs.common.fencing_topology import (
+     TARGET_TYPE_NODE,
+     TARGET_TYPE_REGEXP,
+     TARGET_TYPE_ATTRIBUTE,
+ )
++from pcs.common.file import RawFileError
++from pcs.common.reports import (
++    const,
++    messages as reports,
++)
++from pcs.common.types import CibRuleExpressionType
+ 
+ # pylint: disable=too-many-lines
+ 
+@@ -4653,3 +4654,47 @@ class TagIdsNotInTheTag(NameBuildTest):
+             "Tag 'tag-id' does not contain ids: 'a', 'b'",
+             reports.TagIdsNotInTheTag("tag-id", ["b", "a"]),
+         )
++
++
++class RuleExpressionParseError(NameBuildTest):
++    def test_success(self):
++        self.assert_message_from_report(
++            "'resource dummy op monitor' is not a valid rule expression, "
++            "parse error near or after line 1 column 16",
++            reports.RuleExpressionParseError(
++                "resource dummy op monitor",
++                "Expected end of text",
++                "resource dummy op monitor",
++                1,
++                16,
++                15,
++            ),
++        )
++
++
++class RuleExpressionNotAllowed(NameBuildTest):
++    def test_op(self):
++        self.assert_message_from_report(
++            "Keyword 'op' cannot be used in a rule in this command",
++            reports.RuleExpressionNotAllowed(
++                CibRuleExpressionType.OP_EXPRESSION
++            ),
++        )
++
++    def test_rsc(self):
++        self.assert_message_from_report(
++            "Keyword 'resource' cannot be used in a rule in this command",
++            reports.RuleExpressionNotAllowed(
++                CibRuleExpressionType.RSC_EXPRESSION
++            ),
++        )
++
++
++class CibNvsetAmbiguousProvideNvsetId(NameBuildTest):
++    def test_success(self):
++        self.assert_message_from_report(
++            "Several options sets exist, please specify an option set ID",
++            reports.CibNvsetAmbiguousProvideNvsetId(
++                const.PCS_COMMAND_RESOURCE_DEFAULTS_UPDATE
++            ),
++        )
+diff --git a/pcs_test/tier0/common/test_str_tools.py b/pcs_test/tier0/common/test_str_tools.py
+index c4753437..97c1d223 100644
+--- a/pcs_test/tier0/common/test_str_tools.py
++++ b/pcs_test/tier0/common/test_str_tools.py
+@@ -249,6 +249,39 @@ class FormatListCustomLastSeparatort(TestCase):
+         )
+ 
+ 
++class FormatNameValueList(TestCase):
++    def test_empty(self):
++        self.assertEqual([], tools.format_name_value_list([]))
++
++    def test_many(self):
++        self.assertEqual(
++            ["name1=value1", '"name=2"="value 2"', '"name 3"="value=3"'],
++            tools.format_name_value_list(
++                [
++                    ("name1", "value1"),
++                    ("name=2", "value 2"),
++                    ("name 3", "value=3"),
++                ]
++            ),
++        )
++
++
++class Quote(TestCase):
++    def test_no_quote(self):
++        self.assertEqual("string", tools.quote("string", " "))
++        self.assertEqual("string", tools.quote("string", " ="))
++
++    def test_quote(self):
++        self.assertEqual('"str ing"', tools.quote("str ing", " ="))
++        self.assertEqual('"str=ing"', tools.quote("str=ing", " ="))
++
++    def test_alternative_quote(self):
++        self.assertEqual("""'st"r i"ng'""", tools.quote('st"r i"ng', " "))
++
++    def test_escape(self):
++        self.assertEqual('''"st\\"r i'ng"''', tools.quote("st\"r i'ng", " "))
++
++
+ class Transform(TestCase):
+     def test_transform(self):
+         self.assertEqual(
+diff --git a/pcs_test/tier0/lib/commands/cib_options/__init__.py b/pcs_test/tier0/lib/cib/rule/__init__.py
+similarity index 100%
+rename from pcs_test/tier0/lib/commands/cib_options/__init__.py
+rename to pcs_test/tier0/lib/cib/rule/__init__.py
+diff --git a/pcs_test/tier0/lib/cib/rule/test_cib_to_dto.py b/pcs_test/tier0/lib/cib/rule/test_cib_to_dto.py
+new file mode 100644
+index 00000000..ce06c469
+--- /dev/null
++++ b/pcs_test/tier0/lib/cib/rule/test_cib_to_dto.py
+@@ -0,0 +1,593 @@
++from unittest import TestCase
++
++from lxml import etree
++
++from pcs.common.pacemaker.rule import (
++    CibRuleDateCommonDto,
++    CibRuleExpressionDto,
++)
++from pcs.common.types import CibRuleExpressionType
++from pcs.lib.cib.rule import rule_element_to_dto
++
++
++class ExpressionToDto(TestCase):
++    def test_defined(self):
++        xml = etree.fromstring(
++            """
++            <rule id="my-id">
++                <expression id="my-id-expr"
++                    attribute="pingd" operation="defined"
++                />
++            </rule>
++        """
++        )
++        self.assertEqual(
++            rule_element_to_dto(xml),
++            CibRuleExpressionDto(
++                "my-id",
++                CibRuleExpressionType.RULE,
++                False,
++                {},
++                None,
++                None,
++                [
++                    CibRuleExpressionDto(
++                        "my-id-expr",
++                        CibRuleExpressionType.EXPRESSION,
++                        False,
++                        {"attribute": "pingd", "operation": "defined"},
++                        None,
++                        None,
++                        [],
++                        "defined pingd",
++                    ),
++                ],
++                "defined pingd",
++            ),
++        )
++
++    def test_value_comparison(self):
++        xml = etree.fromstring(
++            """
++            <rule id="my-id">
++                <expression id="my-id-expr"
++                    attribute="my-attr" operation="eq" value="my value"
++                />
++            </rule>
++        """
++        )
++        self.assertEqual(
++            rule_element_to_dto(xml),
++            CibRuleExpressionDto(
++                "my-id",
++                CibRuleExpressionType.RULE,
++                False,
++                {},
++                None,
++                None,
++                [
++                    CibRuleExpressionDto(
++                        "my-id-expr",
++                        CibRuleExpressionType.EXPRESSION,
++                        False,
++                        {
++                            "attribute": "my-attr",
++                            "operation": "eq",
++                            "value": "my value",
++                        },
++                        None,
++                        None,
++                        [],
++                        'my-attr eq "my value"',
++                    ),
++                ],
++                'my-attr eq "my value"',
++            ),
++        )
++
++    def test_value_comparison_with_type(self):
++        xml = etree.fromstring(
++            """
++            <rule id="my-id">
++                <expression id="my-id-expr"
++                    attribute="foo" operation="gt" type="version" value="1.2.3"
++                />
++            </rule>
++        """
++        )
++        self.assertEqual(
++            rule_element_to_dto(xml),
++            CibRuleExpressionDto(
++                "my-id",
++                CibRuleExpressionType.RULE,
++                False,
++                {},
++                None,
++                None,
++                [
++                    CibRuleExpressionDto(
++                        "my-id-expr",
++                        CibRuleExpressionType.EXPRESSION,
++                        False,
++                        {
++                            "attribute": "foo",
++                            "operation": "gt",
++                            "type": "version",
++                            "value": "1.2.3",
++                        },
++                        None,
++                        None,
++                        [],
++                        "foo gt version 1.2.3",
++                    ),
++                ],
++                "foo gt version 1.2.3",
++            ),
++        )
++
++
++class DateExpressionToDto(TestCase):
++    def test_gt(self):
++        xml = etree.fromstring(
++            """
++            <rule id="rule">
++                <date_expression id="rule-expr"
++                    operation="gt" start="2014-06-26"
++                />
++            </rule>
++        """
++        )
++        self.assertEqual(
++            rule_element_to_dto(xml),
++            CibRuleExpressionDto(
++                "rule",
++                CibRuleExpressionType.RULE,
++                False,
++                {},
++                None,
++                None,
++                [
++                    CibRuleExpressionDto(
++                        "rule-expr",
++                        CibRuleExpressionType.DATE_EXPRESSION,
++                        False,
++                        {"operation": "gt", "start": "2014-06-26"},
++                        None,
++                        None,
++                        [],
++                        "date gt 2014-06-26",
++                    ),
++                ],
++                "date gt 2014-06-26",
++            ),
++        )
++
++    def test_lt(self):
++        xml = etree.fromstring(
++            """
++            <rule id="rule">
++                <date_expression id="rule-expr"
++                    operation="lt" end="2014-06-26"
++                />
++            </rule>
++        """
++        )
++        self.assertEqual(
++            rule_element_to_dto(xml),
++            CibRuleExpressionDto(
++                "rule",
++                CibRuleExpressionType.RULE,
++                False,
++                {},
++                None,
++                None,
++                [
++                    CibRuleExpressionDto(
++                        "rule-expr",
++                        CibRuleExpressionType.DATE_EXPRESSION,
++                        False,
++                        {"operation": "lt", "end": "2014-06-26"},
++                        None,
++                        None,
++                        [],
++                        "date lt 2014-06-26",
++                    ),
++                ],
++                "date lt 2014-06-26",
++            ),
++        )
++
++    def test_datespec(self):
++        xml = etree.fromstring(
++            """
++            <rule id="rule">
++                <date_expression id="rule-expr" operation="date_spec">
++                    <date_spec id="rule-expr-datespec"
++                        hours="1-14" monthdays="20-30" months="1"
++                    />
++                </date_expression>
++            </rule>
++        """
++        )
++        self.assertEqual(
++            rule_element_to_dto(xml),
++            CibRuleExpressionDto(
++                "rule",
++                CibRuleExpressionType.RULE,
++                False,
++                {},
++                None,
++                None,
++                [
++                    CibRuleExpressionDto(
++                        "rule-expr",
++                        CibRuleExpressionType.DATE_EXPRESSION,
++                        False,
++                        {"operation": "date_spec"},
++                        CibRuleDateCommonDto(
++                            "rule-expr-datespec",
++                            {
++                                "hours": "1-14",
++                                "monthdays": "20-30",
++                                "months": "1",
++                            },
++                        ),
++                        None,
++                        [],
++                        "date-spec hours=1-14 monthdays=20-30 months=1",
++                    ),
++                ],
++                "date-spec hours=1-14 monthdays=20-30 months=1",
++            ),
++        )
++
++    def test_inrange(self):
++        xml = etree.fromstring(
++            """
++            <rule id="rule">
++                <date_expression id="rule-expr"
++                    operation="in_range" start="2014-06-26" end="2014-07-26"
++                />
++            </rule>
++        """
++        )
++        self.assertEqual(
++            rule_element_to_dto(xml),
++            CibRuleExpressionDto(
++                "rule",
++                CibRuleExpressionType.RULE,
++                False,
++                {},
++                None,
++                None,
++                [
++                    CibRuleExpressionDto(
++                        "rule-expr",
++                        CibRuleExpressionType.DATE_EXPRESSION,
++                        False,
++                        {
++                            "operation": "in_range",
++                            "start": "2014-06-26",
++                            "end": "2014-07-26",
++                        },
++                        None,
++                        None,
++                        [],
++                        "date in_range 2014-06-26 to 2014-07-26",
++                    ),
++                ],
++                "date in_range 2014-06-26 to 2014-07-26",
++            ),
++        )
++
++    def test_inrange_duration(self):
++        xml = etree.fromstring(
++            """
++            <rule id="rule">
++                <date_expression id="rule-expr"
++                    operation="in_range" start="2014-06-26"
++                >
++                    <duration id="rule-expr-duration" years="1"/>
++                </date_expression>
++            </rule>
++        """
++        )
++        self.assertEqual(
++            rule_element_to_dto(xml),
++            CibRuleExpressionDto(
++                "rule",
++                CibRuleExpressionType.RULE,
++                False,
++                {},
++                None,
++                None,
++                [
++                    CibRuleExpressionDto(
++                        "rule-expr",
++                        CibRuleExpressionType.DATE_EXPRESSION,
++                        False,
++                        {"operation": "in_range", "start": "2014-06-26",},
++                        None,
++                        CibRuleDateCommonDto(
++                            "rule-expr-duration", {"years": "1"},
++                        ),
++                        [],
++                        "date in_range 2014-06-26 to duration years=1",
++                    ),
++                ],
++                "date in_range 2014-06-26 to duration years=1",
++            ),
++        )
++
++
++class OpExpressionToDto(TestCase):
++    def test_minimal(self):
++        xml = etree.fromstring(
++            """
++            <rule id="my-id">
++                <op_expression id="my-id-op" name="start" />
++            </rule>
++        """
++        )
++        self.assertEqual(
++            rule_element_to_dto(xml),
++            CibRuleExpressionDto(
++                "my-id",
++                CibRuleExpressionType.RULE,
++                False,
++                {},
++                None,
++                None,
++                [
++                    CibRuleExpressionDto(
++                        "my-id-op",
++                        CibRuleExpressionType.OP_EXPRESSION,
++                        False,
++                        {"name": "start"},
++                        None,
++                        None,
++                        [],
++                        "op start",
++                    ),
++                ],
++                "op start",
++            ),
++        )
++
++    def test_interval(self):
++        xml = etree.fromstring(
++            """
++            <rule id="my-id">
++                <op_expression id="my-id-op" name="start" interval="2min" />
++            </rule>
++        """
++        )
++        self.assertEqual(
++            rule_element_to_dto(xml),
++            CibRuleExpressionDto(
++                "my-id",
++                CibRuleExpressionType.RULE,
++                False,
++                {},
++                None,
++                None,
++                [
++                    CibRuleExpressionDto(
++                        "my-id-op",
++                        CibRuleExpressionType.OP_EXPRESSION,
++                        False,
++                        {"name": "start", "interval": "2min"},
++                        None,
++                        None,
++                        [],
++                        "op start interval=2min",
++                    ),
++                ],
++                "op start interval=2min",
++            ),
++        )
++
++
++class ResourceExpressionToDto(TestCase):
++    def test_success(self):
++        test_data = [
++            # ((class, provider, type), output)
++            ((None, None, None), "::"),
++            (("ocf", None, None), "ocf::"),
++            ((None, "pacemaker", None), ":pacemaker:"),
++            ((None, None, "Dummy"), "::Dummy"),
++            (("ocf", "pacemaker", None), "ocf:pacemaker:"),
++            (("ocf", None, "Dummy"), "ocf::Dummy"),
++            ((None, "pacemaker", "Dummy"), ":pacemaker:Dummy"),
++            (("ocf", "pacemaker", "Dummy"), "ocf:pacemaker:Dummy"),
++        ]
++        for in_data, out_data in test_data:
++            with self.subTest(in_data=in_data):
++                attrs = {}
++                if in_data[0] is not None:
++                    attrs["class"] = in_data[0]
++                if in_data[1] is not None:
++                    attrs["provider"] = in_data[1]
++                if in_data[2] is not None:
++                    attrs["type"] = in_data[2]
++                attrs_str = " ".join(
++                    [f"{name}='{value}'" for name, value in attrs.items()]
++                )
++                xml = etree.fromstring(
++                    f"""
++                    <rule id="my-id">
++                        <rsc_expression id="my-id-expr" {attrs_str}/>
++                    </rule>
++                """
++                )
++                self.assertEqual(
++                    rule_element_to_dto(xml),
++                    CibRuleExpressionDto(
++                        "my-id",
++                        CibRuleExpressionType.RULE,
++                        False,
++                        {},
++                        None,
++                        None,
++                        [
++                            CibRuleExpressionDto(
++                                "my-id-expr",
++                                CibRuleExpressionType.RSC_EXPRESSION,
++                                False,
++                                attrs,
++                                None,
++                                None,
++                                [],
++                                f"resource {out_data}",
++                            ),
++                        ],
++                        f"resource {out_data}",
++                    ),
++                )
++
++
++class RuleToDto(TestCase):
++    def test_complex_rule(self):
++        xml = etree.fromstring(
++            """
++            <rule id="complex" boolean-op="or" score="INFINITY">
++                <rule id="complex-rule-1" boolean-op="and" score="0">
++                    <date_expression id="complex-rule-1-expr"
++                        operation="date_spec"
++                    >
++                        <date_spec id="complex-rule-1-expr-datespec"
++                            weekdays="1-5" hours="12-23"
++                        />
++                    </date_expression>
++                    <date_expression id="complex-rule-1-expr-1"
++                        operation="in_range" start="2014-07-26"
++                    >
++                        <duration id="complex-rule-1-expr-1-durat" months="1"/>
++                    </date_expression>
++                </rule>
++                <rule id="complex-rule" boolean-op="and" score="0">
++                    <expression id="complex-rule-expr-1"
++                        attribute="foo" operation="gt" type="version" value="1.2"
++                    />
++                    <expression id="complex-rule-expr"
++                        attribute="#uname" operation="eq" value="node3 4"
++                    />
++                    <expression id="complex-rule-expr-2"
++                        attribute="#uname" operation="eq" value="nodeA"
++                    />
++                </rule>
++            </rule>
++        """
++        )
++        self.assertEqual(
++            rule_element_to_dto(xml),
++            CibRuleExpressionDto(
++                "complex",
++                CibRuleExpressionType.RULE,
++                False,
++                {"boolean-op": "or", "score": "INFINITY"},
++                None,
++                None,
++                [
++                    CibRuleExpressionDto(
++                        "complex-rule-1",
++                        CibRuleExpressionType.RULE,
++                        False,
++                        {"boolean-op": "and", "score": "0"},
++                        None,
++                        None,
++                        [
++                            CibRuleExpressionDto(
++                                "complex-rule-1-expr",
++                                CibRuleExpressionType.DATE_EXPRESSION,
++                                False,
++                                {"operation": "date_spec"},
++                                CibRuleDateCommonDto(
++                                    "complex-rule-1-expr-datespec",
++                                    {"hours": "12-23", "weekdays": "1-5"},
++                                ),
++                                None,
++                                [],
++                                "date-spec hours=12-23 weekdays=1-5",
++                            ),
++                            CibRuleExpressionDto(
++                                "complex-rule-1-expr-1",
++                                CibRuleExpressionType.DATE_EXPRESSION,
++                                False,
++                                {
++                                    "operation": "in_range",
++                                    "start": "2014-07-26",
++                                },
++                                None,
++                                CibRuleDateCommonDto(
++                                    "complex-rule-1-expr-1-durat",
++                                    {"months": "1"},
++                                ),
++                                [],
++                                "date in_range 2014-07-26 to duration months=1",
++                            ),
++                        ],
++                        "date-spec hours=12-23 weekdays=1-5 and date in_range "
++                        "2014-07-26 to duration months=1",
++                    ),
++                    CibRuleExpressionDto(
++                        "complex-rule",
++                        CibRuleExpressionType.RULE,
++                        False,
++                        {"boolean-op": "and", "score": "0"},
++                        None,
++                        None,
++                        [
++                            CibRuleExpressionDto(
++                                "complex-rule-expr-1",
++                                CibRuleExpressionType.EXPRESSION,
++                                False,
++                                {
++                                    "attribute": "foo",
++                                    "operation": "gt",
++                                    "type": "version",
++                                    "value": "1.2",
++                                },
++                                None,
++                                None,
++                                [],
++                                "foo gt version 1.2",
++                            ),
++                            CibRuleExpressionDto(
++                                "complex-rule-expr",
++                                CibRuleExpressionType.EXPRESSION,
++                                False,
++                                {
++                                    "attribute": "#uname",
++                                    "operation": "eq",
++                                    "value": "node3 4",
++                                },
++                                None,
++                                None,
++                                [],
++                                '#uname eq "node3 4"',
++                            ),
++                            CibRuleExpressionDto(
++                                "complex-rule-expr-2",
++                                CibRuleExpressionType.EXPRESSION,
++                                False,
++                                {
++                                    "attribute": "#uname",
++                                    "operation": "eq",
++                                    "value": "nodeA",
++                                },
++                                None,
++                                None,
++                                [],
++                                "#uname eq nodeA",
++                            ),
++                        ],
++                        'foo gt version 1.2 and #uname eq "node3 4" and #uname '
++                        "eq nodeA",
++                    ),
++                ],
++                "(date-spec hours=12-23 weekdays=1-5 and date in_range "
++                "2014-07-26 to duration months=1) or (foo gt version 1.2 and "
++                '#uname eq "node3 4" and #uname eq nodeA)',
++            ),
++        )
+diff --git a/pcs_test/tier0/lib/cib/rule/test_parsed_to_cib.py b/pcs_test/tier0/lib/cib/rule/test_parsed_to_cib.py
+new file mode 100644
+index 00000000..f61fce99
+--- /dev/null
++++ b/pcs_test/tier0/lib/cib/rule/test_parsed_to_cib.py
+@@ -0,0 +1,214 @@
++from unittest import TestCase
++
++from lxml import etree
++
++from pcs_test.tools.assertions import assert_xml_equal
++from pcs_test.tools.xml import etree_to_str
++
++from pcs.lib.cib import rule
++from pcs.lib.cib.rule.expression_part import (
++    BOOL_AND,
++    BOOL_OR,
++    BoolExpr,
++    OpExpr,
++    RscExpr,
++)
++from pcs.lib.cib.tools import IdProvider
++
++
++class Base(TestCase):
++    @staticmethod
++    def assert_cib(tree, expected_xml):
++        xml = etree.fromstring('<root id="X"/>')
++        rule.rule_to_cib(xml, IdProvider(xml), tree)
++        assert_xml_equal(
++            '<root id="X">' + expected_xml + "</root>", etree_to_str(xml)
++        )
++
++
++class SimpleBool(Base):
++    def test_no_children(self):
++        self.assert_cib(
++            BoolExpr(BOOL_AND, []),
++            """
++                <rule id="X-rule" boolean-op="and" score="INFINITY" />
++            """,
++        )
++
++    def test_one_child(self):
++        self.assert_cib(
++            BoolExpr(BOOL_AND, [OpExpr("start", None)]),
++            """
++                <rule id="X-rule" boolean-op="and" score="INFINITY">
++                    <op_expression id="X-rule-op-start" name="start" />
++                </rule>
++            """,
++        )
++
++    def test_two_children(self):
++        operators = [
++            (BOOL_OR, "or"),
++            (BOOL_AND, "and"),
++        ]
++        for op_in, op_out in operators:
++            with self.subTest(op_in=op_in, op_out=op_out):
++                self.assert_cib(
++                    BoolExpr(
++                        op_in,
++                        [
++                            OpExpr("start", None),
++                            RscExpr("systemd", None, "pcsd"),
++                        ],
++                    ),
++                    f"""
++                        <rule id="X-rule" boolean-op="{op_out}" score="INFINITY">
++                            <op_expression id="X-rule-op-start" name="start" />
++                            <rsc_expression id="X-rule-rsc-systemd-pcsd"
++                                class="systemd" type="pcsd"
++                            />
++                        </rule>
++                    """,
++                )
++
++
++class SimpleOp(Base):
++    def test_minimal(self):
++        self.assert_cib(
++            OpExpr("start", None),
++            """
++                <op_expression id="X-op-start" name="start" />
++            """,
++        )
++
++    def test_interval(self):
++        self.assert_cib(
++            OpExpr("monitor", "2min"),
++            """
++                <op_expression id="X-op-monitor" name="monitor"
++                    interval="2min"
++                />
++            """,
++        )
++
++
++class SimpleRsc(Base):
++    def test_class(self):
++        self.assert_cib(
++            RscExpr("ocf", None, None),
++            """
++                <rsc_expression id="X-rsc-ocf" class="ocf" />
++            """,
++        )
++
++    def test_provider(self):
++        self.assert_cib(
++            RscExpr(None, "pacemaker", None),
++            """
++                <rsc_expression id="X-rsc-pacemaker" provider="pacemaker" />
++            """,
++        )
++
++    def type(self):
++        self.assert_cib(
++            RscExpr(None, None, "Dummy"),
++            """
++                <rsc_expression id="X-rsc-Dummy" type="Dummy" />
++            """,
++        )
++
++    def test_provider_type(self):
++        self.assert_cib(
++            RscExpr(None, "pacemaker", "Dummy"),
++            """
++                <rsc_expression id="X-rsc-pacemaker-Dummy"
++                    provider="pacemaker" type="Dummy"
++                />
++            """,
++        )
++
++    def test_class_provider(self):
++        self.assert_cib(
++            RscExpr("ocf", "pacemaker", None),
++            """
++                <rsc_expression id="X-rsc-ocf-pacemaker"
++                    class="ocf" provider="pacemaker"
++                />
++            """,
++        )
++
++    def test_class_type(self):
++        self.assert_cib(
++            RscExpr("systemd", None, "pcsd"),
++            """
++                <rsc_expression id="X-rsc-systemd-pcsd"
++                    class="systemd" type="pcsd"
++                />
++            """,
++        )
++
++    def test_class_provider_type(self):
++        self.assert_cib(
++            RscExpr("ocf", "pacemaker", "Dummy"),
++            """
++                <rsc_expression id="X-rsc-ocf-pacemaker-Dummy"
++                    class="ocf" provider="pacemaker" type="Dummy"
++                />
++            """,
++        )
++
++
++class Complex(Base):
++    def test_expr_1(self):
++        self.assert_cib(
++            BoolExpr(
++                BOOL_AND,
++                [
++                    BoolExpr(
++                        BOOL_OR,
++                        [
++                            RscExpr("ocf", "pacemaker", "Dummy"),
++                            OpExpr("start", None),
++                            RscExpr("systemd", None, "pcsd"),
++                            RscExpr("ocf", "heartbeat", "Dummy"),
++                        ],
++                    ),
++                    BoolExpr(
++                        BOOL_OR,
++                        [
++                            OpExpr("monitor", "30s"),
++                            RscExpr("ocf", "pacemaker", "Dummy"),
++                            OpExpr("start", None),
++                            OpExpr("monitor", "2min"),
++                        ],
++                    ),
++                ],
++            ),
++            """
++                <rule id="X-rule" boolean-op="and" score="INFINITY">
++                  <rule id="X-rule-rule" boolean-op="or">
++                    <rsc_expression id="X-rule-rule-rsc-ocf-pacemaker-Dummy"
++                        class="ocf" provider="pacemaker" type="Dummy"
++                    />
++                    <op_expression id="X-rule-rule-op-start" name="start" />
++                    <rsc_expression id="X-rule-rule-rsc-systemd-pcsd"
++                        class="systemd" type="pcsd"
++                    />
++                    <rsc_expression id="X-rule-rule-rsc-ocf-heartbeat-Dummy"
++                        class="ocf" provider="heartbeat" type="Dummy"
++                    />
++                  </rule>
++                  <rule id="X-rule-rule-1" boolean-op="or">
++                    <op_expression id="X-rule-rule-1-op-monitor"
++                        name="monitor" interval="30s"
++                    />
++                    <rsc_expression id="X-rule-rule-1-rsc-ocf-pacemaker-Dummy"
++                        class="ocf" provider="pacemaker" type="Dummy"
++                    />
++                    <op_expression id="X-rule-rule-1-op-start" name="start" />
++                    <op_expression id="X-rule-rule-1-op-monitor-1"
++                        name="monitor" interval="2min"
++                    />
++                  </rule>
++                </rule>
++            """,
++        )
+diff --git a/pcs_test/tier0/lib/cib/rule/test_parser.py b/pcs_test/tier0/lib/cib/rule/test_parser.py
+new file mode 100644
+index 00000000..110fc739
+--- /dev/null
++++ b/pcs_test/tier0/lib/cib/rule/test_parser.py
+@@ -0,0 +1,270 @@
++from dataclasses import fields
++from textwrap import dedent
++from unittest import TestCase
++
++from pcs.common.str_tools import indent
++from pcs.lib.cib import rule
++from pcs.lib.cib.rule.expression_part import BoolExpr
++
++
++def _parsed_to_str(parsed):
++    if isinstance(parsed, BoolExpr):
++        str_args = []
++        for arg in parsed.children:
++            str_args.extend(_parsed_to_str(arg).splitlines())
++        return "\n".join(
++            [f"{parsed.__class__.__name__} {parsed.operator}"]
++            + indent(str_args)
++        )
++
++    parts = [parsed.__class__.__name__]
++    for field in fields(parsed):
++        value = getattr(parsed, field.name)
++        if value is not None:
++            parts.append(f"{field.name}={value}")
++    return " ".join(parts)
++
++
++class Parser(TestCase):
++    def test_success_parse_to_tree(self):
++        test_data = [
++            ("", "BoolExpr AND"),
++            (
++                "resource ::",
++                dedent(
++                    """\
++                    BoolExpr AND
++                      RscExpr"""
++                ),
++            ),
++            (
++                "resource ::dummy",
++                dedent(
++                    """\
++                    BoolExpr AND
++                      RscExpr type=dummy"""
++                ),
++            ),
++            (
++                "resource ocf::",
++                dedent(
++                    """\
++                    BoolExpr AND
++                      RscExpr standard=ocf"""
++                ),
++            ),
++            (
++                "resource :pacemaker:",
++                dedent(
++                    """\
++                    BoolExpr AND
++                      RscExpr provider=pacemaker"""
++                ),
++            ),
++            (
++                "resource systemd::Dummy",
++                dedent(
++                    """\
++                    BoolExpr AND
++                      RscExpr standard=systemd type=Dummy"""
++                ),
++            ),
++            (
++                "resource ocf:pacemaker:",
++                dedent(
++                    """\
++                    BoolExpr AND
++                      RscExpr standard=ocf provider=pacemaker"""
++                ),
++            ),
++            (
++                "resource :pacemaker:Dummy",
++                dedent(
++                    """\
++                    BoolExpr AND
++                      RscExpr provider=pacemaker type=Dummy"""
++                ),
++            ),
++            (
++                "resource ocf:pacemaker:Dummy",
++                dedent(
++                    """\
++                    BoolExpr AND
++                      RscExpr standard=ocf provider=pacemaker type=Dummy"""
++                ),
++            ),
++            (
++                "op monitor",
++                dedent(
++                    """\
++                    BoolExpr AND
++                      OpExpr name=monitor"""
++                ),
++            ),
++            (
++                "op monitor interval=10",
++                dedent(
++                    """\
++                    BoolExpr AND
++                      OpExpr name=monitor interval=10"""
++                ),
++            ),
++            (
++                "resource ::dummy and op monitor",
++                dedent(
++                    """\
++                    BoolExpr AND
++                      RscExpr type=dummy
++                      OpExpr name=monitor"""
++                ),
++            ),
++            (
++                "resource ::dummy or op monitor interval=15s",
++                dedent(
++                    """\
++                    BoolExpr OR
++                      RscExpr type=dummy
++                      OpExpr name=monitor interval=15s"""
++                ),
++            ),
++            (
++                "op monitor and resource ::dummy",
++                dedent(
++                    """\
++                    BoolExpr AND
++                      OpExpr name=monitor
++                      RscExpr type=dummy"""
++                ),
++            ),
++            (
++                "op monitor interval=5min or resource ::dummy",
++                dedent(
++                    """\
++                    BoolExpr OR
++                      OpExpr name=monitor interval=5min
++                      RscExpr type=dummy"""
++                ),
++            ),
++            (
++                "(resource ::dummy or resource ::delay) and op monitor",
++                dedent(
++                    """\
++                    BoolExpr AND
++                      BoolExpr OR
++                        RscExpr type=dummy
++                        RscExpr type=delay
++                      OpExpr name=monitor"""
++                ),
++            ),
++            (
++                "(op start and op stop) or resource ::dummy",
++                dedent(
++                    """\
++                    BoolExpr OR
++                      BoolExpr AND
++                        OpExpr name=start
++                        OpExpr name=stop
++                      RscExpr type=dummy"""
++                ),
++            ),
++            (
++                "op monitor or (resource ::dummy and resource ::delay)",
++                dedent(
++                    """\
++                    BoolExpr OR
++                      OpExpr name=monitor
++                      BoolExpr AND
++                        RscExpr type=dummy
++                        RscExpr type=delay"""
++                ),
++            ),
++            (
++                "resource ::dummy and (op start or op stop)",
++                dedent(
++                    """\
++                    BoolExpr AND
++                      RscExpr type=dummy
++                      BoolExpr OR
++                        OpExpr name=start
++                        OpExpr name=stop"""
++                ),
++            ),
++            (
++                "resource ::dummy and resource ::delay and op monitor",
++                dedent(
++                    """\
++                    BoolExpr AND
++                      RscExpr type=dummy
++                      RscExpr type=delay
++                      OpExpr name=monitor"""
++                ),
++            ),
++            (
++                "resource ::rA or resource ::rB or resource ::rC and op monitor",
++                dedent(
++                    """\
++                    BoolExpr AND
++                      BoolExpr OR
++                        RscExpr type=rA
++                        RscExpr type=rB
++                        RscExpr type=rC
++                      OpExpr name=monitor"""
++                ),
++            ),
++            (
++                "op start and op stop and op monitor or resource ::delay",
++                dedent(
++                    """\
++                    BoolExpr OR
++                      BoolExpr AND
++                        OpExpr name=start
++                        OpExpr name=stop
++                        OpExpr name=monitor
++                      RscExpr type=delay"""
++                ),
++            ),
++            (
++                "(resource ::rA or resource ::rB or resource ::rC) and (op oX or op oY or op oZ)",
++                dedent(
++                    """\
++                    BoolExpr AND
++                      BoolExpr OR
++                        RscExpr type=rA
++                        RscExpr type=rB
++                        RscExpr type=rC
++                      BoolExpr OR
++                        OpExpr name=oX
++                        OpExpr name=oY
++                        OpExpr name=oZ"""
++                ),
++            ),
++        ]
++        for rule_string, rule_tree in test_data:
++            with self.subTest(rule_string=rule_string):
++                self.assertEqual(
++                    rule_tree,
++                    _parsed_to_str(
++                        rule.parse_rule(
++                            rule_string, allow_rsc_expr=True, allow_op_expr=True
++                        )
++                    ),
++                )
++
++    def test_not_valid_rule(self):
++        test_data = [
++            ("resource", (1, 9, 8, "Expected <resource name>")),
++            ("op", (1, 3, 2, "Expected <operation name>")),
++            ("resource ::rA and", (1, 15, 14, "Expected end of text")),
++            ("resource ::rA and op ", (1, 15, 14, "Expected end of text")),
++            ("resource ::rA and (", (1, 15, 14, "Expected end of text")),
++        ]
++
++        for rule_string, exception_data in test_data:
++            with self.subTest(rule_string=rule_string):
++                with self.assertRaises(rule.RuleParseError) as cm:
++                    rule.parse_rule(
++                        rule_string, allow_rsc_expr=True, allow_op_expr=True
++                    )
++            e = cm.exception
++            self.assertEqual(exception_data, (e.lineno, e.colno, e.pos, e.msg))
++            self.assertEqual(rule_string, e.rule_string)
+diff --git a/pcs_test/tier0/lib/cib/rule/test_validator.py b/pcs_test/tier0/lib/cib/rule/test_validator.py
+new file mode 100644
+index 00000000..95344a4a
+--- /dev/null
++++ b/pcs_test/tier0/lib/cib/rule/test_validator.py
+@@ -0,0 +1,68 @@
++from unittest import TestCase
++
++from pcs_test.tools import fixture
++from pcs_test.tools.assertions import assert_report_item_list_equal
++
++from pcs.common import reports
++from pcs.common.types import CibRuleExpressionType
++from pcs.lib.cib.rule.expression_part import (
++    BOOL_AND,
++    BOOL_OR,
++    BoolExpr,
++    OpExpr,
++    RscExpr,
++)
++from pcs.lib.cib.rule.validator import Validator
++
++
++class ValidatorTest(TestCase):
++    def setUp(self):
++        self.report_op = fixture.error(
++            reports.codes.RULE_EXPRESSION_NOT_ALLOWED,
++            expression_type=CibRuleExpressionType.OP_EXPRESSION,
++        )
++        self.report_rsc = fixture.error(
++            reports.codes.RULE_EXPRESSION_NOT_ALLOWED,
++            expression_type=CibRuleExpressionType.RSC_EXPRESSION,
++        )
++        self.rule_rsc = BoolExpr(
++            BOOL_OR, [RscExpr(None, None, "a"), RscExpr(None, None, "b")]
++        )
++        self.rule_op = BoolExpr(
++            BOOL_OR, [OpExpr("start", None), OpExpr("stop", None)]
++        )
++        self.rule = BoolExpr(BOOL_AND, [self.rule_rsc, self.rule_op])
++
++    def test_complex_rule(self):
++        test_data = (
++            (True, True, []),
++            (True, False, [self.report_rsc]),
++            (False, True, [self.report_op]),
++            (False, False, [self.report_rsc, self.report_op]),
++        )
++        for op_allowed, rsc_allowed, report_list in test_data:
++            with self.subTest(op_allowed=op_allowed, rsc_allowed=rsc_allowed):
++                assert_report_item_list_equal(
++                    Validator(
++                        self.rule,
++                        allow_rsc_expr=rsc_allowed,
++                        allow_op_expr=op_allowed,
++                    ).get_reports(),
++                    report_list,
++                )
++
++    def test_disallow_missing_op(self):
++        assert_report_item_list_equal(
++            Validator(
++                self.rule_rsc, allow_rsc_expr=True, allow_op_expr=False
++            ).get_reports(),
++            [],
++        )
++
++    def test_disallow_missing_rsc(self):
++        assert_report_item_list_equal(
++            Validator(
++                self.rule_op, allow_rsc_expr=False, allow_op_expr=True
++            ).get_reports(),
++            [],
++        )
+diff --git a/pcs_test/tier0/lib/cib/test_nvpair_multi.py b/pcs_test/tier0/lib/cib/test_nvpair_multi.py
+new file mode 100644
+index 00000000..c68c7233
+--- /dev/null
++++ b/pcs_test/tier0/lib/cib/test_nvpair_multi.py
+@@ -0,0 +1,513 @@
++from unittest import TestCase
++
++from lxml import etree
++
++from pcs_test.tools import fixture
++from pcs_test.tools.assertions import (
++    assert_report_item_list_equal,
++    assert_xml_equal,
++)
++from pcs_test.tools.xml import etree_to_str
++
++from pcs.common import reports
++from pcs.common.pacemaker.nvset import (
++    CibNvpairDto,
++    CibNvsetDto,
++)
++from pcs.common.pacemaker.rule import CibRuleExpressionDto
++from pcs.common.types import (
++    CibNvsetType,
++    CibRuleExpressionType,
++)
++from pcs.lib.cib import nvpair_multi
++from pcs.lib.cib.rule.expression_part import (
++    BOOL_AND,
++    BoolExpr,
++    OpExpr,
++    RscExpr,
++)
++from pcs.lib.cib.tools import IdProvider
++
++
++class NvpairElementToDto(TestCase):
++    def test_success(self):
++        xml = etree.fromstring(
++            """
++            <nvpair id="my-id" name="my-name" value="my-value" />
++        """
++        )
++        self.assertEqual(
++            nvpair_multi.nvpair_element_to_dto(xml),
++            CibNvpairDto("my-id", "my-name", "my-value"),
++        )
++
++
++class NvsetElementToDto(TestCase):
++    tag_type = (
++        ("meta_attributes", CibNvsetType.META),
++        ("instance_attributes", CibNvsetType.INSTANCE),
++    )
++
++    def test_minimal(self):
++        for tag, nvtype in self.tag_type:
++            with self.subTest(tag=tag, nvset_type=nvtype):
++                xml = etree.fromstring(f"""<{tag} id="my-id" />""")
++                self.assertEqual(
++                    nvpair_multi.nvset_element_to_dto(xml),
++                    CibNvsetDto("my-id", nvtype, {}, None, []),
++                )
++
++    def test_full(self):
++        for tag, nvtype in self.tag_type:
++            with self.subTest(tag=tag, nvset_type=nvtype):
++                xml = etree.fromstring(
++                    f"""
++                    <{tag} id="my-id" score="150">
++                        <rule id="my-id-rule" boolean-op="or">
++                            <op_expression id="my-id-rule-op" name="monitor" />
++                        </rule>
++                        <nvpair id="my-id-pair1" name="name1" value="value1" />
++                        <nvpair id="my-id-pair2" name="name2" value="value2" />
++                    </{tag}>
++                """
++                )
++                self.assertEqual(
++                    nvpair_multi.nvset_element_to_dto(xml),
++                    CibNvsetDto(
++                        "my-id",
++                        nvtype,
++                        {"score": "150"},
++                        CibRuleExpressionDto(
++                            "my-id-rule",
++                            CibRuleExpressionType.RULE,
++                            False,
++                            {"boolean-op": "or"},
++                            None,
++                            None,
++                            [
++                                CibRuleExpressionDto(
++                                    "my-id-rule-op",
++                                    CibRuleExpressionType.OP_EXPRESSION,
++                                    False,
++                                    {"name": "monitor"},
++                                    None,
++                                    None,
++                                    [],
++                                    "op monitor",
++                                ),
++                            ],
++                            "op monitor",
++                        ),
++                        [
++                            CibNvpairDto("my-id-pair1", "name1", "value1"),
++                            CibNvpairDto("my-id-pair2", "name2", "value2"),
++                        ],
++                    ),
++                )
++
++
++class FindNvsets(TestCase):
++    def test_empty(self):
++        xml = etree.fromstring("<parent />")
++        self.assertEqual([], nvpair_multi.find_nvsets(xml))
++
++    def test_full(self):
++        xml = etree.fromstring(
++            """
++            <parent>
++                <meta_attributes id="set1" />
++                <instance_attributes id="set2" />
++                <not_an_nvset id="set3" />
++            </parent>
++        """
++        )
++        self.assertEqual(
++            ["set1", "set2"],
++            [el.get("id") for el in nvpair_multi.find_nvsets(xml)],
++        )
++
++
++class FindNvsetsByIds(TestCase):
++    def test_success(self):
++        xml = etree.fromstring(
++            """
++            <parent>
++                <meta_attributes id="set1" />
++                <instance_attributes id="set2" />
++                <not_an_nvset id="set3" />
++                <meta_attributes id="set4" />
++            </parent>
++        """
++        )
++        element_list, report_list = nvpair_multi.find_nvsets_by_ids(
++            xml, ["set1", "set2", "set3", "setX"]
++        )
++        self.assertEqual(
++            ["set1", "set2"], [el.get("id") for el in element_list],
++        )
++        assert_report_item_list_equal(
++            report_list,
++            [
++                fixture.report_unexpected_element(
++                    "set3", "not_an_nvset", ["options set"]
++                ),
++                fixture.report_not_found(
++                    "setX",
++                    context_type="parent",
++                    expected_types=["options set"],
++                ),
++            ],
++        )
++
++
++class ValidateNvsetAppendNew(TestCase):
++    def setUp(self):
++        self.id_provider = IdProvider(
++            etree.fromstring("""<cib><tags><tag id="a" /></tags></cib>""")
++        )
++
++    def test_success_minimal(self):
++        validator = nvpair_multi.ValidateNvsetAppendNew(
++            self.id_provider, {}, {}
++        )
++        assert_report_item_list_equal(
++            validator.validate(force_options=True), []
++        )
++        self.assertIsNone(validator.get_parsed_rule())
++
++    def test_success_full(self):
++        validator = nvpair_multi.ValidateNvsetAppendNew(
++            self.id_provider,
++            {"name": "value"},
++            {"id": "some-id", "score": "10"},
++            nvset_rule="resource ::stateful",
++            rule_allows_rsc_expr=True,
++            rule_allows_op_expr=True,
++        )
++        assert_report_item_list_equal(
++            validator.validate(), [],
++        )
++        self.assertEqual(
++            repr(validator.get_parsed_rule()),
++            "BoolExpr(operator='AND', children=["
++            "RscExpr(standard=None, provider=None, type='stateful')"
++            "])",
++        )
++
++    def test_id_not_valid(self):
++        validator = nvpair_multi.ValidateNvsetAppendNew(
++            self.id_provider, {}, {"id": "123"}
++        )
++        assert_report_item_list_equal(
++            validator.validate(force_options=True),
++            [fixture.report_invalid_id("123", "1")],
++        )
++        self.assertIsNone(validator.get_parsed_rule())
++
++    def test_id_not_available(self):
++        validator = nvpair_multi.ValidateNvsetAppendNew(
++            self.id_provider, {}, {"id": "a"}
++        )
++        assert_report_item_list_equal(
++            validator.validate(force_options=True),
++            [fixture.error(reports.codes.ID_ALREADY_EXISTS, id="a")],
++        )
++        self.assertIsNone(validator.get_parsed_rule())
++
++    def test_score_not_valid(self):
++        validator = nvpair_multi.ValidateNvsetAppendNew(
++            self.id_provider, {}, {"score": "a"}
++        )
++        assert_report_item_list_equal(
++            validator.validate(force_options=True),
++            [fixture.error(reports.codes.INVALID_SCORE, score="a")],
++        )
++        self.assertIsNone(validator.get_parsed_rule())
++
++    def test_options_names(self):
++        validator = nvpair_multi.ValidateNvsetAppendNew(
++            self.id_provider, {}, {"not_valid": "a"}
++        )
++        assert_report_item_list_equal(
++            validator.validate(),
++            [
++                fixture.error(
++                    reports.codes.INVALID_OPTIONS,
++                    force_code=reports.codes.FORCE_OPTIONS,
++                    option_names=["not_valid"],
++                    allowed=["id", "score"],
++                    option_type=None,
++                    allowed_patterns=[],
++                ),
++            ],
++        )
++        self.assertIsNone(validator.get_parsed_rule())
++
++    def test_options_names_forced(self):
++        validator = nvpair_multi.ValidateNvsetAppendNew(
++            self.id_provider, {}, {"not_valid": "a"}
++        )
++        assert_report_item_list_equal(
++            validator.validate(force_options=True),
++            [
++                fixture.warn(
++                    reports.codes.INVALID_OPTIONS,
++                    option_names=["not_valid"],
++                    allowed=["id", "score"],
++                    option_type=None,
++                    allowed_patterns=[],
++                ),
++            ],
++        )
++        self.assertIsNone(validator.get_parsed_rule())
++
++    def test_rule_not_valid(self):
++        validator = nvpair_multi.ValidateNvsetAppendNew(
++            self.id_provider,
++            {},
++            {},
++            "bad rule",
++            rule_allows_rsc_expr=True,
++            rule_allows_op_expr=True,
++        )
++        assert_report_item_list_equal(
++            validator.validate(force_options=True),
++            [
++                fixture.error(
++                    reports.codes.RULE_EXPRESSION_PARSE_ERROR,
++                    rule_string="bad rule",
++                    reason='Expected "resource"',
++                    rule_line="bad rule",
++                    line_number=1,
++                    column_number=1,
++                    position=0,
++                ),
++            ],
++        )
++        self.assertIsNone(validator.get_parsed_rule())
++
++
++class NvsetAppendNew(TestCase):
++    # pylint: disable=no-self-use
++    def test_minimal(self):
++        context_element = etree.fromstring("""<context id="a" />""")
++        id_provider = IdProvider(context_element)
++        nvpair_multi.nvset_append_new(
++            context_element, id_provider, nvpair_multi.NVSET_META, {}, {}
++        )
++        assert_xml_equal(
++            """
++                <context id="a">
++                    <meta_attributes id="a-meta_attributes" />
++                </context>
++            """,
++            etree_to_str(context_element),
++        )
++
++    def test_nvpairs(self):
++        context_element = etree.fromstring("""<context id="a" />""")
++        id_provider = IdProvider(context_element)
++        nvpair_multi.nvset_append_new(
++            context_element,
++            id_provider,
++            nvpair_multi.NVSET_META,
++            {"attr1": "value1", "attr-empty": "", "attr2": "value2"},
++            {},
++        )
++        assert_xml_equal(
++            """
++                <context id="a">
++                    <meta_attributes id="a-meta_attributes">
++                        <nvpair id="a-meta_attributes-attr1"
++                            name="attr1" value="value1"
++                        />
++                        <nvpair id="a-meta_attributes-attr2"
++                            name="attr2" value="value2"
++                        />
++                    </meta_attributes>
++                </context>
++            """,
++            etree_to_str(context_element),
++        )
++
++    def test_rule(self):
++        context_element = etree.fromstring("""<context id="a" />""")
++        id_provider = IdProvider(context_element)
++        nvpair_multi.nvset_append_new(
++            context_element,
++            id_provider,
++            nvpair_multi.NVSET_META,
++            {},
++            {},
++            nvset_rule=BoolExpr(
++                BOOL_AND,
++                [RscExpr("ocf", "pacemaker", "Dummy"), OpExpr("start", None)],
++            ),
++        )
++        assert_xml_equal(
++            """
++                <context id="a">
++                    <meta_attributes id="a-meta_attributes">
++                        <rule id="a-meta_attributes-rule"
++                            boolean-op="and" score="INFINITY"
++                        >
++                            <rsc_expression
++                                id="a-meta_attributes-rule-rsc-ocf-pacemaker-Dummy"
++                                class="ocf" provider="pacemaker" type="Dummy"
++                            />
++                            <op_expression id="a-meta_attributes-rule-op-start" 
++                                name="start"
++                            />
++                        </rule>
++                    </meta_attributes>
++                </context>
++            """,
++            etree_to_str(context_element),
++        )
++
++    def test_custom_id(self):
++        context_element = etree.fromstring("""<context id="a" />""")
++        id_provider = IdProvider(context_element)
++        nvpair_multi.nvset_append_new(
++            context_element,
++            id_provider,
++            nvpair_multi.NVSET_META,
++            {},
++            {"id": "custom-id"},
++        )
++        assert_xml_equal(
++            """
++                <context id="a">
++                    <meta_attributes id="custom-id" />
++                </context>
++            """,
++            etree_to_str(context_element),
++        )
++
++    def test_options(self):
++        context_element = etree.fromstring("""<context id="a" />""")
++        id_provider = IdProvider(context_element)
++        nvpair_multi.nvset_append_new(
++            context_element,
++            id_provider,
++            nvpair_multi.NVSET_META,
++            {},
++            {"score": "INFINITY", "empty-attr": ""},
++        )
++        assert_xml_equal(
++            """
++                <context id="a">
++                    <meta_attributes id="a-meta_attributes" score="INFINITY" />
++                </context>
++            """,
++            etree_to_str(context_element),
++        )
++
++    def test_everything(self):
++        context_element = etree.fromstring("""<context id="a" />""")
++        id_provider = IdProvider(context_element)
++        nvpair_multi.nvset_append_new(
++            context_element,
++            id_provider,
++            nvpair_multi.NVSET_META,
++            {"attr1": "value1", "attr-empty": "", "attr2": "value2"},
++            {"id": "custom-id", "score": "INFINITY", "empty-attr": ""},
++            nvset_rule=BoolExpr(
++                BOOL_AND,
++                [RscExpr("ocf", "pacemaker", "Dummy"), OpExpr("start", None)],
++            ),
++        )
++        assert_xml_equal(
++            """
++                <context id="a">
++                    <meta_attributes id="custom-id" score="INFINITY">
++                        <rule id="custom-id-rule"
++                            boolean-op="and" score="INFINITY"
++                        >
++                            <rsc_expression id="custom-id-rule-rsc-ocf-pacemaker-Dummy"
++                                class="ocf" provider="pacemaker" type="Dummy"
++                            />
++                            <op_expression id="custom-id-rule-op-start" 
++                                name="start"
++                            />
++                        </rule>
++                        <nvpair id="custom-id-attr1"
++                            name="attr1" value="value1"
++                        />
++                        <nvpair id="custom-id-attr2"
++                            name="attr2" value="value2"
++                        />
++                    </meta_attributes>
++                </context>
++            """,
++            etree_to_str(context_element),
++        )
++
++
++class NvsetRemove(TestCase):
++    # pylint: disable=no-self-use
++    def test_success(self):
++        xml = etree.fromstring(
++            """
++            <parent>
++                <meta_attributes id="set1" />
++                <instance_attributes id="set2" />
++                <not_an_nvset id="set3" />
++                <meta_attributes id="set4" />
++            </parent>
++        """
++        )
++        nvpair_multi.nvset_remove(
++            [xml.find(".//*[@id='set2']"), xml.find(".//*[@id='set4']")]
++        )
++        assert_xml_equal(
++            """
++            <parent>
++                <meta_attributes id="set1" />
++                <not_an_nvset id="set3" />
++            </parent>
++            """,
++            etree_to_str(xml),
++        )
++
++
++class NvsetUpdate(TestCase):
++    # pylint: disable=no-self-use
++    def test_success_nvpair_all_cases(self):
++        nvset_element = etree.fromstring(
++            """
++            <meta_attributes id="set1">
++                <nvpair id="pair1" name="name1" value="value1" />
++                <nvpair id="pair2" name="name2" value="value2" />
++                <nvpair id="pair3" name="name 3" value="value 3" />
++                <nvpair id="pair4" name="name4" value="value4" />
++                <nvpair id="pair4A" name="name4" value="value4A" />
++                <nvpair id="pair4B" name="name4" value="value4B" />
++            </meta_attributes>
++        """
++        )
++        id_provider = IdProvider(nvset_element)
++        nvpair_multi.nvset_update(
++            nvset_element,
++            id_provider,
++            {
++                "name2": "",  # delete
++                "name 3": "value 3 new",  # change and escaping spaces
++                "name4": "value4new",  # change and make unique
++                "name5": "",  # do not add empty
++                "name'6'": 'value"6"',  # escaping
++            },
++        )
++        assert_xml_equal(
++            """
++            <meta_attributes id="set1">
++                <nvpair id="pair1" name="name1" value="value1" />
++                <nvpair id="pair3" name="name 3" value="value 3 new" />
++                <nvpair id="pair4" name="name4" value="value4new" />
++                <nvpair id="set1-name6"
++                    name="name&#x27;6&#x27;" value="value&quot;6&quot;"
++                />
++            </meta_attributes>
++            """,
++            etree_to_str(nvset_element),
++        )
+diff --git a/pcs_test/tier0/lib/cib/test_tools.py b/pcs_test/tier0/lib/cib/test_tools.py
+index 376012a1..56f29148 100644
+--- a/pcs_test/tier0/lib/cib/test_tools.py
++++ b/pcs_test/tier0/lib/cib/test_tools.py
+@@ -233,8 +233,8 @@ class FindUniqueIdTest(CibToolsTest):
+         )
+ 
+ 
+-class CreateNvsetIdTest(TestCase):
+-    def test_create_plain_id_when_no_confilicting_id_there(self):
++class CreateSubelementId(TestCase):
++    def test_create_plain_id_when_no_conflicting_id_there(self):
+         context = etree.fromstring('<cib><a id="b"/></cib>')
+         self.assertEqual(
+             "b-name",
+@@ -252,6 +252,15 @@ class CreateNvsetIdTest(TestCase):
+             ),
+         )
+ 
++    def test_parent_has_no_id(self):
++        context = etree.fromstring("<cib><a/></cib>")
++        self.assertEqual(
++            "a-name",
++            lib.create_subelement_id(
++                context.find(".//a"), "name", lib.IdProvider(context)
++            ),
++        )
++
+ 
+ class GetConfigurationTest(CibToolsTest):
+     def test_success_if_exists(self):
+diff --git a/pcs_test/tier0/lib/commands/cib_options/test_operations_defaults.py b/pcs_test/tier0/lib/commands/cib_options/test_operations_defaults.py
+deleted file mode 100644
+index 2542043a..00000000
+--- a/pcs_test/tier0/lib/commands/cib_options/test_operations_defaults.py
++++ /dev/null
+@@ -1,120 +0,0 @@
+-from unittest import TestCase
+-
+-from pcs_test.tools import fixture
+-from pcs_test.tools.command_env import get_env_tools
+-
+-from pcs.common.reports import codes as report_codes
+-from pcs.lib.commands import cib_options
+-
+-FIXTURE_INITIAL_DEFAULTS = """
+-    <op_defaults>
+-        <meta_attributes id="op_defaults-options">
+-            <nvpair id="op_defaults-options-a" name="a" value="b"/>
+-            <nvpair id="op_defaults-options-b" name="b" value="c"/>
+-        </meta_attributes>
+-    </op_defaults>
+-"""
+-
+-
+-class SetOperationsDefaults(TestCase):
+-    def setUp(self):
+-        self.env_assist, self.config = get_env_tools(test_case=self)
+-
+-    def tearDown(self):
+-        self.env_assist.assert_reports(
+-            [fixture.warn(report_codes.DEFAULTS_CAN_BE_OVERRIDEN)]
+-        )
+-
+-    def test_change(self):
+-        self.config.runner.cib.load(optional_in_conf=FIXTURE_INITIAL_DEFAULTS)
+-        self.config.env.push_cib(
+-            optional_in_conf="""
+-            <op_defaults>
+-                <meta_attributes id="op_defaults-options">
+-                    <nvpair id="op_defaults-options-a" name="a" value="B"/>
+-                    <nvpair id="op_defaults-options-b" name="b" value="C"/>
+-                </meta_attributes>
+-            </op_defaults>
+-        """
+-        )
+-        cib_options.set_operations_defaults(
+-            self.env_assist.get_env(), {"a": "B", "b": "C",}
+-        )
+-
+-    def test_add(self):
+-        self.config.runner.cib.load(optional_in_conf=FIXTURE_INITIAL_DEFAULTS)
+-        self.config.env.push_cib(
+-            optional_in_conf="""
+-            <op_defaults>
+-                <meta_attributes id="op_defaults-options">
+-                    <nvpair id="op_defaults-options-a" name="a" value="b"/>
+-                    <nvpair id="op_defaults-options-b" name="b" value="c"/>
+-                    <nvpair id="op_defaults-options-c" name="c" value="d"/>
+-                </meta_attributes>
+-            </op_defaults>
+-        """
+-        )
+-        cib_options.set_operations_defaults(
+-            self.env_assist.get_env(), {"c": "d"},
+-        )
+-
+-    def test_remove(self):
+-        self.config.runner.cib.load(optional_in_conf=FIXTURE_INITIAL_DEFAULTS)
+-        self.config.env.push_cib(
+-            remove=(
+-                "./configuration/op_defaults/meta_attributes/nvpair[@name='a']"
+-            )
+-        )
+-        cib_options.set_operations_defaults(
+-            self.env_assist.get_env(), {"a": ""},
+-        )
+-
+-    def test_add_section_if_missing(self):
+-        self.config.runner.cib.load()
+-        self.config.env.push_cib(
+-            optional_in_conf="""
+-            <op_defaults>
+-                <meta_attributes id="op_defaults-options">
+-                    <nvpair id="op_defaults-options-a" name="a" value="A"/>
+-                </meta_attributes>
+-            </op_defaults>
+-        """
+-        )
+-        cib_options.set_operations_defaults(
+-            self.env_assist.get_env(), {"a": "A",}
+-        )
+-
+-    def test_add_meta_if_missing(self):
+-        self.config.runner.cib.load(optional_in_conf="<op_defaults />")
+-        self.config.env.push_cib(
+-            optional_in_conf="""
+-            <op_defaults>
+-                <meta_attributes id="op_defaults-options">
+-                    <nvpair id="op_defaults-options-a" name="a" value="A"/>
+-                </meta_attributes>
+-            </op_defaults>
+-        """
+-        )
+-        cib_options.set_operations_defaults(
+-            self.env_assist.get_env(), {"a": "A",}
+-        )
+-
+-    def test_dont_add_section_if_only_removing(self):
+-        self.config.runner.cib.load()
+-        cib_options.set_operations_defaults(
+-            self.env_assist.get_env(), {"a": "", "b": "",}
+-        )
+-
+-    def test_dont_add_meta_if_only_removing(self):
+-        self.config.runner.cib.load(optional_in_conf="<op_defaults />")
+-        self.config.env.push_cib(optional_in_conf="<op_defaults />")
+-        cib_options.set_operations_defaults(
+-            self.env_assist.get_env(), {"a": "", "b": "",}
+-        )
+-
+-    def test_keep_section_when_empty(self):
+-        self.config.runner.cib.load(optional_in_conf=FIXTURE_INITIAL_DEFAULTS)
+-        self.config.env.push_cib(remove="./configuration/op_defaults//nvpair")
+-        cib_options.set_operations_defaults(
+-            self.env_assist.get_env(), {"a": "", "b": "",}
+-        )
+diff --git a/pcs_test/tier0/lib/commands/cib_options/test_resources_defaults.py b/pcs_test/tier0/lib/commands/cib_options/test_resources_defaults.py
+deleted file mode 100644
+index 51d8abf4..00000000
+--- a/pcs_test/tier0/lib/commands/cib_options/test_resources_defaults.py
++++ /dev/null
+@@ -1,120 +0,0 @@
+-from unittest import TestCase
+-
+-from pcs_test.tools import fixture
+-from pcs_test.tools.command_env import get_env_tools
+-
+-from pcs.common.reports import codes as report_codes
+-from pcs.lib.commands import cib_options
+-
+-FIXTURE_INITIAL_DEFAULTS = """
+-    <rsc_defaults>
+-        <meta_attributes id="rsc_defaults-options">
+-            <nvpair id="rsc_defaults-options-a" name="a" value="b"/>
+-            <nvpair id="rsc_defaults-options-b" name="b" value="c"/>
+-        </meta_attributes>
+-    </rsc_defaults>
+-"""
+-
+-
+-class SetResourcesDefaults(TestCase):
+-    def setUp(self):
+-        self.env_assist, self.config = get_env_tools(test_case=self)
+-
+-    def tearDown(self):
+-        self.env_assist.assert_reports(
+-            [fixture.warn(report_codes.DEFAULTS_CAN_BE_OVERRIDEN)]
+-        )
+-
+-    def test_change(self):
+-        self.config.runner.cib.load(optional_in_conf=FIXTURE_INITIAL_DEFAULTS)
+-        self.config.env.push_cib(
+-            optional_in_conf="""
+-            <rsc_defaults>
+-                <meta_attributes id="rsc_defaults-options">
+-                    <nvpair id="rsc_defaults-options-a" name="a" value="B"/>
+-                    <nvpair id="rsc_defaults-options-b" name="b" value="C"/>
+-                </meta_attributes>
+-            </rsc_defaults>
+-        """
+-        )
+-        cib_options.set_resources_defaults(
+-            self.env_assist.get_env(), {"a": "B", "b": "C",}
+-        )
+-
+-    def test_add(self):
+-        self.config.runner.cib.load(optional_in_conf=FIXTURE_INITIAL_DEFAULTS)
+-        self.config.env.push_cib(
+-            optional_in_conf="""
+-            <rsc_defaults>
+-                <meta_attributes id="rsc_defaults-options">
+-                    <nvpair id="rsc_defaults-options-a" name="a" value="b"/>
+-                    <nvpair id="rsc_defaults-options-b" name="b" value="c"/>
+-                    <nvpair id="rsc_defaults-options-c" name="c" value="d"/>
+-                </meta_attributes>
+-            </rsc_defaults>
+-        """
+-        )
+-        cib_options.set_resources_defaults(
+-            self.env_assist.get_env(), {"c": "d"},
+-        )
+-
+-    def test_remove(self):
+-        self.config.runner.cib.load(optional_in_conf=FIXTURE_INITIAL_DEFAULTS)
+-        self.config.env.push_cib(
+-            remove=(
+-                "./configuration/rsc_defaults/meta_attributes/nvpair[@name='a']"
+-            )
+-        )
+-        cib_options.set_resources_defaults(
+-            self.env_assist.get_env(), {"a": ""},
+-        )
+-
+-    def test_add_section_if_missing(self):
+-        self.config.runner.cib.load()
+-        self.config.env.push_cib(
+-            optional_in_conf="""
+-            <rsc_defaults>
+-                <meta_attributes id="rsc_defaults-options">
+-                    <nvpair id="rsc_defaults-options-a" name="a" value="A"/>
+-                </meta_attributes>
+-            </rsc_defaults>
+-        """
+-        )
+-        cib_options.set_resources_defaults(
+-            self.env_assist.get_env(), {"a": "A",}
+-        )
+-
+-    def test_add_meta_if_missing(self):
+-        self.config.runner.cib.load(optional_in_conf="<rsc_defaults />")
+-        self.config.env.push_cib(
+-            optional_in_conf="""
+-            <rsc_defaults>
+-                <meta_attributes id="rsc_defaults-options">
+-                    <nvpair id="rsc_defaults-options-a" name="a" value="A"/>
+-                </meta_attributes>
+-            </rsc_defaults>
+-        """
+-        )
+-        cib_options.set_resources_defaults(
+-            self.env_assist.get_env(), {"a": "A",}
+-        )
+-
+-    def test_dont_add_section_if_only_removing(self):
+-        self.config.runner.cib.load()
+-        cib_options.set_resources_defaults(
+-            self.env_assist.get_env(), {"a": "", "b": "",}
+-        )
+-
+-    def test_dont_add_meta_if_only_removing(self):
+-        self.config.runner.cib.load(optional_in_conf="<rsc_defaults />")
+-        self.config.env.push_cib(optional_in_conf="<rsc_defaults />")
+-        cib_options.set_resources_defaults(
+-            self.env_assist.get_env(), {"a": "", "b": "",}
+-        )
+-
+-    def test_keep_section_when_empty(self):
+-        self.config.runner.cib.load(optional_in_conf=FIXTURE_INITIAL_DEFAULTS)
+-        self.config.env.push_cib(remove="./configuration/rsc_defaults//nvpair")
+-        cib_options.set_resources_defaults(
+-            self.env_assist.get_env(), {"a": "", "b": "",}
+-        )
+diff --git a/pcs_test/tier0/lib/commands/test_cib_options.py b/pcs_test/tier0/lib/commands/test_cib_options.py
+new file mode 100644
+index 00000000..c7c8cb1f
+--- /dev/null
++++ b/pcs_test/tier0/lib/commands/test_cib_options.py
+@@ -0,0 +1,669 @@
++from unittest import TestCase
++
++from pcs_test.tools import fixture
++from pcs_test.tools.command_env import get_env_tools
++
++from pcs.common import reports
++from pcs.common.pacemaker.nvset import (
++    CibNvpairDto,
++    CibNvsetDto,
++)
++from pcs.common.pacemaker.rule import CibRuleExpressionDto
++from pcs.common.types import (
++    CibNvsetType,
++    CibRuleExpressionType,
++)
++from pcs.lib.commands import cib_options
++
++
++class DefaultsCreateMixin:
++    command = lambda *args, **kwargs: None
++    tag = ""
++
++    def setUp(self):
++        # pylint: disable=invalid-name
++        self.env_assist, self.config = get_env_tools(self)
++        self.config.runner.cib.load(filename="cib-empty-1.2.xml")
++
++    def test_success_minimal(self):
++        defaults_xml = f"""
++            <{self.tag}>
++                <meta_attributes id="{self.tag}-meta_attributes" />
++            </{self.tag}>
++        """
++        self.config.env.push_cib(optional_in_conf=defaults_xml)
++
++        self.command(self.env_assist.get_env(), {}, {})
++
++        self.env_assist.assert_reports(
++            [fixture.warn(reports.codes.DEFAULTS_CAN_BE_OVERRIDEN)]
++        )
++
++    def test_success_one_set_already_there(self):
++        defaults_xml_1 = f"""
++            <{self.tag}>
++                <meta_attributes id="{self.tag}-meta_attributes" />
++            </{self.tag}>
++        """
++        defaults_xml_2 = f"""
++            <{self.tag}>
++                <meta_attributes id="{self.tag}-meta_attributes" />
++                <meta_attributes id="{self.tag}-meta_attributes-1" />
++            </{self.tag}>
++        """
++        self.config.runner.cib.load(
++            instead="runner.cib.load", optional_in_conf=defaults_xml_1
++        )
++        self.config.env.push_cib(optional_in_conf=defaults_xml_2)
++
++        self.command(self.env_assist.get_env(), {}, {})
++
++        self.env_assist.assert_reports(
++            [fixture.warn(reports.codes.DEFAULTS_CAN_BE_OVERRIDEN)]
++        )
++
++    def test_success_cib_upgrade(self):
++        defaults_xml = f"""
++            <{self.tag}>
++                <meta_attributes id="{self.tag}-meta_attributes">
++                    <rule id="{self.tag}-meta_attributes-rule"
++                        boolean-op="and" score="INFINITY"
++                    >
++                        <rsc_expression
++                            id="{self.tag}-meta_attributes-rule-rsc-ocf-pacemaker-Dummy"
++                            class="ocf" provider="pacemaker" type="Dummy"
++                        />
++                    </rule>
++                </meta_attributes>
++            </{self.tag}>
++        """
++        self.config.runner.cib.load(
++            name="load_cib_old_version",
++            filename="cib-empty-3.3.xml",
++            before="runner.cib.load",
++        )
++        self.config.runner.cib.upgrade(before="runner.cib.load")
++        self.config.runner.cib.load(
++            filename="cib-empty-3.4.xml", instead="runner.cib.load"
++        )
++        self.config.env.push_cib(optional_in_conf=defaults_xml)
++
++        self.command(
++            self.env_assist.get_env(),
++            {},
++            {},
++            nvset_rule="resource ocf:pacemaker:Dummy",
++        )
++
++        self.env_assist.assert_reports(
++            [
++                fixture.info(reports.codes.CIB_UPGRADE_SUCCESSFUL),
++                fixture.warn(reports.codes.DEFAULTS_CAN_BE_OVERRIDEN),
++            ]
++        )
++
++    def test_success_full(self):
++        defaults_xml = f"""
++            <{self.tag}>
++                <meta_attributes id="my-id" score="10">
++                    <rule id="my-id-rule" boolean-op="and" score="INFINITY">
++                        <rsc_expression id="my-id-rule-rsc-ocf-pacemaker-Dummy"
++                            class="ocf" provider="pacemaker" type="Dummy"
++                        />
++                    </rule>
++                    <nvpair id="my-id-name1" name="name1" value="value1" />
++                    <nvpair id="my-id-2name" name="2na#me" value="value2" />
++                </meta_attributes>
++            </{self.tag}>
++        """
++        self.config.runner.cib.load(
++            filename="cib-empty-3.4.xml", instead="runner.cib.load"
++        )
++        self.config.env.push_cib(optional_in_conf=defaults_xml)
++
++        self.command(
++            self.env_assist.get_env(),
++            {"name1": "value1", "2na#me": "value2"},
++            {"id": "my-id", "score": "10"},
++            nvset_rule="resource ocf:pacemaker:Dummy",
++        )
++
++        self.env_assist.assert_reports(
++            [fixture.warn(reports.codes.DEFAULTS_CAN_BE_OVERRIDEN)]
++        )
++
++    def test_validation(self):
++        self.config.runner.cib.load(
++            filename="cib-empty-3.4.xml", instead="runner.cib.load"
++        )
++        self.env_assist.assert_raise_library_error(
++            lambda: self.command(
++                self.env_assist.get_env(),
++                {},
++                {"unknown-option": "value"},
++                "bad rule",
++            )
++        )
++        self.env_assist.assert_reports(
++            [
++                fixture.error(
++                    reports.codes.INVALID_OPTIONS,
++                    force_code=reports.codes.FORCE_OPTIONS,
++                    option_names=["unknown-option"],
++                    allowed=["id", "score"],
++                    option_type=None,
++                    allowed_patterns=[],
++                ),
++                fixture.error(
++                    reports.codes.RULE_EXPRESSION_PARSE_ERROR,
++                    rule_string="bad rule",
++                    reason='Expected "resource"',
++                    rule_line="bad rule",
++                    line_number=1,
++                    column_number=1,
++                    position=0,
++                ),
++            ]
++        )
++
++    def test_validation_forced(self):
++        defaults_xml = f"""
++            <{self.tag}>
++                <meta_attributes id="{self.tag}-meta_attributes"
++                    unknown-option="value"
++                />
++            </{self.tag}>
++        """
++        self.config.env.push_cib(optional_in_conf=defaults_xml)
++
++        self.command(
++            self.env_assist.get_env(),
++            {},
++            {"unknown-option": "value"},
++            force_flags={reports.codes.FORCE_OPTIONS},
++        )
++
++        self.env_assist.assert_reports(
++            [
++                fixture.warn(
++                    reports.codes.INVALID_OPTIONS,
++                    option_names=["unknown-option"],
++                    allowed=["id", "score"],
++                    option_type=None,
++                    allowed_patterns=[],
++                ),
++                fixture.warn(reports.codes.DEFAULTS_CAN_BE_OVERRIDEN),
++            ]
++        )
++
++
++class ResourceDefaultsCreate(DefaultsCreateMixin, TestCase):
++    command = staticmethod(cib_options.resource_defaults_create)
++    tag = "rsc_defaults"
++
++    def test_rule_op_expression_not_allowed(self):
++        self.config.runner.cib.load(
++            filename="cib-empty-3.4.xml", instead="runner.cib.load"
++        )
++        self.env_assist.assert_raise_library_error(
++            lambda: self.command(
++                self.env_assist.get_env(), {}, {}, "op monitor"
++            )
++        )
++        self.env_assist.assert_reports(
++            [
++                fixture.error(
++                    reports.codes.RULE_EXPRESSION_NOT_ALLOWED,
++                    expression_type=CibRuleExpressionType.OP_EXPRESSION,
++                ),
++            ]
++        )
++
++
++class OperationDefaultsCreate(DefaultsCreateMixin, TestCase):
++    command = staticmethod(cib_options.operation_defaults_create)
++    tag = "op_defaults"
++
++
++class DefaultsConfigMixin:
++    command = lambda *args, **kwargs: None
++    tag = ""
++
++    def setUp(self):
++        # pylint: disable=invalid-name
++        self.env_assist, self.config = get_env_tools(self)
++
++    def test_empty(self):
++        defaults_xml = f"""<{self.tag} />"""
++        self.config.runner.cib.load(
++            filename="cib-empty-3.4.xml", optional_in_conf=defaults_xml
++        )
++        self.assertEqual([], self.command(self.env_assist.get_env()))
++
++    def test_full(self):
++        defaults_xml = f"""
++            <{self.tag}>
++                <meta_attributes id="{self.tag}-meta_attributes">
++                    <rule id="{self.tag}-meta_attributes-rule"
++                        boolean-op="and" score="INFINITY"
++                    >
++                        <rsc_expression
++                            id="{self.tag}-meta_attributes-rule-rsc-Dummy"
++                            class="ocf" provider="pacemaker" type="Dummy"
++                        />
++                    </rule>
++                    <nvpair id="my-id-pair1" name="name1" value="value1" />
++                    <nvpair id="my-id-pair2" name="name2" value="value2" />
++                </meta_attributes>
++                <instance_attributes id="instance">
++                    <nvpair id="instance-pair" name="inst" value="ance" />
++                </instance_attributes>
++                <meta_attributes id="meta-plain" score="123">
++                    <nvpair id="my-id-pair3" name="name1" value="value1" />
++                </meta_attributes>
++            </{self.tag}>
++        """
++        self.config.runner.cib.load(
++            filename="cib-empty-3.4.xml", optional_in_conf=defaults_xml
++        )
++        self.assertEqual(
++            [
++                CibNvsetDto(
++                    f"{self.tag}-meta_attributes",
++                    CibNvsetType.META,
++                    {},
++                    CibRuleExpressionDto(
++                        f"{self.tag}-meta_attributes-rule",
++                        CibRuleExpressionType.RULE,
++                        False,
++                        {"boolean-op": "and", "score": "INFINITY"},
++                        None,
++                        None,
++                        [
++                            CibRuleExpressionDto(
++                                f"{self.tag}-meta_attributes-rule-rsc-Dummy",
++                                CibRuleExpressionType.RSC_EXPRESSION,
++                                False,
++                                {
++                                    "class": "ocf",
++                                    "provider": "pacemaker",
++                                    "type": "Dummy",
++                                },
++                                None,
++                                None,
++                                [],
++                                "resource ocf:pacemaker:Dummy",
++                            ),
++                        ],
++                        "resource ocf:pacemaker:Dummy",
++                    ),
++                    [
++                        CibNvpairDto("my-id-pair1", "name1", "value1"),
++                        CibNvpairDto("my-id-pair2", "name2", "value2"),
++                    ],
++                ),
++                CibNvsetDto(
++                    "instance",
++                    CibNvsetType.INSTANCE,
++                    {},
++                    None,
++                    [CibNvpairDto("instance-pair", "inst", "ance")],
++                ),
++                CibNvsetDto(
++                    "meta-plain",
++                    CibNvsetType.META,
++                    {"score": "123"},
++                    None,
++                    [CibNvpairDto("my-id-pair3", "name1", "value1")],
++                ),
++            ],
++            self.command(self.env_assist.get_env()),
++        )
++
++
++class ResourceDefaultsConfig(DefaultsConfigMixin, TestCase):
++    command = staticmethod(cib_options.resource_defaults_config)
++    tag = "rsc_defaults"
++
++
++class OperationDefaultsConfig(DefaultsConfigMixin, TestCase):
++    command = staticmethod(cib_options.operation_defaults_config)
++    tag = "op_defaults"
++
++
++class DefaultsRemoveMixin:
++    command = lambda *args, **kwargs: None
++    tag = ""
++
++    def setUp(self):
++        # pylint: disable=invalid-name
++        self.env_assist, self.config = get_env_tools(self)
++
++    def test_nothing_to_delete(self):
++        self.command(self.env_assist.get_env(), [])
++
++    def test_defaults_section_missing(self):
++        self.config.runner.cib.load(filename="cib-empty-1.2.xml")
++        self.env_assist.assert_raise_library_error(
++            lambda: self.command(self.env_assist.get_env(), ["set1"])
++        )
++        self.env_assist.assert_reports(
++            [
++                fixture.report_not_found(
++                    "set1",
++                    context_type=self.tag,
++                    expected_types=["options set"],
++                ),
++            ]
++        )
++
++    def test_success(self):
++        self.config.runner.cib.load(
++            filename="cib-empty-1.2.xml",
++            optional_in_conf=f"""
++                <{self.tag}>
++                    <meta_attributes id="set1" />
++                    <instance_attributes id="set2" />
++                    <not_an_nvset id="set3" />
++                    <meta_attributes id="set4" />
++                    <instance_attributes id="set5" />
++                </{self.tag}>
++            """,
++        )
++        self.config.env.push_cib(
++            optional_in_conf=f"""
++                <{self.tag}>
++                    <meta_attributes id="set1" />
++                    <not_an_nvset id="set3" />
++                    <meta_attributes id="set4" />
++                </{self.tag}>
++        """
++        )
++        self.command(self.env_assist.get_env(), ["set2", "set5"])
++
++    def test_delete_all_keep_the_section(self):
++        self.config.runner.cib.load(
++            filename="cib-empty-1.2.xml",
++            optional_in_conf=f"""
++                <{self.tag}>
++                    <meta_attributes id="set1" />
++                </{self.tag}>
++            """,
++        )
++        self.config.env.push_cib(optional_in_conf=f"<{self.tag} />")
++        self.command(self.env_assist.get_env(), ["set1"])
++
++    def test_nvset_not_found(self):
++        self.config.runner.cib.load(
++            filename="cib-empty-1.2.xml",
++            optional_in_conf=f"""
++                <{self.tag}>
++                    <meta_attributes id="set1" />
++                    <instance_attributes id="set2" />
++                    <not_an_nvset id="set3" />
++                    <meta_attributes id="set4" />
++                    <instance_attributes id="set5" />
++                </{self.tag}>
++            """,
++        )
++        self.env_assist.assert_raise_library_error(
++            lambda: self.command(
++                self.env_assist.get_env(), ["set2", "set3", "setX"]
++            )
++        )
++        self.env_assist.assert_reports(
++            [
++                fixture.report_unexpected_element(
++                    "set3", "not_an_nvset", ["options set"]
++                ),
++                fixture.report_not_found(
++                    "setX",
++                    context_type=self.tag,
++                    expected_types=["options set"],
++                ),
++            ]
++        )
++
++
++class ResourceDefaultsRemove(DefaultsRemoveMixin, TestCase):
++    command = staticmethod(cib_options.resource_defaults_remove)
++    tag = "rsc_defaults"
++
++
++class OperationDefaultsRemove(DefaultsRemoveMixin, TestCase):
++    command = staticmethod(cib_options.operation_defaults_remove)
++    tag = "op_defaults"
++
++
++class DefaultsUpdateLegacyMixin:
++    # This class tests legacy use cases of not providing an nvset ID
++    command = lambda *args, **kwargs: None
++    tag = ""
++    command_for_report = None
++
++    def setUp(self):
++        # pylint: disable=invalid-name
++        self.env_assist, self.config = get_env_tools(self)
++        self.reports = [fixture.warn(reports.codes.DEFAULTS_CAN_BE_OVERRIDEN)]
++
++    def tearDown(self):
++        # pylint: disable=invalid-name
++        self.env_assist.assert_reports(self.reports)
++
++    def fixture_initial_defaults(self):
++        return f"""
++            <{self.tag}>
++                <meta_attributes id="{self.tag}-options">
++                    <nvpair id="{self.tag}-options-a" name="a" value="b"/>
++                    <nvpair id="{self.tag}-options-b" name="b" value="c"/>
++                </meta_attributes>
++            </{self.tag}>
++        """
++
++    def test_change(self):
++        self.config.runner.cib.load(
++            optional_in_conf=self.fixture_initial_defaults()
++        )
++        self.config.env.push_cib(
++            optional_in_conf=f"""
++            <{self.tag}>
++                <meta_attributes id="{self.tag}-options">
++                    <nvpair id="{self.tag}-options-a" name="a" value="B"/>
++                    <nvpair id="{self.tag}-options-b" name="b" value="C"/>
++                </meta_attributes>
++            </{self.tag}>
++        """
++        )
++        self.command(self.env_assist.get_env(), None, {"a": "B", "b": "C"})
++
++    def test_add(self):
++        self.config.runner.cib.load(
++            optional_in_conf=self.fixture_initial_defaults()
++        )
++        self.config.env.push_cib(
++            optional_in_conf=f"""
++            <{self.tag}>
++                <meta_attributes id="{self.tag}-options">
++                    <nvpair id="{self.tag}-options-a" name="a" value="b"/>
++                    <nvpair id="{self.tag}-options-b" name="b" value="c"/>
++                    <nvpair id="{self.tag}-options-c" name="c" value="d"/>
++                </meta_attributes>
++            </{self.tag}>
++        """
++        )
++        self.command(self.env_assist.get_env(), None, {"c": "d"})
++
++    def test_remove(self):
++        self.config.runner.cib.load(
++            optional_in_conf=self.fixture_initial_defaults()
++        )
++        self.config.env.push_cib(
++            remove=(
++                f"./configuration/{self.tag}/meta_attributes/nvpair[@name='a']"
++            )
++        )
++        self.command(self.env_assist.get_env(), None, {"a": ""})
++
++    def test_add_section_if_missing(self):
++        self.config.runner.cib.load()
++        self.config.env.push_cib(
++            optional_in_conf=f"""
++            <{self.tag}>
++                <meta_attributes id="{self.tag}-meta_attributes">
++                    <nvpair id="{self.tag}-meta_attributes-a" name="a" value="A"/>
++                </meta_attributes>
++            </{self.tag}>
++        """
++        )
++        self.command(self.env_assist.get_env(), None, {"a": "A"})
++
++    def test_add_meta_if_missing(self):
++        self.config.runner.cib.load(optional_in_conf=f"<{self.tag} />")
++        self.config.env.push_cib(
++            optional_in_conf=f"""
++            <{self.tag}>
++                <meta_attributes id="{self.tag}-meta_attributes">
++                    <nvpair id="{self.tag}-meta_attributes-a" name="a" value="A"/>
++                </meta_attributes>
++            </{self.tag}>
++        """
++        )
++        self.command(self.env_assist.get_env(), None, {"a": "A"})
++
++    def test_dont_add_section_if_only_removing(self):
++        self.config.runner.cib.load()
++        self.command(self.env_assist.get_env(), None, {"a": "", "b": ""})
++
++    def test_dont_add_meta_if_only_removing(self):
++        self.config.runner.cib.load(optional_in_conf=f"<{self.tag} />")
++        self.command(self.env_assist.get_env(), None, {"a": "", "b": ""})
++
++    def test_keep_section_when_empty(self):
++        self.config.runner.cib.load(
++            optional_in_conf=self.fixture_initial_defaults()
++        )
++        self.config.env.push_cib(remove=f"./configuration/{self.tag}//nvpair")
++        self.command(self.env_assist.get_env(), None, {"a": "", "b": ""})
++
++    def test_ambiguous(self):
++        self.config.runner.cib.load(
++            optional_in_conf=f"""
++                <{self.tag}>
++                    <meta_attributes id="{self.tag}-options">
++                        <nvpair id="{self.tag}-options-a" name="a" value="b"/>
++                        <nvpair id="{self.tag}-options-b" name="b" value="c"/>
++                    </meta_attributes>
++                    <meta_attributes id="{self.tag}-options-1">
++                        <nvpair id="{self.tag}-options-c" name="c" value="d"/>
++                    </meta_attributes>
++                </{self.tag}>
++            """
++        )
++        self.env_assist.assert_raise_library_error(
++            lambda: self.command(self.env_assist.get_env(), None, {"x": "y"})
++        )
++        self.reports = [
++            fixture.error(
++                reports.codes.CIB_NVSET_AMBIGUOUS_PROVIDE_NVSET_ID,
++                pcs_command=self.command_for_report,
++            )
++        ]
++
++
++class DefaultsUpdateMixin:
++    command = lambda *args, **kwargs: None
++    tag = ""
++
++    def setUp(self):
++        # pylint: disable=invalid-name
++        self.env_assist, self.config = get_env_tools(self)
++
++    def fixture_initial_defaults(self):
++        return f"""
++            <{self.tag}>
++                <meta_attributes id="{self.tag}-options">
++                    <nvpair id="{self.tag}-options-a" name="a" value="b"/>
++                    <nvpair id="{self.tag}-options-b" name="b" value="c"/>
++                    <nvpair id="{self.tag}-options-c" name="c" value="d"/>
++                </meta_attributes>
++            </{self.tag}>
++        """
++
++    def test_success(self):
++        self.config.runner.cib.load(
++            optional_in_conf=self.fixture_initial_defaults()
++        )
++        self.config.env.push_cib(
++            optional_in_conf=f"""
++            <{self.tag}>
++                <meta_attributes id="{self.tag}-options">
++                    <nvpair id="{self.tag}-options-a" name="a" value="B"/>
++                    <nvpair id="{self.tag}-options-b" name="b" value="c"/>
++                    <nvpair id="{self.tag}-options-d" name="d" value="e"/>
++                </meta_attributes>
++            </{self.tag}>
++        """
++        )
++        self.command(
++            self.env_assist.get_env(),
++            f"{self.tag}-options",
++            {"a": "B", "c": "", "d": "e"},
++        )
++        self.env_assist.assert_reports(
++            [fixture.warn(reports.codes.DEFAULTS_CAN_BE_OVERRIDEN)]
++        )
++
++    def test_nvset_doesnt_exist(self):
++        self.config.runner.cib.load(
++            optional_in_conf=self.fixture_initial_defaults()
++        )
++        self.env_assist.assert_raise_library_error(
++            lambda: self.command(
++                self.env_assist.get_env(), "wrong-nvset-id", {},
++            )
++        )
++        self.env_assist.assert_reports(
++            [
++                fixture.report_not_found(
++                    "wrong-nvset-id",
++                    context_type=self.tag,
++                    expected_types=["options set"],
++                ),
++            ]
++        )
++
++    def test_keep_elements_when_empty(self):
++        self.config.runner.cib.load(
++            optional_in_conf=self.fixture_initial_defaults()
++        )
++        self.config.env.push_cib(remove=f"./configuration/{self.tag}//nvpair")
++        self.command(
++            self.env_assist.get_env(),
++            f"{self.tag}-options",
++            {"a": "", "b": "", "c": ""},
++        )
++        self.env_assist.assert_reports(
++            [fixture.warn(reports.codes.DEFAULTS_CAN_BE_OVERRIDEN)]
++        )
++
++
++class ResourceDefaultsUpdateLegacy(DefaultsUpdateLegacyMixin, TestCase):
++    command = staticmethod(cib_options.resource_defaults_update)
++    tag = "rsc_defaults"
++    command_for_report = reports.const.PCS_COMMAND_RESOURCE_DEFAULTS_UPDATE
++
++
++class OperationDefaultsUpdateLegacy(DefaultsUpdateLegacyMixin, TestCase):
++    command = staticmethod(cib_options.operation_defaults_update)
++    tag = "op_defaults"
++    command_for_report = reports.const.PCS_COMMAND_OPERATION_DEFAULTS_UPDATE
++
++
++class ResourceDefaultsUpdate(DefaultsUpdateMixin, TestCase):
++    command = staticmethod(cib_options.resource_defaults_update)
++    tag = "rsc_defaults"
++
++
++class OperationDefaultsUpdate(DefaultsUpdateMixin, TestCase):
++    command = staticmethod(cib_options.operation_defaults_update)
++    tag = "op_defaults"
+diff --git a/pcs_test/tier0/lib/test_validate.py b/pcs_test/tier0/lib/test_validate.py
+index 002fd8ed..8c0e0261 100644
+--- a/pcs_test/tier0/lib/test_validate.py
++++ b/pcs_test/tier0/lib/test_validate.py
+@@ -1238,6 +1238,33 @@ class ValuePositiveInteger(TestCase):
+         )
+ 
+ 
++class ValueScore(TestCase):
++    def test_valid_score(self):
++        for score in [
++            "1",
++            "-1",
++            "+1",
++            "123",
++            "-123",
++            "+123",
++            "INFINITY",
++            "-INFINITY",
++            "+INFINITY",
++        ]:
++            with self.subTest(score=score):
++                assert_report_item_list_equal(
++                    validate.ValueScore("a").validate({"a": score}), [],
++                )
++
++    def test_not_valid_score(self):
++        for score in ["something", "++1", "--1", "++INFINITY"]:
++            with self.subTest(score=score):
++                assert_report_item_list_equal(
++                    validate.ValueScore("a").validate({"a": score}),
++                    [fixture.error(report_codes.INVALID_SCORE, score=score,),],
++                )
++
++
+ class ValueTimeInterval(TestCase):
+     def test_no_reports_for_valid_time_interval(self):
+         for interval in ["0", "1s", "2sec", "3m", "4min", "5h", "6hr"]:
+diff --git a/pcs_test/tier1/legacy/test_resource.py b/pcs_test/tier1/legacy/test_resource.py
+index 5770d81a..7ffcc83b 100644
+--- a/pcs_test/tier1/legacy/test_resource.py
++++ b/pcs_test/tier1/legacy/test_resource.py
+@@ -1421,9 +1421,9 @@ monitor interval=20 (A-monitor-interval-20)
+              No alerts defined
+ 
+             Resources Defaults:
+-             No defaults set
++              No defaults set
+             Operations Defaults:
+-             No defaults set
++              No defaults set
+ 
+             Cluster Properties:
+ 
+@@ -1657,9 +1657,9 @@ monitor interval=20 (A-monitor-interval-20)
+              No alerts defined
+ 
+             Resources Defaults:
+-             No defaults set
++              No defaults set
+             Operations Defaults:
+-             No defaults set
++              No defaults set
+ 
+             Cluster Properties:
+ 
+diff --git a/pcs_test/tier1/legacy/test_stonith.py b/pcs_test/tier1/legacy/test_stonith.py
+index 3fc2c4d5..c51a02b5 100644
+--- a/pcs_test/tier1/legacy/test_stonith.py
++++ b/pcs_test/tier1/legacy/test_stonith.py
+@@ -293,9 +293,9 @@ class StonithTest(TestCase, AssertPcsMixin):
+              No alerts defined
+ 
+             Resources Defaults:
+-             No defaults set
++              No defaults set
+             Operations Defaults:
+-             No defaults set
++              No defaults set
+ 
+             Cluster Properties:
+ 
+@@ -1305,9 +1305,9 @@ class LevelConfig(LevelTestsBase):
+          No alerts defined
+ 
+         Resources Defaults:
+-         No defaults set
++          No defaults set
+         Operations Defaults:
+-         No defaults set
++          No defaults set
+ 
+         Cluster Properties:
+ 
+diff --git a/pcs_test/tier1/test_cib_options.py b/pcs_test/tier1/test_cib_options.py
+new file mode 100644
+index 00000000..ba8f3515
+--- /dev/null
++++ b/pcs_test/tier1/test_cib_options.py
+@@ -0,0 +1,571 @@
++from textwrap import dedent
++from unittest import TestCase
++
++from lxml import etree
++
++from pcs_test.tools.assertions import AssertPcsMixin
++from pcs_test.tools.cib import get_assert_pcs_effect_mixin
++from pcs_test.tools.misc import (
++    get_test_resource as rc,
++    get_tmp_file,
++    skip_unless_pacemaker_supports_rsc_and_op_rules,
++    write_data_to_tmpfile,
++    write_file_to_tmpfile,
++)
++from pcs_test.tools.pcs_runner import PcsRunner
++from pcs_test.tools.xml import XmlManipulation
++
++
++empty_cib = rc("cib-empty-2.0.xml")
++empty_cib_rules = rc("cib-empty-3.4.xml")
++
++
++class TestDefaultsMixin:
++    def setUp(self):
++        # pylint: disable=invalid-name
++        self.temp_cib = get_tmp_file("tier1_cib_options")
++        self.pcs_runner = PcsRunner(self.temp_cib.name)
++
++    def tearDown(self):
++        # pylint: disable=invalid-name
++        self.temp_cib.close()
++
++
++class DefaultsConfigMixin(TestDefaultsMixin, AssertPcsMixin):
++    cli_command = ""
++    prefix = ""
++
++    def test_success(self):
++        xml_rsc = """
++            <rsc_defaults>
++                <meta_attributes id="rsc-set1" score="10">
++                    <nvpair id="rsc-set1-nv1" name="name1" value="rsc1"/>
++                    <nvpair id="rsc-set1-nv2" name="name2" value="rsc2"/>
++                </meta_attributes>
++                <meta_attributes id="rsc-setA">
++                    <nvpair id="rsc-setA-nv1" name="name1" value="rscA"/>
++                    <nvpair id="rsc-setA-nv2" name="name2" value="rscB"/>
++                </meta_attributes>
++            </rsc_defaults>
++        """
++        xml_op = """
++            <op_defaults>
++                <meta_attributes id="op-set1" score="10">
++                    <nvpair id="op-set1-nv1" name="name1" value="op1"/>
++                    <nvpair id="op-set1-nv2" name="name2" value="op2"/>
++                </meta_attributes>
++                <meta_attributes id="op-setA">
++                    <nvpair id="op-setA-nv1" name="name1" value="opA"/>
++                    <nvpair id="op-setA-nv2" name="name2" value="opB"/>
++                </meta_attributes>
++            </op_defaults>
++        """
++        xml_manip = XmlManipulation.from_file(empty_cib)
++        xml_manip.append_to_first_tag_name("configuration", xml_rsc, xml_op)
++        write_data_to_tmpfile(str(xml_manip), self.temp_cib)
++
++        self.assert_pcs_success(
++            self.cli_command,
++            stdout_full=dedent(
++                f"""\
++                Meta Attrs: {self.prefix}-set1 score=10
++                  name1={self.prefix}1
++                  name2={self.prefix}2
++                Meta Attrs: {self.prefix}-setA
++                  name1={self.prefix}A
++                  name2={self.prefix}B
++            """
++            ),
++        )
++
++
++class RscDefaultsConfig(
++    DefaultsConfigMixin, TestCase,
++):
++    cli_command = "resource defaults"
++    prefix = "rsc"
++
++    @skip_unless_pacemaker_supports_rsc_and_op_rules()
++    def test_success_rules(self):
++        xml = """
++            <rsc_defaults>
++                <meta_attributes id="X">
++                    <rule id="X-rule" boolean-op="and" score="INFINITY">
++                        <rsc_expression id="X-rule-rsc-Dummy" type="Dummy"/>
++                    </rule>
++                    <nvpair id="X-nam1" name="nam1" value="val1"/>
++                </meta_attributes>
++            </rsc_defaults>
++        """
++        xml_manip = XmlManipulation.from_file(empty_cib_rules)
++        xml_manip.append_to_first_tag_name("configuration", xml)
++        write_data_to_tmpfile(str(xml_manip), self.temp_cib)
++
++        self.assert_pcs_success(
++            self.cli_command,
++            stdout_full=dedent(
++                """\
++                Meta Attrs: X
++                  nam1=val1
++                  Rule: boolean-op=and score=INFINITY
++                    Expression: resource ::Dummy
++            """
++            ),
++        )
++
++
++class OpDefaultsConfig(
++    DefaultsConfigMixin, TestCase,
++):
++    cli_command = "resource op defaults"
++    prefix = "op"
++
++    @skip_unless_pacemaker_supports_rsc_and_op_rules()
++    def test_success_rules(self):
++        xml = """
++            <op_defaults>
++                <meta_attributes id="X">
++                    <rule id="X-rule" boolean-op="and" score="INFINITY">
++                        <rsc_expression id="X-rule-rsc-Dummy" type="Dummy"/>
++                        <op_expression id="X-rule-op-monitor" name="monitor"/>
++                    </rule>
++                    <nvpair id="X-nam1" name="nam1" value="val1"/>
++                </meta_attributes>
++            </op_defaults>
++        """
++        xml_manip = XmlManipulation.from_file(empty_cib_rules)
++        xml_manip.append_to_first_tag_name("configuration", xml)
++        write_data_to_tmpfile(str(xml_manip), self.temp_cib)
++
++        self.assert_pcs_success(
++            self.cli_command,
++            stdout_full=dedent(
++                """\
++                Meta Attrs: X
++                  nam1=val1
++                  Rule: boolean-op=and score=INFINITY
++                    Expression: resource ::Dummy
++                    Expression: op monitor
++            """
++            ),
++        )
++
++
++class DefaultsSetCreateMixin(TestDefaultsMixin):
++    cli_command = ""
++    cib_tag = ""
++
++    def setUp(self):
++        super().setUp()
++        write_file_to_tmpfile(empty_cib, self.temp_cib)
++
++    def test_no_args(self):
++        self.assert_effect(
++            f"{self.cli_command} set create",
++            dedent(
++                f"""\
++                <{self.cib_tag}>
++                    <meta_attributes id="{self.cib_tag}-meta_attributes"/>
++                </{self.cib_tag}>
++            """
++            ),
++            output=(
++                "Warning: Defaults do not apply to resources which override "
++                "them with their own defined values\n"
++            ),
++        )
++
++    def test_success(self):
++        self.assert_effect(
++            (
++                f"{self.cli_command} set create id=mine score=10 "
++                "meta nam1=val1 nam2=val2 --force"
++            ),
++            dedent(
++                f"""\
++                <{self.cib_tag}>
++                    <meta_attributes id="mine" score="10">
++                        <nvpair id="mine-nam1" name="nam1" value="val1"/>
++                        <nvpair id="mine-nam2" name="nam2" value="val2"/>
++                    </meta_attributes>
++                </{self.cib_tag}>
++            """
++            ),
++            output=(
++                "Warning: Defaults do not apply to resources which override "
++                "them with their own defined values\n"
++            ),
++        )
++
++
++class RscDefaultsSetCreate(
++    get_assert_pcs_effect_mixin(
++        lambda cib: etree.tostring(
++            # pylint:disable=undefined-variable
++            etree.parse(cib).findall(".//rsc_defaults")[0]
++        )
++    ),
++    DefaultsSetCreateMixin,
++    TestCase,
++):
++    cli_command = "resource defaults"
++    cib_tag = "rsc_defaults"
++
++    @skip_unless_pacemaker_supports_rsc_and_op_rules()
++    def test_success_rules(self):
++        self.assert_effect(
++            (
++                f"{self.cli_command} set create id=X meta nam1=val1 "
++                "rule resource ::Dummy"
++            ),
++            f"""\
++            <{self.cib_tag}>
++                <meta_attributes id="X">
++                    <rule id="X-rule" boolean-op="and" score="INFINITY">
++                        <rsc_expression id="X-rule-rsc-Dummy" type="Dummy"/>
++                    </rule>
++                    <nvpair id="X-nam1" name="nam1" value="val1"/>
++                </meta_attributes>
++            </{self.cib_tag}>
++            """,
++            output=(
++                "CIB has been upgraded to the latest schema version.\n"
++                "Warning: Defaults do not apply to resources which override "
++                "them with their own defined values\n"
++            ),
++        )
++
++
++class OpDefaultsSetCreate(
++    get_assert_pcs_effect_mixin(
++        lambda cib: etree.tostring(
++            # pylint:disable=undefined-variable
++            etree.parse(cib).findall(".//op_defaults")[0]
++        )
++    ),
++    DefaultsSetCreateMixin,
++    TestCase,
++):
++    cli_command = "resource op defaults"
++    cib_tag = "op_defaults"
++
++    @skip_unless_pacemaker_supports_rsc_and_op_rules()
++    def test_success_rules(self):
++        self.assert_effect(
++            (
++                f"{self.cli_command} set create id=X meta nam1=val1 "
++                "rule resource ::Dummy and op monitor"
++            ),
++            f"""\
++            <{self.cib_tag}>
++                <meta_attributes id="X">
++                    <rule id="X-rule" boolean-op="and" score="INFINITY">
++                        <rsc_expression id="X-rule-rsc-Dummy" type="Dummy"/>
++                        <op_expression id="X-rule-op-monitor" name="monitor"/>
++                    </rule>
++                    <nvpair id="X-nam1" name="nam1" value="val1"/>
++                </meta_attributes>
++            </{self.cib_tag}>
++            """,
++            output=(
++                "CIB has been upgraded to the latest schema version.\n"
++                "Warning: Defaults do not apply to resources which override "
++                "them with their own defined values\n"
++            ),
++        )
++
++
++class DefaultsSetDeleteMixin(TestDefaultsMixin, AssertPcsMixin):
++    cli_command = ""
++    prefix = ""
++    cib_tag = ""
++
++    def setUp(self):
++        super().setUp()
++        xml_rsc = """
++            <rsc_defaults>
++                <meta_attributes id="rsc-set1" />
++                <meta_attributes id="rsc-set2" />
++                <meta_attributes id="rsc-set3" />
++                <meta_attributes id="rsc-set4" />
++            </rsc_defaults>
++        """
++        xml_op = """
++            <op_defaults>
++                <meta_attributes id="op-set1" />
++                <meta_attributes id="op-set2" />
++                <meta_attributes id="op-set3" />
++                <meta_attributes id="op-set4" />
++            </op_defaults>
++        """
++        xml_manip = XmlManipulation.from_file(empty_cib)
++        xml_manip.append_to_first_tag_name("configuration", xml_rsc, xml_op)
++        write_data_to_tmpfile(str(xml_manip), self.temp_cib)
++
++    def test_success(self):
++        self.assert_effect(
++            [
++                f"{self.cli_command} set delete {self.prefix}-set1 "
++                f"{self.prefix}-set3",
++                f"{self.cli_command} set remove {self.prefix}-set1 "
++                f"{self.prefix}-set3",
++            ],
++            dedent(
++                f"""\
++                <{self.cib_tag}>
++                    <meta_attributes id="{self.prefix}-set2" />
++                    <meta_attributes id="{self.prefix}-set4" />
++                </{self.cib_tag}>
++            """
++            ),
++        )
++
++
++class RscDefaultsSetDelete(
++    get_assert_pcs_effect_mixin(
++        lambda cib: etree.tostring(
++            # pylint:disable=undefined-variable
++            etree.parse(cib).findall(".//rsc_defaults")[0]
++        )
++    ),
++    DefaultsSetDeleteMixin,
++    TestCase,
++):
++    cli_command = "resource defaults"
++    prefix = "rsc"
++    cib_tag = "rsc_defaults"
++
++
++class OpDefaultsSetDelete(
++    get_assert_pcs_effect_mixin(
++        lambda cib: etree.tostring(
++            # pylint:disable=undefined-variable
++            etree.parse(cib).findall(".//op_defaults")[0]
++        )
++    ),
++    DefaultsSetDeleteMixin,
++    TestCase,
++):
++    cli_command = "resource op defaults"
++    prefix = "op"
++    cib_tag = "op_defaults"
++
++
++class DefaultsSetUpdateMixin(TestDefaultsMixin, AssertPcsMixin):
++    cli_command = ""
++    prefix = ""
++    cib_tag = ""
++
++    def test_success(self):
++        xml = f"""
++            <{self.cib_tag}>
++                <meta_attributes id="my-set">
++                    <nvpair id="my-set-name1" name="name1" value="value1" />
++                    <nvpair id="my-set-name2" name="name2" value="value2" />
++                    <nvpair id="my-set-name3" name="name3" value="value3" />
++                </meta_attributes>
++            </{self.cib_tag}>
++        """
++        xml_manip = XmlManipulation.from_file(empty_cib)
++        xml_manip.append_to_first_tag_name("configuration", xml)
++        write_data_to_tmpfile(str(xml_manip), self.temp_cib)
++        warnings = (
++            "Warning: Defaults do not apply to resources which override "
++            "them with their own defined values\n"
++        )
++
++        self.assert_effect(
++            f"{self.cli_command} set update my-set meta name2=value2A name3=",
++            dedent(
++                f"""\
++                <{self.cib_tag}>
++                    <meta_attributes id="my-set">
++                        <nvpair id="my-set-name1" name="name1" value="value1" />
++                        <nvpair id="my-set-name2" name="name2" value="value2A" />
++                    </meta_attributes>
++                </{self.cib_tag}>
++            """
++            ),
++            output=warnings,
++        )
++
++        self.assert_effect(
++            f"{self.cli_command} set update my-set meta name1= name2=",
++            dedent(
++                f"""\
++                <{self.cib_tag}>
++                    <meta_attributes id="my-set" />
++                </{self.cib_tag}>
++            """
++            ),
++            output=warnings,
++        )
++
++
++class RscDefaultsSetUpdate(
++    get_assert_pcs_effect_mixin(
++        lambda cib: etree.tostring(
++            # pylint:disable=undefined-variable
++            etree.parse(cib).findall(".//rsc_defaults")[0]
++        )
++    ),
++    DefaultsSetUpdateMixin,
++    TestCase,
++):
++    cli_command = "resource defaults"
++    prefix = "rsc"
++    cib_tag = "rsc_defaults"
++
++
++class OpDefaultsSetUpdate(
++    get_assert_pcs_effect_mixin(
++        lambda cib: etree.tostring(
++            # pylint:disable=undefined-variable
++            etree.parse(cib).findall(".//op_defaults")[0]
++        )
++    ),
++    DefaultsSetUpdateMixin,
++    TestCase,
++):
++    cli_command = "resource op defaults"
++    prefix = "op"
++    cib_tag = "op_defaults"
++
++
++class DefaultsSetUsageMixin(TestDefaultsMixin, AssertPcsMixin):
++    cli_command = ""
++
++    def test_no_args(self):
++        self.assert_pcs_fail(
++            f"{self.cli_command} set",
++            stdout_start=f"\nUsage: pcs {self.cli_command} set...\n",
++        )
++
++    def test_bad_command(self):
++        self.assert_pcs_fail(
++            f"{self.cli_command} set bad-command",
++            stdout_start=f"\nUsage: pcs {self.cli_command} set ...\n",
++        )
++
++
++class RscDefaultsSetUsage(
++    DefaultsSetUsageMixin, TestCase,
++):
++    cli_command = "resource defaults"
++
++
++class OpDefaultsSetUsage(
++    DefaultsSetUsageMixin, TestCase,
++):
++    cli_command = "resource op defaults"
++
++
++class DefaultsUpdateMixin(TestDefaultsMixin, AssertPcsMixin):
++    cli_command = ""
++    prefix = ""
++    cib_tag = ""
++
++    def assert_success_legacy(self, update_keyword):
++        write_file_to_tmpfile(empty_cib, self.temp_cib)
++        warning_lines = []
++        if not update_keyword:
++            warning_lines.append(
++                "Warning: This command is deprecated and will be removed. "
++                f"Please use 'pcs {self.cli_command} update' instead.\n"
++            )
++        warning_lines.append(
++            "Warning: Defaults do not apply to resources which override "
++            "them with their own defined values\n"
++        )
++        warnings = "".join(warning_lines)
++
++        update = "update" if update_keyword else ""
++
++        self.assert_effect(
++            f"{self.cli_command} {update} name1=value1 name2=value2 name3=value3",
++            dedent(
++                f"""\
++                <{self.cib_tag}>
++                    <meta_attributes id="{self.cib_tag}-meta_attributes">
++                        <nvpair id="{self.cib_tag}-meta_attributes-name1"
++                            name="name1" value="value1"
++                        />
++                        <nvpair id="{self.cib_tag}-meta_attributes-name2"
++                            name="name2" value="value2"
++                        />
++                        <nvpair id="{self.cib_tag}-meta_attributes-name3"
++                            name="name3" value="value3"
++                        />
++                    </meta_attributes>
++                </{self.cib_tag}>
++            """
++            ),
++            output=warnings,
++        )
++
++        self.assert_effect(
++            f"{self.cli_command} {update} name2=value2A name3=",
++            dedent(
++                f"""\
++                <{self.cib_tag}>
++                    <meta_attributes id="{self.cib_tag}-meta_attributes">
++                        <nvpair id="{self.cib_tag}-meta_attributes-name1"
++                            name="name1" value="value1"
++                        />
++                        <nvpair id="{self.cib_tag}-meta_attributes-name2"
++                            name="name2" value="value2A"
++                        />
++                    </meta_attributes>
++                </{self.cib_tag}>
++            """
++            ),
++            output=warnings,
++        )
++
++        self.assert_effect(
++            f"{self.cli_command} {update} name1= name2=",
++            dedent(
++                f"""\
++                <{self.cib_tag}>
++                    <meta_attributes id="{self.cib_tag}-meta_attributes" />
++                </{self.cib_tag}>
++            """
++            ),
++            output=warnings,
++        )
++
++    def test_deprecated(self):
++        self.assert_success_legacy(False)
++
++    def test_legacy(self):
++        self.assert_success_legacy(True)
++
++
++class RscDefaultsUpdate(
++    get_assert_pcs_effect_mixin(
++        lambda cib: etree.tostring(
++            # pylint:disable=undefined-variable
++            etree.parse(cib).findall(".//rsc_defaults")[0]
++        )
++    ),
++    DefaultsUpdateMixin,
++    TestCase,
++):
++    cli_command = "resource defaults"
++    prefix = "rsc"
++    cib_tag = "rsc_defaults"
++
++
++class OpDefaultsUpdate(
++    get_assert_pcs_effect_mixin(
++        lambda cib: etree.tostring(
++            # pylint:disable=undefined-variable
++            etree.parse(cib).findall(".//op_defaults")[0]
++        )
++    ),
++    DefaultsUpdateMixin,
++    TestCase,
++):
++    cli_command = "resource op defaults"
++    prefix = "op"
++    cib_tag = "op_defaults"
+diff --git a/pcs_test/tier1/test_tag.py b/pcs_test/tier1/test_tag.py
+index d28d3ae5..8057476a 100644
+--- a/pcs_test/tier1/test_tag.py
++++ b/pcs_test/tier1/test_tag.py
+@@ -246,9 +246,9 @@ class PcsConfigTagsTest(TestTagMixin, TestCase):
+          No alerts defined
+ 
+         Resources Defaults:
+-         No defaults set
++          No defaults set
+         Operations Defaults:
+-         No defaults set
++          No defaults set
+ 
+         Cluster Properties:
+         {tags}
+diff --git a/pcs_test/tools/fixture.py b/pcs_test/tools/fixture.py
+index a460acc7..6480617e 100644
+--- a/pcs_test/tools/fixture.py
++++ b/pcs_test/tools/fixture.py
+@@ -245,14 +245,14 @@ def report_resource_running(resource, roles, severity=severities.INFO):
+     )
+ 
+ 
+-def report_unexpected_element(element_id, elemet_type, expected_types):
++def report_unexpected_element(element_id, element_type, expected_types):
+     return (
+         severities.ERROR,
+         report_codes.ID_BELONGS_TO_UNEXPECTED_TYPE,
+         {
+             "id": element_id,
+             "expected_types": expected_types,
+-            "current_type": elemet_type,
++            "current_type": element_type,
+         },
+         None,
+     )
+diff --git a/pcs_test/tools/misc.py b/pcs_test/tools/misc.py
+index f481a267..33d78002 100644
+--- a/pcs_test/tools/misc.py
++++ b/pcs_test/tools/misc.py
+@@ -5,6 +5,8 @@ import re
+ import tempfile
+ from unittest import mock, skipUnless
+ 
++from lxml import etree
++
+ from pcs_test.tools.custom_mock import MockLibraryReportProcessor
+ 
+ from pcs import settings
+@@ -128,12 +130,12 @@ def compare_version(a, b):
+ 
+ def is_minimum_pacemaker_version(major, minor, rev):
+     return is_version_sufficient(
+-        get_current_pacemaker_version(), (major, minor, rev)
++        _get_current_pacemaker_version(), (major, minor, rev)
+     )
+ 
+ 
+ @lru_cache()
+-def get_current_pacemaker_version():
++def _get_current_pacemaker_version():
+     output, dummy_stderr, dummy_retval = runner.run(
+         [os.path.join(settings.pacemaker_binaries, "crm_mon"), "--version",]
+     )
+@@ -146,6 +148,26 @@ def get_current_pacemaker_version():
+     return major, minor, rev
+ 
+ 
++@lru_cache()
++def _get_current_cib_schema_version():
++    regexp = re.compile(r"pacemaker-((\d+)\.(\d+))")
++    all_versions = set()
++    xml = etree.parse("/usr/share/pacemaker/versions.rng").getroot()
++    for value_el in xml.xpath(
++        ".//x:attribute[@name='validate-with']//x:value",
++        namespaces={"x": "http://relaxng.org/ns/structure/1.0"},
++    ):
++        match = re.match(regexp, value_el.text)
++        if match:
++            all_versions.add((int(match.group(2)), int(match.group(3))))
++    return sorted(all_versions)[-1]
++
++
++def _is_minimum_cib_schema_version(cmajor, cminor, crev):
++    major, minor = _get_current_cib_schema_version()
++    return compare_version((major, minor, 0), (cmajor, cminor, crev)) > -1
++
++
+ def is_version_sufficient(current_version, minimal_version):
+     return compare_version(current_version, minimal_version) > -1
+ 
+@@ -174,7 +196,7 @@ def _get_current_pacemaker_features():
+ 
+ 
+ def skip_unless_pacemaker_version(version_tuple, feature):
+-    current_version = get_current_pacemaker_version()
++    current_version = _get_current_pacemaker_version()
+     return skipUnless(
+         is_version_sufficient(current_version, version_tuple),
+         (
+@@ -188,12 +210,6 @@ def skip_unless_pacemaker_version(version_tuple, feature):
+     )
+ 
+ 
+-def skip_unless_crm_rule():
+-    return skip_unless_pacemaker_version(
+-        (2, 0, 2), "listing of constraints that might be expired"
+-    )
+-
+-
+ def skip_unless_pacemaker_features(version_tuple, feature):
+     return skipUnless(
+         is_minimum_pacemaker_features(*version_tuple),
+@@ -204,12 +220,39 @@ def skip_unless_pacemaker_features(version_tuple, feature):
+     )
+ 
+ 
++def skip_unless_cib_schema_version(version_tuple, feature):
++    current_version = _get_current_cib_schema_version()
++    return skipUnless(
++        _is_minimum_cib_schema_version(*version_tuple),
++        (
++            "Pacemaker supported CIB schema version is too low (current: "
++            "{current_version}, must be >= {minimal_version}) to test {feature}"
++        ).format(
++            current_version=format_version(current_version),
++            minimal_version=format_version(version_tuple),
++            feature=feature,
++        ),
++    )
++
++
++def skip_unless_crm_rule():
++    return skip_unless_pacemaker_version(
++        (2, 0, 2), "listing of constraints that might be expired"
++    )
++
++
+ def skip_unless_pacemaker_supports_bundle():
+     return skip_unless_pacemaker_features(
+         (3, 1, 0), "bundle resources with promoted-max attribute"
+     )
+ 
+ 
++def skip_unless_pacemaker_supports_rsc_and_op_rules():
++    return skip_unless_cib_schema_version(
++        (3, 4, 0), "rsc_expression and op_expression elements in rule elements"
++    )
++
++
+ def skip_if_service_enabled(service_name):
+     return skipUnless(
+         not is_service_enabled(runner, service_name),
+diff --git a/pcsd/capabilities.xml b/pcsd/capabilities.xml
+index daf23e5a..6e1886cb 100644
+--- a/pcsd/capabilities.xml
++++ b/pcsd/capabilities.xml
+@@ -964,6 +964,21 @@
+         pcs commands: resource op defaults
+       </description>
+     </capability>
++    <capability id="pcmk.properties.operation-defaults.multiple" in-pcs="1" in-pcsd="0">
++      <description>
++        Support for managing multiple sets of resource operations defaults.
++
++        pcs commands: resource op defaults set create | delete | remove | update
++      </description>
++    </capability>
++    <capability id="pcmk.properties.operation-defaults.rule-rsc-op" in-pcs="1" in-pcsd="0">
++      <description>
++        Support for rules with 'resource' and 'op' expressions in sets of
++        resource operations defaults.
++
++        pcs commands: resource op defaults set create
++      </description>
++    </capability>
+     <capability id="pcmk.properties.resource-defaults" in-pcs="1" in-pcsd="0">
+       <description>
+         Show and set resources defaults, can set multiple defaults at once.
+@@ -971,6 +986,21 @@
+         pcs commands: resource defaults
+       </description>
+     </capability>
++    <capability id="pcmk.properties.resource-defaults.multiple" in-pcs="1" in-pcsd="0">
++      <description>
++        Support for managing multiple sets of resources defaults.
++
++        pcs commands: resource defaults set create | delete | remove | update
++      </description>
++    </capability>
++    <capability id="pcmk.properties.resource-defaults.rule-rsc-op" in-pcs="1" in-pcsd="0">
++      <description>
++        Support for rules with 'resource' and 'op' expressions in sets of
++        resources defaults.
++
++        pcs commands: resource defaults set create
++      </description>
++    </capability>
+ 
+ 
+ 
+diff --git a/test/centos8/Dockerfile b/test/centos8/Dockerfile
+index bcdfadef..753f0ca7 100644
+--- a/test/centos8/Dockerfile
++++ b/test/centos8/Dockerfile
+@@ -12,6 +12,7 @@ RUN dnf install -y \
+         python3-pip \
+         python3-pycurl \
+         python3-pyOpenSSL \
++        python3-pyparsing \
+         # ruby
+         ruby \
+         ruby-devel \
+diff --git a/test/fedora30/Dockerfile b/test/fedora30/Dockerfile
+index 60aad892..7edbfe5b 100644
+--- a/test/fedora30/Dockerfile
++++ b/test/fedora30/Dockerfile
+@@ -9,6 +9,7 @@ RUN dnf install -y \
+         python3-mock \
+         python3-pycurl \
+         python3-pyOpenSSL \
++        python3-pyparsing \
+         # ruby
+         ruby \
+         ruby-devel \
+diff --git a/test/fedora31/Dockerfile b/test/fedora31/Dockerfile
+index eb24bb1c..6750e222 100644
+--- a/test/fedora31/Dockerfile
++++ b/test/fedora31/Dockerfile
+@@ -10,6 +10,7 @@ RUN dnf install -y \
+         python3-pip \
+         python3-pycurl \
+         python3-pyOpenSSL \
++        python3-pyparsing \
+         # ruby
+         ruby \
+         ruby-devel \
+diff --git a/test/fedora32/Dockerfile b/test/fedora32/Dockerfile
+index 61a0a439..c6cc2146 100644
+--- a/test/fedora32/Dockerfile
++++ b/test/fedora32/Dockerfile
+@@ -11,6 +11,7 @@ RUN dnf install -y \
+         python3-pip \
+         python3-pycurl \
+         python3-pyOpenSSL \
++        python3-pyparsing \
+         # ruby
+         ruby \
+         ruby-devel \
+-- 
+2.25.4
+
diff --git a/SOURCES/do-not-support-cluster-setup-with-udp-u-transport.patch b/SOURCES/do-not-support-cluster-setup-with-udp-u-transport.patch
index 4da46c4..800145e 100644
--- a/SOURCES/do-not-support-cluster-setup-with-udp-u-transport.patch
+++ b/SOURCES/do-not-support-cluster-setup-with-udp-u-transport.patch
@@ -1,4 +1,4 @@
-From b919e643ff75fa47dcecbf60fd4938ae9b076ce4 Mon Sep 17 00:00:00 2001
+From c0fff964cc07e3a9fbdea85da33abe3329c653a3 Mon Sep 17 00:00:00 2001
 From: Ivan Devat <idevat@redhat.com>
 Date: Tue, 20 Nov 2018 15:03:56 +0100
 Subject: [PATCH 3/3] do not support cluster setup with udp(u) transport
@@ -10,10 +10,10 @@ Subject: [PATCH 3/3] do not support cluster setup with udp(u) transport
  3 files changed, 6 insertions(+)
 
 diff --git a/pcs/pcs.8 b/pcs/pcs.8
-index 651fda83..9a4673dd 100644
+index 3efc5bb2..20247774 100644
 --- a/pcs/pcs.8
 +++ b/pcs/pcs.8
-@@ -283,6 +283,8 @@ By default, encryption is enabled with cipher=aes256 and hash=sha256. To disable
+@@ -376,6 +376,8 @@ By default, encryption is enabled with cipher=aes256 and hash=sha256. To disable
  
  Transports udp and udpu:
  .br
@@ -23,10 +23,10 @@ index 651fda83..9a4673dd 100644
  .br
  Transport options are: ip_version, netmtu
 diff --git a/pcs/usage.py b/pcs/usage.py
-index e4f5af32..63e1c061 100644
+index 0f3c95a3..51bc1196 100644
 --- a/pcs/usage.py
 +++ b/pcs/usage.py
-@@ -689,6 +689,7 @@ Commands:
+@@ -796,6 +796,7 @@ Commands:
              hash=sha256. To disable encryption, set cipher=none and hash=none.
  
          Transports udp and udpu:
@@ -49,5 +49,5 @@ index b857cbae..b8d48d92 100644
  #csetup-transport-options.knet .without-knet
  {
 -- 
-2.21.0
+2.25.4
 
diff --git a/SPECS/pcs.spec b/SPECS/pcs.spec
index bcdf69a..3a207a6 100644
--- a/SPECS/pcs.spec
+++ b/SPECS/pcs.spec
@@ -1,11 +1,17 @@
 Name: pcs
-Version: 0.10.4
-Release: 3%{?dist}
+Version: 0.10.6
+Release: 2%{?dist}
+# https://docs.fedoraproject.org/en-US/packaging-guidelines/LicensingGuidelines/
 # https://fedoraproject.org/wiki/Licensing:Main?rd=Licensing#Good_Licenses
 # GPLv2: pcs
-# ASL 2.0: tornado
-# MIT: handlebars
-License: GPLv2 and ASL 2.0 and MIT
+# ASL 2.0: dataclasses, tornado
+# MIT: handlebars, backports, dacite, daemons, ethon, mustermann, rack,
+#      rack-protection, rack-test, sinatra, tilt
+# GPLv2 or Ruby: eventmachine, json
+# (GPLv2 or Ruby) and BSD: thin
+# BSD or Ruby: open4, ruby2_keywords
+# BSD and MIT: ffi
+License: GPLv2 and ASL 2.0 and MIT and BSD and (GPLv2 or Ruby) and (BSD or Ruby)
 URL: https://github.com/ClusterLabs/pcs
 Group: System Environment/Base
 Summary: Pacemaker Configuration System
@@ -18,24 +24,30 @@ ExclusiveArch: i686 x86_64 s390x ppc64le aarch64
 %global pcs_source_name %{name}-%{version_or_commit}
 
 # ui_commit can be determined by hash, tag or branch 
-%global ui_commit 0.1.2
+%global ui_commit 0.1.3
 %global ui_src_name pcs-web-ui-%{ui_commit}
 
 %global pcs_snmp_pkg_name  pcs-snmp
 
 %global pyagentx_version   0.4.pcs.2
-%global tornado_version    6.0.3
-%global version_rubygem_backports  3.11.4
-%global version_rubygem_ethon  0.11.0
-%global version_rubygem_ffi  1.9.25
-%global version_rubygem_json  2.1.0
-%global version_rubygem_mustermann  1.0.3
+%global tornado_version    6.0.4
+%global dataclasses_version 0.6
+%global dacite_version  1.5.0
+%global version_rubygem_backports  3.17.2
+%global version_rubygem_daemons  1.3.1
+%global version_rubygem_ethon  0.12.0
+%global version_rubygem_eventmachine  1.2.7
+%global version_rubygem_ffi  1.13.1
+%global version_rubygem_json  2.3.0
+%global version_rubygem_mustermann  1.1.1
 %global version_rubygem_open4  1.3.4
-%global version_rubygem_rack  2.0.6
-%global version_rubygem_rack_protection  2.0.4
-%global version_rubygem_rack_test  1.0.0
-%global version_rubygem_sinatra  2.0.4
-%global version_rubygem_tilt  2.0.9
+%global version_rubygem_rack  2.2.3
+%global version_rubygem_rack_protection  2.0.8.1
+%global version_rubygem_rack_test  1.1.0
+%global version_rubygem_ruby2_keywords  0.0.2
+%global version_rubygem_sinatra  2.0.8.1
+%global version_rubygem_thin  1.7.2
+%global version_rubygem_tilt  2.0.10
 
 # We do not use _libdir macro because upstream is not prepared for it.
 # Pcs does not include binaries and thus it should live in /usr/lib. Tornado
@@ -70,6 +82,8 @@ Source2: pcsd-bundle-config-2
 
 Source41: https://github.com/ondrejmular/pyagentx/archive/v%{pyagentx_version}/pyagentx-%{pyagentx_version}.tar.gz
 Source42: https://github.com/tornadoweb/tornado/archive/v%{tornado_version}/tornado-%{tornado_version}.tar.gz
+Source43: https://github.com/ericvsmith/dataclasses/archive/%{dataclasses_version}/dataclasses-%{dataclasses_version}.tar.gz
+Source44: https://github.com/konradhalas/dacite/archive/v%{dacite_version}/dacite-%{dacite_version}.tar.gz
 
 Source81: https://rubygems.org/downloads/backports-%{version_rubygem_backports}.gem
 Source82: https://rubygems.org/downloads/ethon-%{version_rubygem_ethon}.gem
@@ -85,6 +99,10 @@ Source89: https://rubygems.org/downloads/rack-protection-%{version_rubygem_rack_
 Source90: https://rubygems.org/downloads/rack-test-%{version_rubygem_rack_test}.gem
 Source91: https://rubygems.org/downloads/sinatra-%{version_rubygem_sinatra}.gem
 Source92: https://rubygems.org/downloads/tilt-%{version_rubygem_tilt}.gem
+Source93: https://rubygems.org/downloads/eventmachine-%{version_rubygem_eventmachine}.gem
+Source94: https://rubygems.org/downloads/daemons-%{version_rubygem_daemons}.gem
+Source95: https://rubygems.org/downloads/thin-%{version_rubygem_thin}.gem
+Source96: https://rubygems.org/downloads/ruby2_keywords-%{version_rubygem_ruby2_keywords}.gem
 
 Source100: https://github.com/idevat/pcs-web-ui/archive/%{ui_commit}/%{ui_src_name}.tar.gz
 Source101: https://github.com/idevat/pcs-web-ui/releases/download/%{ui_commit}/pcs-web-ui-node-modules-%{ui_commit}.tar.xz
@@ -93,8 +111,9 @@ Source101: https://github.com/idevat/pcs-web-ui/releases/download/%{ui_commit}/p
 # They should come before downstream patches to avoid unnecessary conflicts.
 # Z-streams are exception here: they can come from upstream but should be
 # applied at the end to keep z-stream changes as straightforward as possible.
-Patch1: bz1676431-01-Display-status-of-disaster-recovery.patch
-Patch2: bz1743731-01-fix-error-msg-when-cluster-is-not-set-up.patch
+# Patch1: name.patch
+Patch1: bz1817547-01-resource-and-operation-defaults.patch
+Patch2: bz1805082-01-fix-resource-stonith-refresh-documentation.patch
 
 # Downstream patches do not come from upstream. They adapt pcs for specific
 # RHEL needs.
@@ -104,12 +123,12 @@ Patch101: do-not-support-cluster-setup-with-udp-u-transport.patch
 BuildRequires: git
 #printf from coreutils is used in makefile
 BuildRequires: coreutils
-BuildRequires: execstack
 # python for pcs
 BuildRequires: platform-python
 BuildRequires: python3-devel
 BuildRequires: platform-python-setuptools
 BuildRequires: python3-pycurl
+BuildRequires: python3-pyparsing
 # gcc for compiling custom rubygems
 BuildRequires: gcc
 BuildRequires: gcc-c++
@@ -126,16 +145,6 @@ BuildRequires: systemd
 # for tests
 BuildRequires: python3-lxml
 BuildRequires: python3-pyOpenSSL
-BuildRequires: pacemaker-cli >= 2.0.0
-# BuildRequires: fence-agents-all
-BuildRequires: fence-agents-apc
-BuildRequires: fence-agents-scsi
-BuildRequires: fence-agents-ipmilan
-# for tests
-%ifarch i686 x86_64
-BuildRequires: fence-virt
-%endif
-BuildRequires: booth-site
 # pcsd fonts and font management tools for creating symlinks to fonts
 BuildRequires: fontconfig
 BuildRequires: liberation-sans-fonts
@@ -152,6 +161,7 @@ Requires: python3-lxml
 Requires: platform-python-setuptools
 Requires: python3-clufter => 0.70.0
 Requires: python3-pycurl
+Requires: python3-pyparsing
 # ruby and gems for pcsd
 Requires: ruby >= 2.2.0
 Requires: rubygems
@@ -179,18 +189,26 @@ Requires: liberation-sans-fonts
 Requires: overpass-fonts
 # favicon Red Hat logo
 Requires: redhat-logos
+# needs logrotate for /etc/logrotate.d/pcsd
+Requires: logrotate
 
 Provides: bundled(tornado) = %{tornado_version}
+Provides: bundled(dataclasses) = %{dataclasses_version}
+Provides: bundled(dacite) = %{dacite_version}
 Provides: bundled(backports) = %{version_rubygem_backports}
+Provides: bundled(daemons) = %{version_rubygem_daemons}
 Provides: bundled(ethon) = %{version_rubygem_ethon}
+Provides: bundled(eventmachine) = %{version_rubygem_eventmachine}
 Provides: bundled(ffi) = %{version_rubygem_ffi}
 Provides: bundled(json) = %{version_rubygem_json}
 Provides: bundled(mustermann) = %{version_rubygem_mustermann}
 Provides: bundled(open4) = %{version_rubygem_open4}
 Provides: bundled(rack) = %{version_rubygem_rack}
-Provides: bundled(rack) = %{version_rubygem_rack_protection}
-Provides: bundled(rack) = %{version_rubygem_rack_test}
+Provides: bundled(rack_protection) = %{version_rubygem_rack_protection}
+Provides: bundled(rack_test) = %{version_rubygem_rack_test}
+Provides: bundled(ruby2_keywords) = %{version_rubygem_ruby2_keywords}
 Provides: bundled(sinatra) = %{version_rubygem_sinatra}
+Provides: bundled(thin) = %{version_rubygem_thin}
 Provides: bundled(tilt) = %{version_rubygem_tilt}
 
 %description
@@ -238,7 +256,11 @@ update_times(){
   unset file_list[0]
 
   for fname in ${file_list[@]}; do
-    touch -r $reference_file $fname
+    # some files could be deleted by a patch therefore we test file for
+    # existence before touch to avoid exit with error: No such file or
+    # directory
+    # diffstat cannot create list of files without deleted files
+    test -e $fname && touch -r $reference_file $fname
   done
 }
 
@@ -257,6 +279,7 @@ update_times_patch(){
   update_times ${patch_file_name} `diffstat -p1 -l ${patch_file_name}`
 }
 
+# update_times_patch %%{PATCH1}
 update_times_patch %{PATCH1}
 update_times_patch %{PATCH2}
 update_times_patch %{PATCH101}
@@ -288,6 +311,10 @@ cp -f %SOURCE89 pcsd/vendor/cache
 cp -f %SOURCE90 pcsd/vendor/cache
 cp -f %SOURCE91 pcsd/vendor/cache
 cp -f %SOURCE92 pcsd/vendor/cache
+cp -f %SOURCE93 pcsd/vendor/cache
+cp -f %SOURCE94 pcsd/vendor/cache
+cp -f %SOURCE95 pcsd/vendor/cache
+cp -f %SOURCE96 pcsd/vendor/cache
 
 
 # 3) dir for python bundles
@@ -308,6 +335,20 @@ update_times %SOURCE42 `find %{bundled_src_dir}/tornado -follow`
 cp %{bundled_src_dir}/tornado/LICENSE tornado_LICENSE
 cp %{bundled_src_dir}/tornado/README.rst tornado_README.rst
 
+# 6) sources for python dataclasses
+tar -xzf %SOURCE43 -C %{bundled_src_dir}
+mv %{bundled_src_dir}/dataclasses-%{dataclasses_version} %{bundled_src_dir}/dataclasses
+update_times %SOURCE43 `find %{bundled_src_dir}/dataclasses -follow`
+cp %{bundled_src_dir}/dataclasses/LICENSE.txt dataclasses_LICENSE.txt
+cp %{bundled_src_dir}/dataclasses/README.rst dataclasses_README.rst
+
+# 7) sources for python dacite
+tar -xzf %SOURCE44 -C %{bundled_src_dir}
+mv %{bundled_src_dir}/dacite-%{dacite_version} %{bundled_src_dir}/dacite
+update_times %SOURCE44 `find %{bundled_src_dir}/dacite -follow`
+cp %{bundled_src_dir}/dacite/LICENSE dacite_LICENSE
+cp %{bundled_src_dir}/dacite/README.md dacite_README.md
+
 %build
 %define debug_package %{nil}
 
@@ -321,22 +362,47 @@ gem install \
   --force --verbose --no-rdoc --no-ri -l --no-user-install \
   -i %{rubygem_bundle_dir} \
   %{rubygem_cache_dir}/backports-%{version_rubygem_backports}.gem \
+  %{rubygem_cache_dir}/daemons-%{version_rubygem_daemons}.gem \
   %{rubygem_cache_dir}/ethon-%{version_rubygem_ethon}.gem \
+  %{rubygem_cache_dir}/eventmachine-%{version_rubygem_eventmachine}.gem \
   %{rubygem_cache_dir}/ffi-%{version_rubygem_ffi}.gem \
   %{rubygem_cache_dir}/json-%{version_rubygem_json}.gem \
   %{rubygem_cache_dir}/mustermann-%{version_rubygem_mustermann}.gem \
   %{rubygem_cache_dir}/open4-%{version_rubygem_open4}.gem \
-  %{rubygem_cache_dir}/rack-%{version_rubygem_rack}.gem \
   %{rubygem_cache_dir}/rack-protection-%{version_rubygem_rack_protection}.gem \
   %{rubygem_cache_dir}/rack-test-%{version_rubygem_rack_test}.gem \
+  %{rubygem_cache_dir}/rack-%{version_rubygem_rack}.gem \
+  %{rubygem_cache_dir}/ruby2_keywords-%{version_rubygem_ruby2_keywords}.gem \
   %{rubygem_cache_dir}/sinatra-%{version_rubygem_sinatra}.gem \
+  %{rubygem_cache_dir}/thin-%{version_rubygem_thin}.gem \
   %{rubygem_cache_dir}/tilt-%{version_rubygem_tilt}.gem \
   -- '--with-ldflags="-Wl,-z,relro -Wl,-z,ibt -Wl,-z,now -Wl,--gc-sections"' \
      '--with-cflags="-O2 -ffunction-sections"'
 
+# prepare license files
+# some rubygems do not have a license file (ruby2_keywords, thin)
+mv %{rubygem_bundle_dir}/gems/backports-%{version_rubygem_backports}/LICENSE.txt backports_LICENSE.txt
+mv %{rubygem_bundle_dir}/gems/daemons-%{version_rubygem_daemons}/LICENSE daemons_LICENSE
+mv %{rubygem_bundle_dir}/gems/ethon-%{version_rubygem_ethon}/LICENSE ethon_LICENSE
+mv %{rubygem_bundle_dir}/gems/eventmachine-%{version_rubygem_eventmachine}/LICENSE eventmachine_LICENSE
+mv %{rubygem_bundle_dir}/gems/eventmachine-%{version_rubygem_eventmachine}/GNU eventmachine_GNU
+mv %{rubygem_bundle_dir}/gems/ffi-%{version_rubygem_ffi}/COPYING ffi_COPYING
+mv %{rubygem_bundle_dir}/gems/ffi-%{version_rubygem_ffi}/LICENSE ffi_LICENSE
+mv %{rubygem_bundle_dir}/gems/ffi-%{version_rubygem_ffi}/LICENSE.SPECS ffi_LICENSE.SPECS
+mv %{rubygem_bundle_dir}/gems/json-%{version_rubygem_json}/LICENSE json_LICENSE
+mv %{rubygem_bundle_dir}/gems/mustermann-%{version_rubygem_mustermann}/LICENSE mustermann_LICENSE
+mv %{rubygem_bundle_dir}/gems/open4-%{version_rubygem_open4}/LICENSE open4_LICENSE
+mv %{rubygem_bundle_dir}/gems/rack-%{version_rubygem_rack}/MIT-LICENSE rack_MIT-LICENSE
+mv %{rubygem_bundle_dir}/gems/rack-protection-%{version_rubygem_rack_protection}/License rack-protection_License
+mv %{rubygem_bundle_dir}/gems/rack-test-%{version_rubygem_rack_test}/MIT-LICENSE.txt rack-test_MIT-LICENSE.txt
+mv %{rubygem_bundle_dir}/gems/sinatra-%{version_rubygem_sinatra}/LICENSE sinatra_LICENSE
+mv %{rubygem_bundle_dir}/gems/tilt-%{version_rubygem_tilt}/COPYING tilt_COPYING
+
 # We can remove files required for gem compilation
+rm -rf %{rubygem_bundle_dir}/gems/eventmachine-%{version_rubygem_eventmachine}/ext
 rm -rf %{rubygem_bundle_dir}/gems/ffi-%{version_rubygem_ffi}/ext
 rm -rf %{rubygem_bundle_dir}/gems/json-%{version_rubygem_json}/ext
+rm -rf %{rubygem_bundle_dir}/gems/thin-%{version_rubygem_thin}/ext
 
 
 # With this file there is "File is not stripped" problem during rpmdiff
@@ -361,6 +427,8 @@ make install \
   BASH_COMPLETION_DIR=%{_datadir}/bash-completion/completions \
   BUNDLE_PYAGENTX_SRC_DIR=`readlink -f %{bundled_src_dir}/pyagentx` \
   BUNDLE_TORNADO_SRC_DIR=`readlink -f %{bundled_src_dir}/tornado` \
+  BUNDLE_DACITE_SRC_DIR=`readlink -f %{bundled_src_dir}/dacite` \
+  BUNDLE_DATACLASSES_SRC_DIR=`readlink -f %{bundled_src_dir}/dataclasses` \
   BUILD_GEMS=false \
   SYSTEMCTL_OVERRIDE=true \
   hdrdir="%{_includedir}" \
@@ -401,7 +469,7 @@ run_all_tests(){
   #   TODO: Investigate the issue
 
   BUNDLED_LIB_LOCATION=$RPM_BUILD_ROOT%{pcs_libdir}/pcs/bundled/packages \
-    %{__python3} pcs_test/suite.py -v --vanilla --all-but \
+    %{__python3} pcs_test/suite.py --tier0 -v --vanilla --all-but \
     pcs_test.tier0.lib.commands.test_resource_agent.DescribeAgentUtf8.test_describe \
     pcs_test.tier0.daemon.app.test_app_remote.SyncConfigMutualExclusive.test_get_not_locked \
     pcs_test.tier0.daemon.app.test_app_remote.SyncConfigMutualExclusive.test_post_not_locked \
@@ -434,20 +502,31 @@ remove_all_tests() {
 run_all_tests
 remove_all_tests
 
+%posttrans
+# Make sure the new version of the daemon is running.
+# Also, make sure to start pcsd-ruby if it hasn't been started or even
+# installed before. This is done by restarting pcsd.service.
+%{_bindir}/systemctl daemon-reload
+%{_bindir}/systemctl try-restart pcsd.service
+
+
 %post
 %systemd_post pcsd.service
+%systemd_post pcsd-ruby.service
 
 %post -n %{pcs_snmp_pkg_name}
 %systemd_post pcs_snmp_agent.service
 
 %preun
 %systemd_preun pcsd.service
+%systemd_preun pcsd-ruby.service
 
 %preun -n %{pcs_snmp_pkg_name}
 %systemd_preun pcs_snmp_agent.service
 
 %postun
 %systemd_postun_with_restart pcsd.service
+%systemd_postun_with_restart pcsd-ruby.service
 
 %postun -n %{pcs_snmp_pkg_name}
 %systemd_postun_with_restart pcs_snmp_agent.service
@@ -456,8 +535,29 @@ remove_all_tests
 %doc CHANGELOG.md
 %doc README.md
 %doc tornado_README.rst
+%doc dacite_README.md
+%doc dataclasses_README.rst
 %license tornado_LICENSE
+%license dacite_LICENSE
+%license dataclasses_LICENSE.txt
 %license COPYING
+# rubygem licenses
+%license backports_LICENSE.txt
+%license daemons_LICENSE
+%license ethon_LICENSE
+%license eventmachine_LICENSE
+%license eventmachine_GNU
+%license ffi_COPYING
+%license ffi_LICENSE
+%license ffi_LICENSE.SPECS
+%license json_LICENSE
+%license mustermann_LICENSE
+%license open4_LICENSE
+%license rack_MIT-LICENSE
+%license rack-protection_License
+%license rack-test_MIT-LICENSE.txt
+%license sinatra_LICENSE
+%license tilt_COPYING
 %{python3_sitelib}/pcs
 %{python3_sitelib}/pcs-%{version}-py3.*.egg-info
 %{_sbindir}/pcs
@@ -466,10 +566,14 @@ remove_all_tests
 %{pcs_libdir}/pcsd/*
 %{pcs_libdir}/pcsd/.bundle/config
 %{pcs_libdir}/pcs/bundled/packages/tornado*
+%{pcs_libdir}/pcs/bundled/packages/dacite*
+%{pcs_libdir}/pcs/bundled/packages/dataclasses*
+%{pcs_libdir}/pcs/bundled/packages/__pycache__/dataclasses.cpython-36.pyc
 %{_unitdir}/pcsd.service
+%{_unitdir}/pcsd-ruby.service
 %{_datadir}/bash-completion/completions/pcs
 %{_sharedstatedir}/pcsd
-%{_sysconfdir}/pam.d/pcsd
+%config(noreplace) %{_sysconfdir}/pam.d/pcsd
 %dir %{_var}/log/pcsd
 %config(noreplace) %{_sysconfdir}/logrotate.d/pcsd
 %config(noreplace) %{_sysconfdir}/sysconfig/pcsd
@@ -484,6 +588,7 @@ remove_all_tests
 %{_mandir}/man8/pcsd.*
 %exclude %{pcs_libdir}/pcsd/*.debian
 %exclude %{pcs_libdir}/pcsd/pcsd.service
+%exclude %{pcs_libdir}/pcsd/pcsd-ruby.service
 %exclude %{pcs_libdir}/pcsd/pcsd.conf
 %exclude %{pcs_libdir}/pcsd/pcsd.8
 %exclude %{pcs_libdir}/pcsd/public/js/dev/*
@@ -508,6 +613,45 @@ remove_all_tests
 %license pyagentx_LICENSE.txt
 
 %changelog
+* Thu Jun 25 2020 Miroslav Lisik <mlisik@redhat.com> - 0.10.6-2
+- Added resource and operation defaults that apply to specific resource/operation types
+- Added Requires/BuildRequires: python3-pyparsing
+- Added Requires: logrotate
+- Fixed resource and stonith documentation
+- Fixed rubygem licenses
+- Fixed update_times()
+- Updated rubygem rack to version 2.2.3
+- Removed BuildRequires execstack (it is not needed)
+- Resolves: rhbz#1805082 rhbz#1817547
+
+* Thu Jun 11 2020 Miroslav Lisik <mlisik@redhat.com> - 0.10.6-1
+- Rebased to latest upstream sources (see CHANGELOG.md)
+- Added python bundled dependencies: dacite, dataclasses
+- Added new bundled rubygem ruby2_keywords
+- Updated rubygem bundled packages: backports, ethon, ffi, json, mustermann, rack, rack_protection, rack_test, sinatra, tilt
+- Updated pcs-web-ui
+- Updated test run, only tier0 tests are running during build
+- Removed BuildRequires needed for tier1 tests which were removed for build (pacemaker-cli, fence_agents-*, fence_virt, booth-site)
+- Resolves: rhbz#1387358 rhbz#1684676 rhbz#1722970 rhbz#1778672 rhbz#1782553 rhbz#1790460 rhbz#1805082 rhbz#1810017 rhbz#1817547 rhbz#1830552 rhbz#1832973 rhbz#1833114 rhbz#1833506 rhbz#1838853 rhbz#1839637
+
+* Fri Mar 20 2020 Miroslav Lisik <mlisik@redhat.com> - 0.10.4-6
+- Fixed communication between python and ruby daemons
+- Resolves: rhbz#1783106
+
+* Thu Feb 13 2020 Miroslav Lisik <mlisik@redhat.com> - 0.10.4-5
+- Fixed link to sbd man page from `sbd enable` doc
+- Fixed safe-disabling clones, groups, bundles
+- Fixed sinatra wrapper performance issue
+- Fixed detecting fence history support
+- Fixed cookie options
+- Updated hint for 'resource create ... master'
+- Updated gating tests execution, smoke tests run from upstream sources
+- Resolves: rhbz#1750427 rhbz#1781303 rhbz#1783106 rhbz#1793574
+
+* Mon Jan 20 2020 Tomas Jelinek <tojeline@redhat.com> - 0.10.4-4
+- Fix testsuite for pacemaker-2.0.3-4
+- Resolves: rhbz#1792946
+
 * Mon Dec 02 2019 Ivan Devat <idevat@redhat.com> - 0.10.4-3
 - Added basic resource views in new webUI
 - Resolves: rhbz#1744060