diff --git a/.gitignore b/.gitignore
index 0fd37ba..0fe35dd 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,17 +1,20 @@
 SOURCES/HAM-logo.png
 SOURCES/backports-3.11.4.gem
+SOURCES/daemons-1.3.1.gem
 SOURCES/ethon-0.11.0.gem
+SOURCES/eventmachine-1.2.7.gem
 SOURCES/ffi-1.9.25.gem
 SOURCES/json-2.1.0.gem
 SOURCES/mustermann-1.0.3.gem
 SOURCES/open4-1.3.4-1.gem
-SOURCES/pcs-0.10.2.tar.gz
-SOURCES/pcs-web-ui-0.1.1.tar.gz
-SOURCES/pcs-web-ui-node-modules-0.1.1.tar.xz
+SOURCES/pcs-0.10.4.tar.gz
+SOURCES/pcs-web-ui-0.1.2.tar.gz
+SOURCES/pcs-web-ui-node-modules-0.1.2.tar.xz
 SOURCES/pyagentx-0.4.pcs.2.tar.gz
 SOURCES/rack-2.0.6.gem
 SOURCES/rack-protection-2.0.4.gem
 SOURCES/rack-test-1.0.0.gem
 SOURCES/sinatra-2.0.4.gem
+SOURCES/thin-1.7.2.gem
 SOURCES/tilt-2.0.9.gem
-SOURCES/tornado-5.0.2.tar.gz
+SOURCES/tornado-6.0.3.tar.gz
diff --git a/.pcs.metadata b/.pcs.metadata
index ccc1089..2ceec9c 100644
--- a/.pcs.metadata
+++ b/.pcs.metadata
@@ -1,17 +1,20 @@
 679a4ce22a33ffd4d704261a17c00cff98d9499a SOURCES/HAM-logo.png
 edf08f3a0d9e202048857d78ddda44e59294084c SOURCES/backports-3.11.4.gem
+e28c1e78d1a6e34e80f4933b494f1e0501939dd3 SOURCES/daemons-1.3.1.gem
 3c921ceeb2847be8cfa25704be74923e233786bd SOURCES/ethon-0.11.0.gem
+7a5b2896e210fac9759c786ee4510f265f75b481 SOURCES/eventmachine-1.2.7.gem
 86fa011857f977254ccf39f507587310f9ade768 SOURCES/ffi-1.9.25.gem
 8b9e81a2a6ff57f97bec1f65940c61cc6b6d81be SOURCES/json-2.1.0.gem
 2d090e7d3cd2a35efeaeacf006100fb83b828686 SOURCES/mustermann-1.0.3.gem
 41a7fe9f8e3e02da5ae76c821b89c5b376a97746 SOURCES/open4-1.3.4-1.gem
-0d10fd24bb7268013766c01867395486bad62dcb SOURCES/pcs-0.10.2.tar.gz
-c81162a6dc4811a8b988c51182cf675938bcf227 SOURCES/pcs-web-ui-0.1.1.tar.gz
-175427fbf15f292a0a3454eda132543a952cca96 SOURCES/pcs-web-ui-node-modules-0.1.1.tar.xz
+d2b649f271580b18d39efffa93f62b55291ef55d SOURCES/pcs-0.10.4.tar.gz
+8ac1291ce8f56073b74149ac56acc094337a3298 SOURCES/pcs-web-ui-0.1.2.tar.gz
+52599fe9c17bda8cc0cad1acf830a9114b8b6db6 SOURCES/pcs-web-ui-node-modules-0.1.2.tar.xz
 3176b2f2b332c2b6bf79fe882e83feecf3d3f011 SOURCES/pyagentx-0.4.pcs.2.tar.gz
 b15267e1f94e69238a00a6f1bd48fb7683c03a78 SOURCES/rack-2.0.6.gem
 c1376e5678322b401d988d261762a78bf2cf3361 SOURCES/rack-protection-2.0.4.gem
 4c99cf0a82372a1bc5968c1551d9e606b68b4879 SOURCES/rack-test-1.0.0.gem
 1c85f05c874bc8c0bf9c40291ea2d430090cdfd9 SOURCES/sinatra-2.0.4.gem
+41395e86322ffd31f3a7aef1f697bda3e1e2d6b9 SOURCES/thin-1.7.2.gem
 55a75a80e29731d072fe44dfaf865479b65c27fd SOURCES/tilt-2.0.9.gem
-c8690c8108ce9edd6c55151f66ade61e0a11ab10 SOURCES/tornado-5.0.2.tar.gz
+126c66189fc5b26a39c9b54eb17254652cca8b27 SOURCES/tornado-6.0.3.tar.gz
diff --git a/SOURCES/bz1657166-01-Updating-a-bundle-is-a-bit-cumber.patch b/SOURCES/bz1657166-01-Updating-a-bundle-is-a-bit-cumber.patch
deleted file mode 100644
index f973ced..0000000
--- a/SOURCES/bz1657166-01-Updating-a-bundle-is-a-bit-cumber.patch
+++ /dev/null
@@ -1,1100 +0,0 @@
-From 1ae4be91077978e324f2e463eaa97dcccfdb7057 Mon Sep 17 00:00:00 2001
-From: Ivan Devat <idevat@redhat.com>
-Date: Thu, 20 Jun 2019 11:44:46 +0200
-Subject: [PATCH 2/3] squash bz1657166 Updating a bundle is a bit cumber
-
-disallow to specify container type in bundle reset
-
-fix id conflict in bundle reset
----
- pcs/cli/resource/parse_args.py                |  21 ++
- pcs/lib/cib/resource/bundle.py                | 216 +++++++-------
- pcs/lib/commands/resource.py                  |  41 +--
- pcs/lib/xml_tools.py                          |  15 +
- pcs/pcs.8                                     |   2 +-
- pcs/resource.py                               |  35 ++-
- pcs/usage.py                                  |   2 +-
- pcs_test/tier0/cib_resource/test_bundle.py    |   2 +-
- .../lib/commands/resource/bundle_common.py    |   4 +-
- .../commands/resource/test_bundle_create.py   |   6 +-
- .../commands/resource/test_bundle_reset.py    | 263 +++++++++++++++---
- 11 files changed, 402 insertions(+), 205 deletions(-)
-
-diff --git a/pcs/cli/resource/parse_args.py b/pcs/cli/resource/parse_args.py
-index 88da12a6..ff86f477 100644
---- a/pcs/cli/resource/parse_args.py
-+++ b/pcs/cli/resource/parse_args.py
-@@ -101,6 +101,27 @@ def parse_bundle_create_options(arg_list):
-     }
-     return parts
- 
-+def parse_bundle_reset_options(arg_list):
-+    """
-+    Commandline options: no options
-+    """
-+    groups = _parse_bundle_groups(arg_list)
-+    container_options = groups.get("container", [])
-+    parts = {
-+        "container": prepare_options(container_options),
-+        "network": prepare_options(groups.get("network", [])),
-+        "port_map": [
-+            prepare_options(port_map)
-+            for port_map in groups.get("port-map", [])
-+        ],
-+        "storage_map": [
-+            prepare_options(storage_map)
-+            for storage_map in groups.get("storage-map", [])
-+        ],
-+        "meta": prepare_options(groups.get("meta", []))
-+    }
-+    return parts
-+
- def _split_bundle_map_update_op_and_options(
-     map_arg_list, result_parts, map_name
- ):
-diff --git a/pcs/lib/cib/resource/bundle.py b/pcs/lib/cib/resource/bundle.py
-index 5c2910c8..2b80608e 100644
---- a/pcs/lib/cib/resource/bundle.py
-+++ b/pcs/lib/cib/resource/bundle.py
-@@ -18,6 +18,7 @@ from pcs.lib.xml_tools import (
-     append_when_useful,
-     get_sub_element,
-     update_attributes_remove_empty,
-+    reset_element,
- )
- 
- TAG = "bundle"
-@@ -84,15 +85,13 @@ def validate_new(
-             id_provider=id_provider
-         ).validate({"id": bundle_id})
-         +
--        validate_reset(
--            id_provider,
--            container_type,
--            container_options,
--            network_options,
--            port_map,
--            storage_map,
--            force_options
--        )
-+        _validate_container(container_type, container_options, force_options)
-+        +
-+        _validate_network_options_new(network_options, force_options)
-+        +
-+        _validate_port_map_list(port_map, id_provider, force_options)
-+        +
-+        _validate_storage_map_list(storage_map, id_provider, force_options)
-     )
- 
- def append_new(
-@@ -130,14 +129,14 @@ def append_new(
-     return bundle_element
- 
- def validate_reset(
--    id_provider, container_type, container_options, network_options,
--    port_map, storage_map, force_options=False
-+    id_provider, bundle_el, container_options, network_options, port_map,
-+    storage_map, force_options=False
- ):
-     """
-     Validate bundle parameters, return list of report items
- 
-     IdProvider id_provider -- elements' ids generator and uniqueness checker
--    string container_type -- bundle container type
-+    etree bundle_el -- the bundle to be reset
-     dict container_options -- container options
-     dict network_options -- network options
-     list of dict port_map -- list of port mapping options
-@@ -145,7 +144,7 @@ def validate_reset(
-     bool force_options -- return warnings instead of forceable errors
-     """
-     return (
--        _validate_container(container_type, container_options, force_options)
-+        _validate_container_reset(bundle_el, container_options, force_options)
-         +
-         _validate_network_options_new(network_options, force_options)
-         +
-@@ -154,72 +153,46 @@ def validate_reset(
-         _validate_storage_map_list(storage_map, id_provider, force_options)
-     )
- 
--def reset(
--    bundle_element, id_provider, bundle_id, container_type, container_options,
--    network_options, port_map, storage_map, meta_attributes
--):
-+def validate_reset_to_minimal(bundle_element):
-     """
--    Remove configuration of bundle_element and create new one.
-+    Validate removing configuration of bundle_element and keep the minimal one.
- 
-     etree bundle_element -- the bundle element that will be reset
--    IdProvider id_provider -- elements' ids generator
--    string bundle_id -- id of the bundle
--    string container_type -- bundle container type
--    dict container_options -- container options
--    dict network_options -- network options
--    list of dict port_map -- list of port mapping options
--    list of dict storage_map -- list of storage mapping options
--    dict meta_attributes -- meta attributes
-     """
--    # pylint: disable=too-many-arguments
-+    if not _is_supported_container(_get_container_element(bundle_element)):
-+        return [_get_report_unsupported_container(bundle_element)]
-+    return []
- 
--    # Old bundle configuration is removed and re-created. We aren't trying
--    # to keep ids:
--    # * It doesn't make sense to reference these ids.
--    # * Newly created ids are based on (are prefixed by) the bundle element id,
--    #   which does not change. Therefore, it is VERY HIGHLY probable the newly
--    #   created ids will be the same as the original ones.
--    elements_without_reset_impact = []
-+def reset_to_minimal(bundle_element):
-+    """
-+    Remove configuration of bundle_element and keep the minimal one.
- 
-+    etree bundle_element -- the bundle element that will be reset
-+    """
-     # Elements network, storage and meta_attributes must be kept even if they
-     # are without children.
-     # See https://bugzilla.redhat.com/show_bug.cgi?id=1642514
--    #
--    # The only scenario that makes sense is that these elements are empty
--    # and no attributes or children are requested for them. So we collect only
--    # deleted tags and we will ensure creation minimal relevant elements at
--    # least.
--    indelible_tags = []
--    for child in list(bundle_element):
--        if child.tag in ["network", "storage", META_ATTRIBUTES_TAG]:
--            indelible_tags.append(child.tag)
--        elif child.tag not in list(GENERIC_CONTAINER_TYPES):
--            # Only primitive should be found here, currently.
--            # The order of various element tags has no practical impact so we
--            # don't care about it here.
--            elements_without_reset_impact.append(child)
--        bundle_element.remove(child)
-+    # Element of container type is required.
- 
--    _append_container(bundle_element, container_type, container_options)
--    if network_options or port_map or "network" in indelible_tags:
--        _append_network(
--            bundle_element,
--            id_provider,
--            bundle_id,
--            network_options,
--            port_map,
--        )
--    if storage_map or "storage" in indelible_tags:
--        _append_storage(bundle_element, id_provider, bundle_id, storage_map)
--    if meta_attributes or META_ATTRIBUTES_TAG in indelible_tags:
--        append_new_meta_attributes(
--            bundle_element,
--            meta_attributes,
--            id_provider,
--            enforce_append=True,
--        )
--    for element in elements_without_reset_impact:
--        bundle_element.append(element)
-+    # There can be other elements beside bundle configuration (e.g. primitive).
-+    # These elements stay untouched.
-+    # Like any function that manipulates with cib, this also assumes prior
-+    # validation that container is supported.
-+    for child in list(bundle_element):
-+        if child.tag in ["network", "storage"]:
-+            reset_element(child)
-+        if child.tag == META_ATTRIBUTES_TAG:
-+            reset_element(child, keep_attrs=["id"])
-+        if child.tag in list(GENERIC_CONTAINER_TYPES):
-+            # GENERIC_CONTAINER_TYPES elements require the "image" attribute to
-+            # be set.
-+            reset_element(child, keep_attrs=["image"])
-+
-+def _get_report_unsupported_container(bundle_el):
-+    return reports.resource_bundle_unsupported_container_type(
-+        bundle_el.get("id"),
-+        GENERIC_CONTAINER_TYPES,
-+    )
- 
- def validate_update(
-     id_provider, bundle_el, container_options, network_options,
-@@ -240,65 +213,26 @@ def validate_update(
-     list of string storage_map_remove -- list of storage mapping ids to remove
-     bool force_options -- return warnings instead of forceable errors
-     """
--    report_list = []
--
--    # validate container options only if they are being updated
--    if container_options:
--        container_el = _get_container_element(bundle_el)
--        if (
--            container_el is not None
--            and
--            container_el.tag in GENERIC_CONTAINER_TYPES
--        ):
--            report_list.extend(
--                _validate_generic_container_options_update(
--                    container_el,
--                    container_options,
--                    force_options
--                )
--            )
--        else:
--            report_list.append(
--                reports.resource_bundle_unsupported_container_type(
--                    bundle_el.get("id"), GENERIC_CONTAINER_TYPES
--                )
--            )
--
--    network_el = bundle_el.find("network")
--    if network_el is None:
--        report_list.extend(
--            _validate_network_options_new(network_options, force_options)
--        )
--    else:
--        report_list.extend(
--            _validate_network_options_update(
--                bundle_el,
--                network_el,
--                network_options,
--                force_options
--            )
--        )
--
-     # TODO It will probably be needed to split the following validators to
-     # create and update variants. It should be done once the need exists and
-     # not sooner.
--    report_list.extend(
-+    return (
-+        _validate_container_update(bundle_el, container_options, force_options)
-+        +
-+        _validate_network_update(bundle_el, network_options, force_options)
-+        +
-         _validate_port_map_list(port_map_add, id_provider, force_options)
--    )
--    report_list.extend(
-+        +
-         _validate_storage_map_list(storage_map_add, id_provider, force_options)
--    )
--    report_list.extend(
-+        +
-         _validate_map_ids_exist(
-             bundle_el, "port-mapping", "port-map", port_map_remove
-         )
--    )
--    report_list.extend(
-+        +
-         _validate_map_ids_exist(
-             bundle_el, "storage-mapping", "storage-map", storage_map_remove
-         )
-     )
--    return report_list
- 
- def update(
-     id_provider, bundle_el, container_options, network_options,
-@@ -420,8 +354,15 @@ def get_inner_resource(bundle_el):
-         return resources[0]
-     return None
- 
-+def _is_supported_container(container_el):
-+    return (
-+        container_el is not None
-+        and
-+        container_el.tag in GENERIC_CONTAINER_TYPES
-+    )
-+
- def _validate_container(container_type, container_options, force_options=False):
--    if not container_type in GENERIC_CONTAINER_TYPES:
-+    if container_type not in GENERIC_CONTAINER_TYPES:
-         return [
-             reports.invalid_option_value(
-                 "container type",
-@@ -429,7 +370,10 @@ def _validate_container(container_type, container_options, force_options=False):
-                 GENERIC_CONTAINER_TYPES,
-             )
-         ]
-+    return _validate_generic_container_options(container_options, force_options)
-+
- 
-+def _validate_generic_container_options(container_options, force_options=False):
-     validators = [
-         validate.NamesIn(
-             GENERIC_CONTAINER_OPTIONS,
-@@ -463,8 +407,32 @@ def _validate_container(container_type, container_options, force_options=False):
-         deprecation_reports
-     )
- 
-+def _validate_container_reset(bundle_el, container_options, force_options):
-+    # Unlike in the case of update, in reset empty options are not necessary
-+    # valid - user MUST set everything (including required options e.g. image).
-+    if (
-+        container_options
-+        and
-+        not _is_supported_container(_get_container_element(bundle_el))
-+    ):
-+        return [_get_report_unsupported_container(bundle_el)]
-+    return _validate_generic_container_options(container_options, force_options)
-+
-+def _validate_container_update(bundle_el, options, force_options):
-+    # Validate container options only if they are being updated. Empty options
-+    # are valid - user DOESN'T NEED to change anything.
-+    if not options:
-+        return []
-+
-+    container_el = _get_container_element(bundle_el)
-+    if not _is_supported_container(container_el):
-+        return [_get_report_unsupported_container(bundle_el)]
-+    return _validate_generic_container_options_update(
-+        container_el, options, force_options
-+    )
-+
- def _validate_generic_container_options_update(
--    docker_el, options, force_options
-+    container_el, options, force_options
- ):
-     validators_optional_options = [
-         validate.ValueNonnegativeInteger("masters"),
-@@ -517,7 +485,7 @@ def _validate_generic_container_options_update(
-     if (
-         options.get("masters")
-         and
--        docker_el.get("promoted-max") and options.get("promoted-max") != ""
-+        container_el.get("promoted-max") and options.get("promoted-max") != ""
-     ):
-         deprecation_reports.append(
-             reports.prerequisite_option_must_not_be_set(
-@@ -527,7 +495,7 @@ def _validate_generic_container_options_update(
-     if (
-         options.get("promoted-max")
-         and
--        docker_el.get("masters") and options.get("masters") != ""
-+        container_el.get("masters") and options.get("masters") != ""
-     ):
-         deprecation_reports.append(
-             reports.prerequisite_option_must_not_be_set(
-@@ -571,6 +539,14 @@ def _is_pcmk_remote_acccessible_after_update(network_el, options):
- 
-     return not (case1 or case2 or case3)
- 
-+def _validate_network_update(bundle_el, options, force_options):
-+    network_el = bundle_el.find("network")
-+    if network_el is None:
-+        return _validate_network_options_new(options, force_options)
-+    return _validate_network_options_update(
-+        bundle_el, network_el, options, force_options
-+    )
-+
- def _validate_network_options_update(
-     bundle_el, network_el, options, force_options
- ):
-diff --git a/pcs/lib/commands/resource.py b/pcs/lib/commands/resource.py
-index 89e7f225..af648022 100644
---- a/pcs/lib/commands/resource.py
-+++ b/pcs/lib/commands/resource.py
-@@ -180,7 +180,9 @@ def _check_special_cases(
- 
- _find_bundle = partial(find_element_by_tag_and_id, resource.bundle.TAG)
- 
--def _get_required_cib_version_for_container(container_type, container_options):
-+def _get_required_cib_version_for_container(
-+    container_options, container_type=None
-+):
-     if container_type == "podman":
-         return Version(3, 2, 0)
- 
-@@ -567,8 +569,8 @@ def bundle_create(
-             resource.common.are_meta_disabled(meta_attributes)
-         ),
-         required_cib_version=_get_required_cib_version_for_container(
-+            container_options,
-             container_type,
--            container_options
-         ),
-     ) as resources_section:
-         # no need to run validations related to remote and guest nodes as those
-@@ -602,7 +604,7 @@ def bundle_create(
-             resource.common.disable(bundle_element, id_provider)
- 
- def bundle_reset(
--    env, bundle_id, container_type, container_options=None,
-+    env, bundle_id, container_options=None,
-     network_options=None, port_map=None, storage_map=None, meta_attributes=None,
-     force_options=False,
-     ensure_disabled=False,
-@@ -614,7 +616,6 @@ def bundle_reset(
- 
-     LibraryEnvironment env -- provides communication with externals
-     string bundle_id -- id of the bundle to reset
--    string container_type -- container engine name (docker, lxc...)
-     dict container_options -- container options
-     dict network_options -- network options
-     list of dict port_map -- a list of port mapping options
-@@ -640,15 +641,20 @@ def bundle_reset(
-             resource.common.are_meta_disabled(meta_attributes)
-         ),
-         required_cib_version=_get_required_cib_version_for_container(
--            container_type,
-             container_options
-         ),
-     ) as resources_section:
-+        bundle_element = _find_bundle(resources_section, bundle_id)
-+        env.report_processor.process_list(
-+            resource.bundle.validate_reset_to_minimal(bundle_element)
-+        )
-+        resource.bundle.reset_to_minimal(bundle_element)
-+
-         id_provider = IdProvider(resources_section)
-         env.report_processor.process_list(
-             resource.bundle.validate_reset(
-                 id_provider,
--                container_type,
-+                bundle_element,
-                 container_options,
-                 network_options,
-                 port_map,
-@@ -658,23 +664,21 @@ def bundle_reset(
-             )
-         )
- 
--        bundle_element = _find_bundle(resources_section, bundle_id)
--        resource.bundle.reset(
--            bundle_element,
-+        resource.bundle.update(
-             id_provider,
--            bundle_id,
--            container_type,
-+            bundle_element,
-             container_options,
-             network_options,
--            port_map,
--            storage_map,
--            meta_attributes,
-+            port_map_add=port_map,
-+            port_map_remove=[],
-+            storage_map_add=storage_map,
-+            storage_map_remove=[],
-+            meta_attributes=meta_attributes,
-         )
- 
-         if ensure_disabled:
-             resource.common.disable(bundle_element, id_provider)
- 
--
- def bundle_update(
-     env, bundle_id, container_options=None, network_options=None,
-     port_map_add=None, port_map_remove=None, storage_map_add=None,
-@@ -706,14 +710,13 @@ def bundle_update(
-     storage_map_remove = storage_map_remove or []
-     meta_attributes = meta_attributes or {}
- 
--    required_cib_version = Version(2, 8, 0)
--    if "promoted-max" in container_options:
--        required_cib_version = Version(3, 0, 0)
-     with resource_environment(
-         env,
-         wait,
-         [bundle_id],
--        required_cib_version=required_cib_version
-+        required_cib_version=_get_required_cib_version_for_container(
-+            container_options
-+        ),
-     ) as resources_section:
-         # no need to run validations related to remote and guest nodes as those
-         # nodes can only be created from primitive resources
-diff --git a/pcs/lib/xml_tools.py b/pcs/lib/xml_tools.py
-index 43cde3b5..c058c288 100644
---- a/pcs/lib/xml_tools.py
-+++ b/pcs/lib/xml_tools.py
-@@ -154,3 +154,18 @@ def remove_when_pointless(element, attribs_important=True):
-     """
-     if not is_element_useful(element, attribs_important):
-         element.getparent().remove(element)
-+
-+def reset_element(element, keep_attrs=None):
-+    """
-+    Remove all subelements and all attributes (except mentioned in keep_attrs)
-+    of given element.
-+
-+    lxml.etree.element element -- element to reset
-+    list keep_attrs -- names of attributes thas should be kept
-+    """
-+    keep_attrs = keep_attrs or []
-+    for child in list(element):
-+        element.remove(child)
-+    for key in element.attrib.keys():
-+        if key not in keep_attrs:
-+            del element.attrib[key]
-diff --git a/pcs/pcs.8 b/pcs/pcs.8
-index 1e794c60..4ae646e2 100644
---- a/pcs/pcs.8
-+++ b/pcs/pcs.8
-@@ -191,7 +191,7 @@ Remove the clone which contains the specified group or resource (the resource or
- bundle create <bundle id> container <container type> [<container options>] [network <network options>] [port\-map <port options>]... [storage\-map <storage options>]... [meta <meta options>] [\fB\-\-disabled\fR] [\fB\-\-wait\fR[=n]]
- Create a new bundle encapsulating no resources. The bundle can be used either as it is or a resource may be put into it at any time. If \fB\-\-disabled\fR is specified, the bundle is not started automatically. If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the bundle to start and then return 0 on success or 1 on error. If 'n' is not specified it defaults to 60 minutes.
- .TP
--bundle reset <bundle id> container <container type> [<container options>] [network <network options>] [port\-map <port options>]... [storage\-map <storage options>]... [meta <meta options>] [\fB\-\-disabled\fR] [\fB\-\-wait\fR[=n]]
-+bundle reset <bundle id> [container <container options>] [network <network options>] [port\-map <port options>]... [storage\-map <storage options>]... [meta <meta options>] [\fB\-\-disabled\fR] [\fB\-\-wait\fR[=n]]
- Configure specified bundle with given options. Unlike bundle update, this command resets the bundle according given options - no previous options are kept. Resources inside the bundle are kept as they are. If \fB\-\-disabled\fR is specified, the bundle is not started automatically. If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the bundle to start and then return 0 on success or 1 on error. If 'n' is not specified it defaults to 60 minutes.
- .TP
- bundle update <bundle id> [container <container options>] [network <network options>] [port\-map (add <port options>) | (delete | remove <id>...)]... [storage\-map (add <storage options>) | (delete | remove <id>...)]... [meta <meta options>] [\fB\-\-wait\fR[=n]]
-diff --git a/pcs/resource.py b/pcs/resource.py
-index a29ace5f..973f9e64 100644
---- a/pcs/resource.py
-+++ b/pcs/resource.py
-@@ -22,6 +22,7 @@ from pcs.cli.common.parse_args import (
- )
- from pcs.cli.resource.parse_args import (
-     parse_bundle_create_options,
-+    parse_bundle_reset_options,
-     parse_bundle_update_options,
-     parse_create as parse_create_args,
- )
-@@ -2929,19 +2930,26 @@ def resource_bundle_create_cmd(lib, argv, modifiers):
-       * --wait
-       * -f - CIB file
-     """
--    _resource_bundle_configure(lib.resource.bundle_create, argv, modifiers)
-+    modifiers.ensure_only_supported("--force", "--disabled", "--wait", "-f")
-+    if not argv:
-+        raise CmdLineInputError()
- 
--def resource_bundle_reset_cmd(lib, argv, modifiers):
--    """
--    Options:
--      * --force - allow unknown options
--      * --disabled - create as a stopped bundle
--      * --wait
--      * -f - CIB file
--    """
--    _resource_bundle_configure(lib.resource.bundle_reset, argv, modifiers)
-+    bundle_id = argv[0]
-+    parts = parse_bundle_create_options(argv[1:])
-+    lib.resource.bundle_create(
-+        bundle_id,
-+        parts["container_type"],
-+        container_options=parts["container"],
-+        network_options=parts["network"],
-+        port_map=parts["port_map"],
-+        storage_map=parts["storage_map"],
-+        meta_attributes=parts["meta"],
-+        force_options=modifiers.get("--force"),
-+        ensure_disabled=modifiers.get("--disabled"),
-+        wait=modifiers.get("--wait"),
-+    )
- 
--def _resource_bundle_configure(call_lib, argv, modifiers):
-+def resource_bundle_reset_cmd(lib, argv, modifiers):
-     """
-     Options:
-       * --force - allow unknown options
-@@ -2954,10 +2962,9 @@ def _resource_bundle_configure(call_lib, argv, modifiers):
-         raise CmdLineInputError()
- 
-     bundle_id = argv[0]
--    parts = parse_bundle_create_options(argv[1:])
--    call_lib(
-+    parts = parse_bundle_reset_options(argv[1:])
-+    lib.resource.bundle_reset(
-         bundle_id,
--        parts["container_type"],
-         container_options=parts["container"],
-         network_options=parts["network"],
-         port_map=parts["port_map"],
-diff --git a/pcs/usage.py b/pcs/usage.py
-index 2566d522..c0c6d712 100644
---- a/pcs/usage.py
-+++ b/pcs/usage.py
-@@ -476,7 +476,7 @@ Commands:
-         to start and then return 0 on success or 1 on error. If 'n' is not
-         specified it defaults to 60 minutes.
- 
--    bundle reset <bundle id> container <container type> [<container options>]
-+    bundle reset <bundle id> [container <container options>]
-             [network <network options>] [port-map <port options>]...
-             [storage-map <storage options>]... [meta <meta options>]
-             [--disabled] [--wait[=n]]
-diff --git a/pcs_test/tier0/cib_resource/test_bundle.py b/pcs_test/tier0/cib_resource/test_bundle.py
-index b80b0606..96fcc082 100644
---- a/pcs_test/tier0/cib_resource/test_bundle.py
-+++ b/pcs_test/tier0/cib_resource/test_bundle.py
-@@ -79,7 +79,7 @@ class BundleReset(BundleCreateCommon):
-             "resource bundle create B2 container docker image=pcs:test"
-         )
-         self.assert_effect(
--            "resource bundle reset B1 container docker image=pcs:new",
-+            "resource bundle reset B1 container image=pcs:new",
-             """
-                 <resources>
-                     <bundle id="B1">
-diff --git a/pcs_test/tier0/lib/commands/resource/bundle_common.py b/pcs_test/tier0/lib/commands/resource/bundle_common.py
-index 4fbf00e4..5d7a4b42 100644
---- a/pcs_test/tier0/lib/commands/resource/bundle_common.py
-+++ b/pcs_test/tier0/lib/commands/resource/bundle_common.py
-@@ -50,13 +50,13 @@ class SetUpMixin:
-         )
- 
- class UpgradeMixin(FixturesMixin):
--    upgraded_cib_filename = None
-+    old_version_cib_filename = None
- 
-     def test_cib_upgrade(self):
-         (self.config
-             .runner.cib.load(
-                 name="load_cib_old_version",
--                filename=self.upgraded_cib_filename,
-+                filename=self.old_version_cib_filename,
-                 before="runner.cib.load"
-             )
-             .runner.cib.upgrade(before="runner.cib.load")
-diff --git a/pcs_test/tier0/lib/commands/resource/test_bundle_create.py b/pcs_test/tier0/lib/commands/resource/test_bundle_create.py
-index 0bc9dc65..ba1ee0ac 100644
---- a/pcs_test/tier0/lib/commands/resource/test_bundle_create.py
-+++ b/pcs_test/tier0/lib/commands/resource/test_bundle_create.py
-@@ -82,16 +82,16 @@ class CreateParametrizedContainerMixin(
- 
- class CreateDocker(CreateParametrizedContainerMixin, TestCase):
-     container_type = "docker"
--    upgraded_cib_filename = "cib-empty-2.0.xml"
-+    old_version_cib_filename = "cib-empty-2.0.xml"
- 
- class CreatePodman(CreateParametrizedContainerMixin, TestCase):
-     container_type = "podman"
--    upgraded_cib_filename = "cib-empty-3.1.xml"
-+    old_version_cib_filename = "cib-empty-3.1.xml"
- 
- 
- class CreateRkt(CreateParametrizedContainerMixin, TestCase):
-     container_type = "rkt"
--    upgraded_cib_filename = "cib-empty-2.9.xml"
-+    old_version_cib_filename = "cib-empty-2.9.xml"
- 
- class CreateWithNetwork(CreateCommandMixin, NetworkMixin, TestCase):
-     container_type = "docker"
-diff --git a/pcs_test/tier0/lib/commands/resource/test_bundle_reset.py b/pcs_test/tier0/lib/commands/resource/test_bundle_reset.py
-index e5d6ef16..cfc84500 100644
---- a/pcs_test/tier0/lib/commands/resource/test_bundle_reset.py
-+++ b/pcs_test/tier0/lib/commands/resource/test_bundle_reset.py
-@@ -12,6 +12,7 @@ from pcs_test.tier0.lib.commands.resource.bundle_common import(
-     AllOptionsMixin,
-     WaitMixin,
- )
-+from pcs_test.tools import fixture
- 
- from pcs.common import report_codes
- from pcs.lib.commands.resource import bundle_reset
-@@ -26,16 +27,13 @@ class BaseMixin(FixturesMixin):
-     def initial_resources(self):
-         return self.fixture_resources_bundle_simple
- 
--    def bundle_reset(
--        self, bundle_id=None, container_type=None, **params
--    ):
-+    def bundle_reset(self, bundle_id=None, **params):
-         if "container_options" not in params:
-             params["container_options"] = {"image": self.image}
- 
-         bundle_reset(
-             self.env_assist.get_env(),
-             bundle_id=bundle_id or self.bundle_id,
--            container_type=container_type or self.container_type,
-             **params
-         )
- 
-@@ -44,10 +42,11 @@ class BaseMixin(FixturesMixin):
- 
- class MinimalMixin(BaseMixin, SetUpMixin):
-     container_type = None
--    new_container_type = None
-     initial_cib_filename = "cib-empty-3.2.xml"
- 
-     def test_success_zero_change(self):
-+        # Resets a bundle with only an image set to a bundle with the same
-+        # image set and no other options.
-         self.config.env.push_cib(resources=self.initial_resources)
-         self.bundle_reset()
- 
-@@ -63,13 +62,12 @@ class MinimalMixin(BaseMixin, SetUpMixin):
-                 """
-                 .format(
-                     bundle_id=self.bundle_id,
--                    container_type=self.new_container_type,
-+                    container_type=self.container_type,
-                     image=new_image,
-                 )
-             ,
-         })
-         self.bundle_reset(
--            container_type=self.new_container_type,
-             container_options={"image": new_image},
-         )
- 
-@@ -92,9 +90,20 @@ class MinimalMixin(BaseMixin, SetUpMixin):
-             expected_in_processor=False,
-         )
- 
-+    def test_no_options_set(self):
-+        self.env_assist.assert_raise_library_error(
-+            lambda: bundle_reset(self.env_assist.get_env(), self.bundle_id),
-+            [
-+                fixture.error(
-+                    report_codes.REQUIRED_OPTIONS_ARE_MISSING,
-+                    option_names=["image"],
-+                    option_type="container",
-+                ),
-+            ]
-+        )
-+
- class FullMixin(SetUpMixin, BaseMixin):
-     container_type = None
--    new_container_type = None
-     fixture_primitive = """
-         <primitive class="ocf" id="A" provider="heartbeat" type="Dummy"/>
-     """
-@@ -104,25 +113,12 @@ class FullMixin(SetUpMixin, BaseMixin):
-         return """
-             <resources>
-                 <bundle id="{bundle_id}">
--                    <meta_attributes id="{bundle_id}-meta_attributes">
--                        <nvpair id="{bundle_id}-meta_attributes-target-role"
--                            name="target-role"
--                            value="Stopped"
--                        />
--                    </meta_attributes>
-                     <{container_type}
-                         image="{image}"
-                         promoted-max="0"
--                        replicas="0"
--                        replicas-per-host="0"
-+                        replicas="1"
-+                        replicas-per-host="1"
-                     />
--                    <meta_attributes id="{bundle_id}-meta_attributes">
--                        <nvpair
--                            id="{bundle_id}-meta_attributes-is-managed"
--                            name="is-managed"
--                            value="false"
--                        />
--                    </meta_attributes>
-                     <network
-                         control-port="12345"
-                         host-interface="eth0"
-@@ -147,6 +143,13 @@ class FullMixin(SetUpMixin, BaseMixin):
-                             target-dir="/tmp/{container_type}2b"
-                         />
-                     </storage>
-+                    <meta_attributes id="{bundle_id}-meta_attributes">
-+                        <nvpair
-+                            id="{bundle_id}-meta_attributes-target-role"
-+                            name="target-role"
-+                            value="Stopped"
-+                        />
-+                    </meta_attributes>
-                     {fixture_primitive}
-                 </bundle>
-             </resources>
-@@ -174,7 +177,7 @@ class FullMixin(SetUpMixin, BaseMixin):
-                     </bundle>
-                 """
-                 .format(
--                    container_type=self.new_container_type,
-+                    container_type=self.container_type,
-                     bundle_id=self.bundle_id,
-                     fixture_primitive=self.fixture_primitive,
-                     image=new_image,
-@@ -183,7 +186,6 @@ class FullMixin(SetUpMixin, BaseMixin):
-         })
- 
-         self.bundle_reset(
--            container_type=self.new_container_type,
-             container_options={"image": new_image},
-         )
- 
-@@ -220,12 +222,13 @@ class FullMixin(SetUpMixin, BaseMixin):
-                             <storage-mapping
-                                 id="{bundle_id}-storage-map"
-                                 options="extra options 2"
--                                source-dir="/tmp/{container_type}2a"
--                                target-dir="/tmp/{container_type}2b"
-+                                source-dir="/tmp/{container_type}2aa"
-+                                target-dir="/tmp/{container_type}2bb"
-                             />
-                         </storage>
-                         <meta_attributes id="{bundle_id}-meta_attributes">
--                            <nvpair id="{bundle_id}-meta_attributes-target-role"
-+                            <nvpair
-+                                id="{bundle_id}-meta_attributes-target-role"
-                                 name="target-role"
-                                 value="Started"
-                             />
-@@ -234,7 +237,7 @@ class FullMixin(SetUpMixin, BaseMixin):
-                     </bundle>
-                 """
-                 .format(
--                    container_type=self.new_container_type,
-+                    container_type=self.container_type,
-                     bundle_id=self.bundle_id,
-                     fixture_primitive=self.fixture_primitive,
-                     image=new_image,
-@@ -242,7 +245,6 @@ class FullMixin(SetUpMixin, BaseMixin):
-             ,
-         })
-         self.bundle_reset(
--            container_type=self.new_container_type,
-             container_options={
-                 "image": new_image,
-                 "promoted-max": "1",
-@@ -262,8 +264,8 @@ class FullMixin(SetUpMixin, BaseMixin):
-             storage_map=[
-                 {
-                     "options": "extra options 2",
--                    "source-dir": f"/tmp/{self.new_container_type}2a",
--                    "target-dir": f"/tmp/{self.new_container_type}2b",
-+                    "source-dir": f"/tmp/{self.container_type}2aa",
-+                    "target-dir": f"/tmp/{self.container_type}2bb",
-                 },
-             ],
-             meta_attributes={
-@@ -271,6 +273,80 @@ class FullMixin(SetUpMixin, BaseMixin):
-             }
-         )
- 
-+    def test_success_keep_map_ids(self):
-+        self.config.env.push_cib(replace={
-+            ".//resources/bundle/network":
-+                f"""
-+                    <network
-+                        control-port="12345"
-+                        host-interface="eth0"
-+                        host-netmask="24"
-+                        ip-range-start="192.168.100.200"
-+                    >
-+                        <port-mapping
-+                            id="{self.bundle_id}-port-map-1001"
-+                            internal-port="3002"
-+                            port="3000"
-+                        />
-+                        <port-mapping
-+                            id="{self.bundle_id}-port-map-3000-3300"
-+                            range="4000-4400"
-+                        />
-+                    </network>
-+                """
-+            ,
-+            ".//resources/bundle/storage":
-+                f"""
-+                    <storage>
-+                        <storage-mapping
-+                            id="{self.bundle_id}-storage-map"
-+                            options="extra options 2"
-+                            source-dir="/tmp/{self.container_type}2aa"
-+                            target-dir="/tmp/{self.container_type}2bb"
-+                        />
-+                    </storage>
-+                """
-+            ,
-+        })
-+
-+        # Every value is kept as before except port_map and storage_map.
-+        self.bundle_reset(
-+            container_options={
-+                "image": self.image,
-+                "promoted-max": "0",
-+                "replicas": "1",
-+                "replicas-per-host": "1",
-+            },
-+            network_options={
-+                "control-port": "12345",
-+                "host-interface": "eth0",
-+                "host-netmask": "24",
-+                "ip-range-start": "192.168.100.200",
-+            },
-+            port_map=[
-+                {
-+                    "id": f"{self.bundle_id}-port-map-1001",
-+                    "internal-port": "3002",
-+                    "port": "3000",
-+                },
-+                {
-+                    "id": f"{self.bundle_id}-port-map-3000-3300",
-+                    "range": "4000-4400",
-+                },
-+            ],
-+            storage_map=[
-+                {
-+                    "id": f"{self.bundle_id}-storage-map",
-+                    "options": "extra options 2",
-+                    "source-dir": f"/tmp/{self.container_type}2aa",
-+                    "target-dir": f"/tmp/{self.container_type}2bb",
-+                },
-+            ],
-+            meta_attributes={
-+                "target-role": "Stopped",
-+            }
-+        )
-+
- class ResetParametrizedContainerMixin(
-     BaseMixin, ParametrizedContainerMixin, UpgradeMixin
- ):
-@@ -278,39 +354,33 @@ class ResetParametrizedContainerMixin(
- 
- class MinimalRkt(MinimalMixin, TestCase):
-     container_type = "rkt"
--    new_container_type = "docker"
- 
- class MinimalPodman(MinimalMixin, TestCase):
-     container_type = "podman"
--    new_container_type = "rkt"
- 
- class MinimalDocker(MinimalMixin, TestCase):
-     container_type = "docker"
--    new_container_type = "rkt"
- 
- class FullRkt(FullMixin, TestCase):
-     container_type = "rkt"
--    new_container_type = "docker"
- 
- class FullPodman(FullMixin, TestCase):
-     container_type = "podman"
--    new_container_type = "rkt"
- 
- class FullDocker(FullMixin, TestCase):
-     container_type = "docker"
--    new_container_type = "docker"
- 
--class CreateParametrizedPodman(ResetParametrizedContainerMixin, TestCase):
-+class ResetParametrizedPodman(ResetParametrizedContainerMixin, TestCase):
-     container_type = "podman"
--    upgraded_cib_filename = "cib-empty-3.1.xml"
-+    old_version_cib_filename = "cib-empty-2.6.xml"
- 
--class CreateParametrizedDocker(ResetParametrizedContainerMixin, TestCase):
-+class ResetParametrizedDocker(ResetParametrizedContainerMixin, TestCase):
-     container_type = "docker"
--    upgraded_cib_filename = "cib-empty-2.0.xml"
-+    old_version_cib_filename = "cib-empty-2.0.xml"
- 
--class CreateParametrizedRkt(ResetParametrizedContainerMixin, TestCase):
-+class ResetParametrizedRkt(ResetParametrizedContainerMixin, TestCase):
-     container_type = "rkt"
--    upgraded_cib_filename = "cib-empty-2.9.xml"
-+    old_version_cib_filename = "cib-empty-2.6.xml"
- 
- class ResetWithNetwork(BaseMixin, NetworkMixin, TestCase):
-     container_type = "docker"
-@@ -323,9 +393,114 @@ class ResetWithStorageMap(BaseMixin, StorageMapMixin, TestCase):
- 
- class ResetWithMetaMap(BaseMixin, MetaMixin, TestCase):
-     container_type = "docker"
-+    def test_success(self):
-+        # When there is no meta attributes the new one are put on the first
-+        # possition (since reset now uses update internally). This is the reason
-+        # for reimplementation of this MetaMixin test.
-+        self.config.env.push_cib(
-+            resources="""
-+                <resources>
-+                    <bundle id="{bundle_id}">
-+                        <meta_attributes id="{bundle_id}-meta_attributes">
-+                            <nvpair id="{bundle_id}-meta_attributes-is-managed"
-+                                name="is-managed" value="false" />
-+                            <nvpair id="{bundle_id}-meta_attributes-target-role"
-+                                name="target-role" value="Stopped" />
-+                        </meta_attributes>
-+                        <{container_type} image="{image}" />
-+                    </bundle>
-+                </resources>
-+            """
-+            .format(
-+                container_type=self.container_type,
-+                bundle_id=self.bundle_id,
-+                image=self.image,
-+            )
-+        )
-+        self.run_bundle_cmd(
-+            meta_attributes={
-+                "target-role": "Stopped",
-+                "is-managed": "false",
-+            }
-+        )
- 
- class ResetWithAllOptions(BaseMixin, AllOptionsMixin, TestCase):
-     container_type = "docker"
- 
- class ResetWithWait(BaseMixin, WaitMixin, TestCase):
-     container_type = "docker"
-+
-+class ResetUnknownContainerType(BaseMixin, SetUpMixin, TestCase):
-+    container_type = "unknown"
-+    def test_error_or_unknown_container(self):
-+        self.env_assist.assert_raise_library_error(
-+            lambda: bundle_reset(self.env_assist.get_env(), self.bundle_id),
-+            [
-+                fixture.error(
-+                    report_codes.RESOURCE_BUNDLE_UNSUPPORTED_CONTAINER_TYPE,
-+                    bundle_id="B1",
-+                    supported_container_types=["docker", "podman", "rkt"],
-+                ),
-+            ]
-+        )
-+
-+class NoMetaIdRegenerationMixin(BaseMixin, SetUpMixin):
-+    @property
-+    def initial_resources(self):
-+        return """
-+            <resources>
-+                <bundle id="{bundle_id}">
-+                    <{container_type}
-+                        image="{image}"
-+                        promoted-max="0"
-+                        replicas="1"
-+                        replicas-per-host="1"
-+                    />
-+                    <meta_attributes id="CUSTOM_ID">
-+                        <nvpair
-+                            id="ANOTHER_ID-target-role"
-+                            name="target-role"
-+                            value="Stopped"
-+                        />
-+                    </meta_attributes>
-+                </bundle>
-+            </resources>
-+        """.format(
-+            container_type=self.container_type,
-+            bundle_id=self.bundle_id,
-+            image=self.image,
-+        )
-+    def test_dont_regenerate_meta_attributes_id(self):
-+        self.config.env.push_cib(replace={
-+            ".//resources/bundle/meta_attributes":
-+                f"""
-+                    <meta_attributes id="CUSTOM_ID">
-+                        <nvpair
-+                            id="CUSTOM_ID-target-role"
-+                            name="target-role"
-+                            value="Stopped"
-+                        />
-+                    </meta_attributes>
-+                """
-+            ,
-+        })
-+        self.bundle_reset(
-+            container_options={
-+                "image": self.image,
-+                "promoted-max": "0",
-+                "replicas": "1",
-+                "replicas-per-host": "1",
-+            },
-+            meta_attributes={
-+                "target-role": "Stopped",
-+            }
-+        )
-+
-+class NoMetaIdRegenerationDocker(NoMetaIdRegenerationMixin, TestCase):
-+    container_type = "docker"
-+
-+class NoMetaIdRegenerationPodman(NoMetaIdRegenerationMixin, TestCase):
-+    container_type = "podman"
-+
-+class NoMetaIdRegenerationRkt(NoMetaIdRegenerationMixin, TestCase):
-+    container_type = "rkt"
--- 
-2.21.0
-
diff --git a/SOURCES/bz1676431-01-Display-status-of-disaster-recovery.patch b/SOURCES/bz1676431-01-Display-status-of-disaster-recovery.patch
new file mode 100644
index 0000000..bd37518
--- /dev/null
+++ b/SOURCES/bz1676431-01-Display-status-of-disaster-recovery.patch
@@ -0,0 +1,5055 @@
+From 7cf137380bc80653c50747a1d4d70783d593fcb5 Mon Sep 17 00:00:00 2001
+From: Miroslav Lisik <mlisik@redhat.com>
+Date: Fri, 29 Nov 2019 12:16:11 +0100
+Subject: [PATCH 1/3] squash bz1676431 Display status of disaster recovery site
+
+support DR config in node add, node remove, cluster destroy
+
+dr: add command for setting recovery site
+
+improve typing
+
+move tests
+
+dr: add a command for displaying clusters' status
+
+dr: add a command for displaying dr config
+
+dr: add 'destroy' sub-command
+
+dr: review based fixes
+
+update capabilities, changelog
+---
+ CHANGELOG.md                                  |   9 +
+ pcs/app.py                                    |   2 +
+ pcs/cli/common/console_report.py              |  16 +-
+ pcs/cli/common/lib_wrapper.py                 |  13 +
+ pcs/cli/dr.py                                 | 138 ++++
+ pcs/cli/routing/dr.py                         |  15 +
+ pcs/cluster.py                                |   1 +
+ pcs/common/dr.py                              | 109 +++
+ pcs/common/file_type_codes.py                 |  27 +-
+ pcs/common/report_codes.py                    |   3 +
+ pcs/lib/commands/cluster.py                   |  18 +-
+ pcs/lib/commands/dr.py                        | 316 ++++++++
+ pcs/lib/communication/corosync.py             |  28 +
+ pcs/lib/communication/status.py               |  97 +++
+ pcs/lib/dr/__init__.py                        |   0
+ pcs/lib/dr/config/__init__.py                 |   0
+ pcs/lib/dr/config/facade.py                   |  49 ++
+ pcs/lib/dr/env.py                             |  28 +
+ pcs/lib/env.py                                |  17 +
+ pcs/lib/file/instance.py                      |  21 +-
+ pcs/lib/file/metadata.py                      |   8 +
+ pcs/lib/file/toolbox.py                       |  80 +-
+ pcs/lib/node.py                               |   5 +-
+ pcs/lib/node_communication_format.py          |  16 +
+ pcs/lib/reports.py                            |  31 +
+ pcs/pcs.8                                     |  18 +-
+ pcs/pcs_internal.py                           |   1 +
+ pcs/settings_default.py                       |   1 +
+ pcs/usage.py                                  |  32 +-
+ .../tier0/cli/common/test_console_report.py   |  24 +
+ pcs_test/tier0/cli/test_dr.py                 | 293 +++++++
+ pcs_test/tier0/common/test_dr.py              | 167 ++++
+ .../lib/commands/cluster/test_add_nodes.py    | 143 +++-
+ pcs_test/tier0/lib/commands/dr/__init__.py    |   0
+ .../tier0/lib/commands/dr/test_destroy.py     | 342 ++++++++
+ .../tier0/lib/commands/dr/test_get_config.py  | 134 ++++
+ .../lib/commands/dr/test_set_recovery_site.py | 702 ++++++++++++++++
+ pcs_test/tier0/lib/commands/dr/test_status.py | 756 ++++++++++++++++++
+ .../tier0/lib/communication/test_status.py    |   7 +
+ pcs_test/tier0/lib/dr/__init__.py             |   0
+ pcs_test/tier0/lib/dr/test_facade.py          | 138 ++++
+ pcs_test/tier0/lib/test_env.py                |  42 +-
+ .../tools/command_env/config_corosync_conf.py |   9 +-
+ pcs_test/tools/command_env/config_http.py     |   3 +
+ .../tools/command_env/config_http_corosync.py |  24 +
+ .../tools/command_env/config_http_files.py    |  28 +-
+ .../tools/command_env/config_http_status.py   |  52 ++
+ .../mock_get_local_corosync_conf.py           |  12 +-
+ pcsd/capabilities.xml                         |  12 +
+ pcsd/pcsd_file.rb                             |  15 +
+ pcsd/pcsd_remove_file.rb                      |   7 +
+ pcsd/remote.rb                                |  19 +-
+ pcsd/settings.rb                              |   1 +
+ pcsd/settings.rb.debian                       |   1 +
+ pylintrc                                      |   2 +-
+ 55 files changed, 3964 insertions(+), 68 deletions(-)
+ create mode 100644 pcs/cli/dr.py
+ create mode 100644 pcs/cli/routing/dr.py
+ create mode 100644 pcs/common/dr.py
+ create mode 100644 pcs/lib/commands/dr.py
+ create mode 100644 pcs/lib/communication/status.py
+ create mode 100644 pcs/lib/dr/__init__.py
+ create mode 100644 pcs/lib/dr/config/__init__.py
+ create mode 100644 pcs/lib/dr/config/facade.py
+ create mode 100644 pcs/lib/dr/env.py
+ create mode 100644 pcs_test/tier0/cli/test_dr.py
+ create mode 100644 pcs_test/tier0/common/test_dr.py
+ create mode 100644 pcs_test/tier0/lib/commands/dr/__init__.py
+ create mode 100644 pcs_test/tier0/lib/commands/dr/test_destroy.py
+ create mode 100644 pcs_test/tier0/lib/commands/dr/test_get_config.py
+ create mode 100644 pcs_test/tier0/lib/commands/dr/test_set_recovery_site.py
+ create mode 100644 pcs_test/tier0/lib/commands/dr/test_status.py
+ create mode 100644 pcs_test/tier0/lib/communication/test_status.py
+ create mode 100644 pcs_test/tier0/lib/dr/__init__.py
+ create mode 100644 pcs_test/tier0/lib/dr/test_facade.py
+ create mode 100644 pcs_test/tools/command_env/config_http_status.py
+
+diff --git a/CHANGELOG.md b/CHANGELOG.md
+index 69e6da44..889436c3 100644
+--- a/CHANGELOG.md
++++ b/CHANGELOG.md
+@@ -1,5 +1,14 @@
+ # Change Log
+ 
++## [Unreleased]
++
++### Added
++- It is possible to configure a disaster-recovery site and display its status
++  ([rhbz#1676431])
++
++[rhbz#1676431]: https://bugzilla.redhat.com/show_bug.cgi?id=1676431
++
++
+ ## [0.10.4] - 2019-11-28
+ 
+ ### Added
+diff --git a/pcs/app.py b/pcs/app.py
+index 8df07c1d..defc4055 100644
+--- a/pcs/app.py
++++ b/pcs/app.py
+@@ -25,6 +25,7 @@ from pcs.cli.routing import (
+     cluster,
+     config,
+     constraint,
++    dr,
+     host,
+     node,
+     pcsd,
+@@ -245,6 +246,7 @@ def main(argv=None):
+         "booth": booth.booth_cmd,
+         "host": host.host_cmd,
+         "client": client.client_cmd,
++        "dr": dr.dr_cmd,
+         "help": lambda lib, argv, modifiers: usage.main(),
+     }
+     try:
+diff --git a/pcs/cli/common/console_report.py b/pcs/cli/common/console_report.py
+index 0a730cfa..d349c823 100644
+--- a/pcs/cli/common/console_report.py
++++ b/pcs/cli/common/console_report.py
+@@ -2,6 +2,7 @@
+ from collections import defaultdict
+ from collections.abc import Iterable
+ from functools import partial
++from typing import Mapping
+ import sys
+ 
+ from pcs.common import (
+@@ -46,6 +47,7 @@ _file_role_translation = {
+     file_type_codes.BOOTH_CONFIG: "Booth configuration",
+     file_type_codes.BOOTH_KEY: "Booth key",
+     file_type_codes.COROSYNC_AUTHKEY: "Corosync authkey",
++    file_type_codes.PCS_DR_CONFIG: "disaster-recovery configuration",
+     file_type_codes.PACEMAKER_AUTHKEY: "Pacemaker authkey",
+     file_type_codes.PCSD_ENVIRONMENT_CONFIG: "pcsd configuration",
+     file_type_codes.PCSD_SSL_CERT: "pcsd SSL certificate",
+@@ -53,7 +55,7 @@ _file_role_translation = {
+     file_type_codes.PCS_KNOWN_HOSTS: "known-hosts",
+     file_type_codes.PCS_SETTINGS_CONF: "pcs configuration",
+ }
+-_file_role_to_option_translation = {
++_file_role_to_option_translation: Mapping[str, str] = {
+     file_type_codes.BOOTH_CONFIG: "--booth-conf",
+     file_type_codes.BOOTH_KEY: "--booth-key",
+     file_type_codes.CIB: "-f",
+@@ -2284,4 +2286,16 @@ CODE_TO_MESSAGE_BUILDER_MAP = {
+             "resources\n\n{crm_simulate_plaintext_output}"
+         ).format(**info)
+     ,
++
++    codes.DR_CONFIG_ALREADY_EXIST: lambda info: (
++        "Disaster-recovery already configured"
++    ).format(**info),
++
++    codes.DR_CONFIG_DOES_NOT_EXIST: lambda info: (
++        "Disaster-recovery is not configured"
++    ).format(**info),
++
++    codes.NODE_IN_LOCAL_CLUSTER: lambda info: (
++        "Node '{node}' is part of local cluster"
++    ).format(**info),
+ }
+diff --git a/pcs/cli/common/lib_wrapper.py b/pcs/cli/common/lib_wrapper.py
+index 27b7d8b1..4ef6bf2f 100644
+--- a/pcs/cli/common/lib_wrapper.py
++++ b/pcs/cli/common/lib_wrapper.py
+@@ -9,6 +9,7 @@ from pcs.lib.commands import (
+     booth,
+     cib_options,
+     cluster,
++    dr,
+     fencing_topology,
+     node,
+     pcsd,
+@@ -183,6 +184,18 @@ def load_module(env, middleware_factory, name):
+             }
+         )
+ 
++    if name == "dr":
++        return bind_all(
++            env,
++            middleware.build(middleware_factory.corosync_conf_existing),
++            {
++                "get_config": dr.get_config,
++                "destroy": dr.destroy,
++                "set_recovery_site": dr.set_recovery_site,
++                "status_all_sites_plaintext": dr.status_all_sites_plaintext,
++            }
++        )
++
+     if name == "remote_node":
+         return bind_all(
+             env,
+diff --git a/pcs/cli/dr.py b/pcs/cli/dr.py
+new file mode 100644
+index 00000000..c6830aa0
+--- /dev/null
++++ b/pcs/cli/dr.py
+@@ -0,0 +1,138 @@
++from typing import (
++    Any,
++    List,
++    Sequence,
++)
++
++from pcs.cli.common.console_report import error
++from pcs.cli.common.errors import CmdLineInputError
++from pcs.cli.common.parse_args import InputModifiers
++from pcs.common import report_codes
++from pcs.common.dr import (
++    DrConfigDto,
++    DrConfigSiteDto,
++    DrSiteStatusDto,
++)
++from pcs.common.tools import indent
++
++def config(
++    lib: Any,
++    argv: Sequence[str],
++    modifiers: InputModifiers,
++) -> None:
++    """
++    Options: None
++    """
++    modifiers.ensure_only_supported()
++    if argv:
++        raise CmdLineInputError()
++    config_raw = lib.dr.get_config()
++    try:
++        config_dto = DrConfigDto.from_dict(config_raw)
++    except (KeyError, TypeError, ValueError):
++        raise error(
++            "Unable to communicate with pcsd, received response:\n"
++                f"{config_raw}"
++        )
++
++    lines = ["Local site:"]
++    lines.extend(indent(_config_site_lines(config_dto.local_site)))
++    for site_dto in config_dto.remote_site_list:
++        lines.append("Remote site:")
++        lines.extend(indent(_config_site_lines(site_dto)))
++    print("\n".join(lines))
++
++def _config_site_lines(site_dto: DrConfigSiteDto) -> List[str]:
++    lines = [f"Role: {site_dto.site_role.capitalize()}"]
++    if site_dto.node_list:
++        lines.append("Nodes:")
++        lines.extend(indent(sorted([node.name for node in site_dto.node_list])))
++    return lines
++
++
++def set_recovery_site(
++    lib: Any,
++    argv: Sequence[str],
++    modifiers: InputModifiers,
++) -> None:
++    """
++    Options:
++      * --request-timeout - HTTP timeout for node authorization check
++    """
++    modifiers.ensure_only_supported("--request-timeout")
++    if len(argv) != 1:
++        raise CmdLineInputError()
++    lib.dr.set_recovery_site(argv[0])
++
++def status(
++    lib: Any,
++    argv: Sequence[str],
++    modifiers: InputModifiers,
++) -> None:
++    """
++    Options:
++      * --full - show full details, node attributes and failcount
++      * --hide-inactive - hide inactive resources
++      * --request-timeout - HTTP timeout for node authorization check
++    """
++    modifiers.ensure_only_supported(
++        "--full", "--hide-inactive", "--request-timeout",
++    )
++    if argv:
++        raise CmdLineInputError()
++
++    status_list_raw = lib.dr.status_all_sites_plaintext(
++        hide_inactive_resources=modifiers.get("--hide-inactive"),
++        verbose=modifiers.get("--full"),
++    )
++    try:
++        status_list = [
++            DrSiteStatusDto.from_dict(status_raw)
++            for status_raw in status_list_raw
++        ]
++    except (KeyError, TypeError, ValueError):
++        raise error(
++            "Unable to communicate with pcsd, received response:\n"
++                f"{status_list_raw}"
++        )
++
++    has_errors = False
++    plaintext_parts = []
++    for site_status in status_list:
++        plaintext_parts.append(
++            "--- {local_remote} cluster - {role} site ---".format(
++                local_remote=("Local" if site_status.local_site else "Remote"),
++                role=site_status.site_role.capitalize()
++            )
++        )
++        if site_status.status_successfully_obtained:
++            plaintext_parts.append(site_status.status_plaintext.strip())
++            plaintext_parts.extend(["", ""])
++        else:
++            has_errors = True
++            plaintext_parts.extend([
++                "Error: Unable to get status of the cluster from any node",
++                ""
++            ])
++    print("\n".join(plaintext_parts).strip())
++    if has_errors:
++        raise error("Unable to get status of all sites")
++
++
++def destroy(
++    lib: Any,
++    argv: Sequence[str],
++    modifiers: InputModifiers,
++) -> None:
++    """
++    Options:
++      * --skip-offline - skip unreachable nodes (including missing auth token)
++      * --request-timeout - HTTP timeout for node authorization check
++    """
++    modifiers.ensure_only_supported("--skip-offline", "--request-timeout")
++    if argv:
++        raise CmdLineInputError()
++    force_flags = []
++    if modifiers.get("--skip-offline"):
++        force_flags.append(report_codes.SKIP_OFFLINE_NODES)
++    lib.dr.destroy(force_flags=force_flags)
+diff --git a/pcs/cli/routing/dr.py b/pcs/cli/routing/dr.py
+new file mode 100644
+index 00000000..dbf44c1c
+--- /dev/null
++++ b/pcs/cli/routing/dr.py
+@@ -0,0 +1,15 @@
++from pcs import usage
++from pcs.cli import dr
++from pcs.cli.common.routing import create_router
++
++dr_cmd = create_router(
++    {
++        "help": lambda lib, argv, modifiers: usage.dr(argv),
++        "config": dr.config,
++        "destroy": dr.destroy,
++        "set-recovery-site": dr.set_recovery_site,
++        "status": dr.status,
++    },
++    ["dr"],
++    default_cmd="help",
++)
+diff --git a/pcs/cluster.py b/pcs/cluster.py
+index 3a931b60..9473675f 100644
+--- a/pcs/cluster.py
++++ b/pcs/cluster.py
+@@ -1209,6 +1209,7 @@ def cluster_destroy(lib, argv, modifiers):
+             settings.corosync_conf_file,
+             settings.corosync_authkey_file,
+             settings.pacemaker_authkey_file,
++            settings.pcsd_dr_config_location,
+         ])
+         state_files = [
+             "cib-*",
+diff --git a/pcs/common/dr.py b/pcs/common/dr.py
+new file mode 100644
+index 00000000..1648d93d
+--- /dev/null
++++ b/pcs/common/dr.py
+@@ -0,0 +1,109 @@
++from enum import auto
++from typing import (
++    Any,
++    Iterable,
++    Mapping,
++)
++
++from pcs.common.interface.dto import DataTransferObject
++from pcs.common.tools import AutoNameEnum
++
++
++class DrRole(AutoNameEnum):
++    PRIMARY = auto()
++    RECOVERY = auto()
++
++
++class DrConfigNodeDto(DataTransferObject):
++    def __init__(self, name: str):
++        self.name = name
++
++    def to_dict(self) -> Mapping[str, Any]:
++        return dict(name=self.name)
++
++    @classmethod
++    def from_dict(cls, payload: Mapping[str, Any]) -> "DrConfigNodeDto":
++        return cls(payload["name"])
++
++
++class DrConfigSiteDto(DataTransferObject):
++    def __init__(
++        self,
++        site_role: DrRole,
++        node_list: Iterable[DrConfigNodeDto]
++    ):
++        self.site_role = site_role
++        self.node_list = node_list
++
++    def to_dict(self) -> Mapping[str, Any]:
++        return dict(
++            site_role=self.site_role.value,
++            node_list=[node.to_dict() for node in self.node_list]
++        )
++
++    @classmethod
++    def from_dict(cls, payload: Mapping[str, Any]) -> "DrConfigSiteDto":
++        return cls(
++            DrRole(payload["site_role"]),
++            [
++                DrConfigNodeDto.from_dict(payload_node)
++                for payload_node in payload["node_list"]
++            ],
++        )
++
++
++class DrConfigDto(DataTransferObject):
++    def __init__(
++        self,
++        local_site: DrConfigSiteDto,
++        remote_site_list: Iterable[DrConfigSiteDto]
++    ):
++        self.local_site = local_site
++        self.remote_site_list = remote_site_list
++
++    def to_dict(self) -> Mapping[str, Any]:
++        return dict(
++            local_site=self.local_site.to_dict(),
++            remote_site_list=[site.to_dict() for site in self.remote_site_list],
++        )
++
++    @classmethod
++    def from_dict(cls, payload: Mapping[str, Any]) -> "DrConfigDto":
++        return cls(
++            DrConfigSiteDto.from_dict(payload["local_site"]),
++            [
++                DrConfigSiteDto.from_dict(payload_site)
++                for payload_site in payload["remote_site_list"]
++            ],
++        )
++
++
++class DrSiteStatusDto(DataTransferObject):
++    def __init__(
++        self,
++        local_site: bool,
++        site_role: DrRole,
++        status_plaintext: str,
++        status_successfully_obtained: bool
++    ):
++        self.local_site = local_site
++        self.site_role = site_role
++        self.status_plaintext = status_plaintext
++        self.status_successfully_obtained = status_successfully_obtained
++
++    def to_dict(self) -> Mapping[str, Any]:
++        return dict(
++            local_site=self.local_site,
++            site_role=self.site_role.value,
++            status_plaintext=self.status_plaintext,
++            status_successfully_obtained=self.status_successfully_obtained,
++        )
++
++    @classmethod
++    def from_dict(cls, payload: Mapping[str, Any]) -> "DrSiteStatusDto":
++        return cls(
++            payload["local_site"],
++            DrRole(payload["site_role"]),
++            payload["status_plaintext"],
++            payload["status_successfully_obtained"],
++        )
+diff --git a/pcs/common/file_type_codes.py b/pcs/common/file_type_codes.py
+index 9c801180..967aa76b 100644
+--- a/pcs/common/file_type_codes.py
++++ b/pcs/common/file_type_codes.py
+@@ -1,11 +1,16 @@
+-BOOTH_CONFIG = "BOOTH_CONFIG"
+-BOOTH_KEY = "BOOTH_KEY"
+-CIB = "CIB"
+-COROSYNC_AUTHKEY = "COROSYNC_AUTHKEY"
+-COROSYNC_CONF = "COROSYNC_CONF"
+-PACEMAKER_AUTHKEY = "PACEMAKER_AUTHKEY"
+-PCSD_ENVIRONMENT_CONFIG = "PCSD_ENVIRONMENT_CONFIG"
+-PCSD_SSL_CERT = "PCSD_SSL_CERT"
+-PCSD_SSL_KEY = "PCSD_SSL_KEY"
+-PCS_KNOWN_HOSTS = "PCS_KNOWN_HOSTS"
+-PCS_SETTINGS_CONF = "PCS_SETTINGS_CONF"
++from typing import NewType
++
++FileTypeCode = NewType("FileTypeCode", str)
++
++BOOTH_CONFIG = FileTypeCode("BOOTH_CONFIG")
++BOOTH_KEY = FileTypeCode("BOOTH_KEY")
++CIB = FileTypeCode("CIB")
++COROSYNC_AUTHKEY = FileTypeCode("COROSYNC_AUTHKEY")
++COROSYNC_CONF = FileTypeCode("COROSYNC_CONF")
++PACEMAKER_AUTHKEY = FileTypeCode("PACEMAKER_AUTHKEY")
++PCSD_ENVIRONMENT_CONFIG = FileTypeCode("PCSD_ENVIRONMENT_CONFIG")
++PCSD_SSL_CERT = FileTypeCode("PCSD_SSL_CERT")
++PCSD_SSL_KEY = FileTypeCode("PCSD_SSL_KEY")
++PCS_KNOWN_HOSTS = FileTypeCode("PCS_KNOWN_HOSTS")
++PCS_SETTINGS_CONF = FileTypeCode("PCS_SETTINGS_CONF")
++PCS_DR_CONFIG = FileTypeCode("PCS_DR_CONFIG")
+diff --git a/pcs/common/report_codes.py b/pcs/common/report_codes.py
+index 4e3433a8..514ac079 100644
+--- a/pcs/common/report_codes.py
++++ b/pcs/common/report_codes.py
+@@ -141,6 +141,8 @@ COROSYNC_TRANSPORT_UNSUPPORTED_OPTIONS = "COROSYNC_TRANSPORT_UNSUPPORTED_OPTIONS
+ CRM_MON_ERROR = "CRM_MON_ERROR"
+ DEFAULTS_CAN_BE_OVERRIDEN = "DEFAULTS_CAN_BE_OVERRIDEN"
+ DEPRECATED_OPTION = "DEPRECATED_OPTION"
++DR_CONFIG_ALREADY_EXIST = "DR_CONFIG_ALREADY_EXIST"
++DR_CONFIG_DOES_NOT_EXIST = "DR_CONFIG_DOES_NOT_EXIST"
+ DUPLICATE_CONSTRAINTS_EXIST = "DUPLICATE_CONSTRAINTS_EXIST"
+ EMPTY_RESOURCE_SET_LIST = "EMPTY_RESOURCE_SET_LIST"
+ EMPTY_ID = "EMPTY_ID"
+@@ -203,6 +205,7 @@ NONE_HOST_FOUND = "NONE_HOST_FOUND"
+ NODE_USED_AS_TIE_BREAKER = "NODE_USED_AS_TIE_BREAKER"
+ NODES_TO_REMOVE_UNREACHABLE = "NODES_TO_REMOVE_UNREACHABLE"
+ NODE_TO_CLEAR_IS_STILL_IN_CLUSTER = "NODE_TO_CLEAR_IS_STILL_IN_CLUSTER"
++NODE_IN_LOCAL_CLUSTER = "NODE_IN_LOCAL_CLUSTER"
+ OMITTING_NODE = "OMITTING_NODE"
+ OBJECT_WITH_ID_IN_UNEXPECTED_CONTEXT = "OBJECT_WITH_ID_IN_UNEXPECTED_CONTEXT"
+ PACEMAKER_LOCAL_NODE_NAME_NOT_FOUND = "PACEMAKER_LOCAL_NODE_NAME_NOT_FOUND"
+diff --git a/pcs/lib/commands/cluster.py b/pcs/lib/commands/cluster.py
+index 64015864..f30dcb25 100644
+--- a/pcs/lib/commands/cluster.py
++++ b/pcs/lib/commands/cluster.py
+@@ -777,7 +777,7 @@ def add_nodes(
+         skip_wrong_config=force,
+     )
+ 
+-    # distribute corosync and pacemaker authkeys
++    # distribute corosync and pacemaker authkeys and other config files
+     files_action = {}
+     forceable_io_error_creator = reports.get_problem_creator(
+         report_codes.SKIP_FILE_DISTRIBUTION_ERRORS, force
+@@ -814,6 +814,22 @@ def add_nodes(
+                 file_path=settings.pacemaker_authkey_file,
+             ))
+ 
++    if os.path.isfile(settings.pcsd_dr_config_location):
++        try:
++            files_action.update(
++                node_communication_format.pcs_dr_config_file(
++                    open(settings.pcsd_dr_config_location, "rb").read()
++                )
++            )
++        except EnvironmentError as e:
++            report_processor.report(forceable_io_error_creator(
++                reports.file_io_error,
++                file_type_codes.PCS_DR_CONFIG,
++                RawFileError.ACTION_READ,
++                format_environment_error(e),
++                file_path=settings.pcsd_dr_config_location,
++            ))
++
+     # pcs_settings.conf was previously synced using pcsdcli send_local_configs.
+     # This has been changed temporarily until new system for distribution and
+     # syncronization of configs will be introduced.
+diff --git a/pcs/lib/commands/dr.py b/pcs/lib/commands/dr.py
+new file mode 100644
+index 00000000..41ddb5cb
+--- /dev/null
++++ b/pcs/lib/commands/dr.py
+@@ -0,0 +1,316 @@
++from typing import (
++    Any,
++    Container,
++    Iterable,
++    List,
++    Mapping,
++    Tuple,
++)
++
++from pcs.common import file_type_codes, report_codes
++from pcs.common.dr import (
++    DrConfigDto,
++    DrConfigNodeDto,
++    DrConfigSiteDto,
++    DrSiteStatusDto,
++)
++from pcs.common.file import RawFileError
++from pcs.common.node_communicator import RequestTarget
++from pcs.common.reports import SimpleReportProcessor
++
++from pcs.lib import node_communication_format, reports
++from pcs.lib.communication.corosync import GetCorosyncConf
++from pcs.lib.communication.nodes import (
++    DistributeFilesWithoutForces,
++    RemoveFilesWithoutForces,
++)
++from pcs.lib.communication.status import GetFullClusterStatusPlaintext
++from pcs.lib.communication.tools import (
++    run as run_com_cmd,
++    run_and_raise,
++)
++from pcs.lib.corosync.config_facade import ConfigFacade as CorosyncConfigFacade
++from pcs.lib.dr.config.facade import (
++    DrRole,
++    Facade as DrConfigFacade,
++)
++from pcs.lib.env import LibraryEnvironment
++from pcs.lib.errors import LibraryError, ReportItemList
++from pcs.lib.file.instance import FileInstance
++from pcs.lib.file.raw_file import raw_file_error_report
++from pcs.lib.file.toolbox import for_file_type as get_file_toolbox
++from pcs.lib.interface.config import ParserErrorException
++from pcs.lib.node import get_existing_nodes_names
++
++
++def get_config(env: LibraryEnvironment) -> Mapping[str, Any]:
++    """
++    Return local disaster recovery config
++
++    env -- LibraryEnvironment
++    """
++    report_processor = SimpleReportProcessor(env.report_processor)
++    report_list, dr_config = _load_dr_config(env.get_dr_env().config)
++    report_processor.report_list(report_list)
++    if report_processor.has_errors:
++        raise LibraryError()
++
++    return DrConfigDto(
++        DrConfigSiteDto(
++            dr_config.local_role,
++            []
++        ),
++        [
++            DrConfigSiteDto(
++                site.role,
++                [DrConfigNodeDto(name) for name in site.node_name_list]
++            )
++            for site in dr_config.get_remote_site_list()
++        ]
++    ).to_dict()
++
++
++def set_recovery_site(env: LibraryEnvironment, node_name: str) -> None:
++    """
++    Set up disaster recovery with the local cluster being the primary site
++
++    env
++    node_name -- a known host from the recovery site
++    """
++    if env.ghost_file_codes:
++        raise LibraryError(
++            reports.live_environment_required(env.ghost_file_codes)
++        )
++    report_processor = SimpleReportProcessor(env.report_processor)
++    dr_env = env.get_dr_env()
++    if dr_env.config.raw_file.exists():
++        report_processor.report(reports.dr_config_already_exist())
++    target_factory = env.get_node_target_factory()
++
++    local_nodes, report_list = get_existing_nodes_names(
++        env.get_corosync_conf(),
++        error_on_missing_name=True
++    )
++    report_processor.report_list(report_list)
++
++    if node_name in local_nodes:
++        report_processor.report(reports.node_in_local_cluster(node_name))
++
++    report_list, local_targets = target_factory.get_target_list_with_reports(
++        local_nodes, allow_skip=False, report_none_host_found=False
++    )
++    report_processor.report_list(report_list)
++
++    report_list, remote_targets = (
++        target_factory.get_target_list_with_reports(
++            [node_name], allow_skip=False, report_none_host_found=False
++        )
++    )
++    report_processor.report_list(report_list)
++
++    if report_processor.has_errors:
++        raise LibraryError()
++
++    com_cmd = GetCorosyncConf(env.report_processor)
++    com_cmd.set_targets(remote_targets)
++    remote_cluster_nodes, report_list = get_existing_nodes_names(
++        CorosyncConfigFacade.from_string(
++            run_and_raise(env.get_node_communicator(), com_cmd)
++        ),
++        error_on_missing_name=True
++    )
++    if report_processor.report_list(report_list):
++        raise LibraryError()
++
++    # ensure we have tokens for all nodes of remote cluster
++    report_list, remote_targets = target_factory.get_target_list_with_reports(
++        remote_cluster_nodes, allow_skip=False, report_none_host_found=False
++    )
++    if report_processor.report_list(report_list):
++        raise LibraryError()
++    dr_config_exporter = (
++        get_file_toolbox(file_type_codes.PCS_DR_CONFIG).exporter
++    )
++    # create dr config for remote cluster
++    remote_dr_cfg = dr_env.create_facade(DrRole.RECOVERY)
++    remote_dr_cfg.add_site(DrRole.PRIMARY, local_nodes)
++    # send config to all nodes of the remote cluster
++    distribute_file_cmd = DistributeFilesWithoutForces(
++        env.report_processor,
++        node_communication_format.pcs_dr_config_file(
++            dr_config_exporter.export(remote_dr_cfg.config)
++        )
++    )
++    distribute_file_cmd.set_targets(remote_targets)
++    run_and_raise(env.get_node_communicator(), distribute_file_cmd)
++    # create new dr config, with local cluster as primary site
++    local_dr_cfg = dr_env.create_facade(DrRole.PRIMARY)
++    local_dr_cfg.add_site(DrRole.RECOVERY, remote_cluster_nodes)
++    distribute_file_cmd = DistributeFilesWithoutForces(
++        env.report_processor,
++        node_communication_format.pcs_dr_config_file(
++            dr_config_exporter.export(local_dr_cfg.config)
++        )
++    )
++    distribute_file_cmd.set_targets(local_targets)
++    run_and_raise(env.get_node_communicator(), distribute_file_cmd)
++    # Note: No token sync across multiple clusters. Most probably they are in
++    # different subnetworks.
++
++
++def status_all_sites_plaintext(
++    env: LibraryEnvironment,
++    hide_inactive_resources: bool = False,
++    verbose: bool = False,
++) -> List[Mapping[str, Any]]:
++    """
++    Return local site's and all remote sites' status as plaintext
++
++    env -- LibraryEnvironment
++    hide_inactive_resources -- if True, do not display non-running resources
++    verbose -- if True, display more info
++    """
++    # The command does not provide an option to skip offline / unreachable /
++    # misbehaving nodes.
++    # The point of such skipping is to stop a command if it is unable to make
++    # changes on all nodes. The user can then decide to proceed anyway and
++    # make changes on the skipped nodes later manually.
++    # This command only reads from nodes so it automatically asks other nodes
++    # if one is offline / misbehaving.
++    class SiteData():
++        local: bool
++        role: DrRole
++        target_list: Iterable[RequestTarget]
++        status_loaded: bool
++        status_plaintext: str
++
++        def __init__(self, local, role, target_list):
++            self.local = local
++            self.role = role
++            self.target_list = target_list
++            self.status_loaded = False
++            self.status_plaintext = ""
++
++
++    if env.ghost_file_codes:
++        raise LibraryError(
++            reports.live_environment_required(env.ghost_file_codes)
++        )
++
++    report_processor = SimpleReportProcessor(env.report_processor)
++    report_list, dr_config = _load_dr_config(env.get_dr_env().config)
++    report_processor.report_list(report_list)
++    if report_processor.has_errors:
++        raise LibraryError()
++
++    site_data_list = []
++    target_factory = env.get_node_target_factory()
++
++    # get local nodes
++    local_nodes, report_list = get_existing_nodes_names(env.get_corosync_conf())
++    report_processor.report_list(report_list)
++    report_list, local_targets = target_factory.get_target_list_with_reports(
++        local_nodes,
++        skip_non_existing=True,
++    )
++    report_processor.report_list(report_list)
++    site_data_list.append(SiteData(True, dr_config.local_role, local_targets))
++
++    # get remote sites' nodes
++    for conf_remote_site in dr_config.get_remote_site_list():
++        report_list, remote_targets = (
++            target_factory.get_target_list_with_reports(
++                conf_remote_site.node_name_list,
++                skip_non_existing=True,
++            )
++        )
++        report_processor.report_list(report_list)
++        site_data_list.append(
++            SiteData(False, conf_remote_site.role, remote_targets)
++        )
++    if report_processor.has_errors:
++        raise LibraryError()
++
++    # get all statuses
++    for site_data in site_data_list:
++        com_cmd = GetFullClusterStatusPlaintext(
++            report_processor,
++            hide_inactive_resources=hide_inactive_resources,
++            verbose=verbose,
++        )
++        com_cmd.set_targets(site_data.target_list)
++        site_data.status_loaded, site_data.status_plaintext = run_com_cmd(
++            env.get_node_communicator(), com_cmd
++        )
++
++    return [
++        DrSiteStatusDto(
++            site_data.local,
++            site_data.role,
++            site_data.status_plaintext,
++            site_data.status_loaded,
++        ).to_dict()
++        for site_data in site_data_list
++    ]
++
++def _load_dr_config(
++    config_file: FileInstance,
++) -> Tuple[ReportItemList, DrConfigFacade]:
++    if not config_file.raw_file.exists():
++        return [reports.dr_config_does_not_exist()], DrConfigFacade.empty()
++    try:
++        return [], config_file.read_to_facade()
++    except RawFileError as e:
++        return [raw_file_error_report(e)], DrConfigFacade.empty()
++    except ParserErrorException as e:
++        return (
++            config_file.parser_exception_to_report_list(e),
++            DrConfigFacade.empty()
++        )
++
++
++def destroy(env: LibraryEnvironment, force_flags: Container[str] = ()) -> None:
++    """
++    Destroy disaster-recovery configuration on all sites
++    """
++    if env.ghost_file_codes:
++        raise LibraryError(
++            reports.live_environment_required(env.ghost_file_codes)
++        )
++
++    report_processor = SimpleReportProcessor(env.report_processor)
++    skip_offline = report_codes.SKIP_OFFLINE_NODES in force_flags
++
++    report_list, dr_config = _load_dr_config(env.get_dr_env().config)
++    report_processor.report_list(report_list)
++
++    if report_processor.has_errors:
++        raise LibraryError()
++
++    local_nodes, report_list = get_existing_nodes_names(env.get_corosync_conf())
++    report_processor.report_list(report_list)
++
++    if report_processor.has_errors:
++        raise LibraryError()
++
++    remote_nodes: List[str] = []
++    for conf_remote_site in dr_config.get_remote_site_list():
++        remote_nodes.extend(conf_remote_site.node_name_list)
++
++    target_factory = env.get_node_target_factory()
++    report_list, targets = target_factory.get_target_list_with_reports(
++         remote_nodes + local_nodes, skip_non_existing=skip_offline,
++    )
++    report_processor.report_list(report_list)
++    if report_processor.has_errors:
++        raise LibraryError()
++
++    com_cmd = RemoveFilesWithoutForces(
++        env.report_processor, {
++            "pcs disaster-recovery config": {
++                "type": "pcs_disaster_recovery_conf",
++            },
++        },
++    )
++    com_cmd.set_targets(targets)
++    run_and_raise(env.get_node_communicator(), com_cmd)
+diff --git a/pcs/lib/communication/corosync.py b/pcs/lib/communication/corosync.py
+index 0f3c3787..1a78e0de 100644
+--- a/pcs/lib/communication/corosync.py
++++ b/pcs/lib/communication/corosync.py
+@@ -138,3 +138,31 @@ class ReloadCorosyncConf(
+     def on_complete(self):
+         if not self.__was_successful and self.__has_failures:
+             self._report(reports.unable_to_perform_operation_on_any_node())
++
++
++class GetCorosyncConf(
++    AllSameDataMixin, OneByOneStrategyMixin, RunRemotelyBase
++):
++    __was_successful = False
++    __has_failures = False
++    __corosync_conf = None
++
++    def _get_request_data(self):
++        return RequestData("remote/get_corosync_conf")
++
++    def _process_response(self, response):
++        report = response_to_report_item(
++            response, severity=ReportItemSeverity.WARNING
++        )
++        if report is not None:
++            self.__has_failures = True
++            self._report(report)
++            return self._get_next_list()
++        self.__corosync_conf = response.data
++        self.__was_successful = True
++        return []
++
++    def on_complete(self):
++        if not self.__was_successful and self.__has_failures:
++            self._report(reports.unable_to_perform_operation_on_any_node())
++        return self.__corosync_conf
+diff --git a/pcs/lib/communication/status.py b/pcs/lib/communication/status.py
+new file mode 100644
+index 00000000..3470415a
+--- /dev/null
++++ b/pcs/lib/communication/status.py
+@@ -0,0 +1,97 @@
++import json
++from typing import Tuple
++
++from pcs.common.node_communicator import RequestData
++from pcs.lib import reports
++from pcs.lib.communication.tools import (
++    AllSameDataMixin,
++    OneByOneStrategyMixin,
++    RunRemotelyBase,
++)
++from pcs.lib.errors import ReportItemSeverity
++from pcs.lib.node_communication import response_to_report_item
++
++
++class GetFullClusterStatusPlaintext(
++    AllSameDataMixin, OneByOneStrategyMixin, RunRemotelyBase
++):
++    def __init__(
++        self, report_processor, hide_inactive_resources=False, verbose=False
++    ):
++        super().__init__(report_processor)
++        self._hide_inactive_resources = hide_inactive_resources
++        self._verbose = verbose
++        self._cluster_status = ""
++        self._was_successful = False
++
++    def _get_request_data(self):
++        return RequestData(
++            "remote/cluster_status_plaintext",
++            [
++                (
++                    "data_json",
++                    json.dumps(dict(
++                        hide_inactive_resources=self._hide_inactive_resources,
++                        verbose=self._verbose,
++                    ))
++                )
++            ],
++        )
++
++    def _process_response(self, response):
++        report = response_to_report_item(
++            response, severity=ReportItemSeverity.WARNING
++        )
++        if report is not None:
++            self._report(report)
++            return self._get_next_list()
++
++        node = response.request.target.label
++        try:
++            output = json.loads(response.data)
++            if output["status"] == "success":
++                self._was_successful = True
++                self._cluster_status = output["data"]
++                return []
++            if output["status_msg"]:
++                self._report(
++                    reports.node_communication_command_unsuccessful(
++                        node,
++                        response.request.action,
++                        output["status_msg"]
++                    )
++                )
++            # TODO Node name should be added to each received report item and
++            # those modified report items should be reported. That, however,
++            # requires reports overhaul which would add possibility to add a
++            # node name to any report item. Also, infos and warnings should not
++            # be ignored.
++            if output["report_list"]:
++                for report_data in output["report_list"]:
++                    if (
++                        report_data["severity"] == ReportItemSeverity.ERROR
++                        and
++                        report_data["report_text"]
++                    ):
++                        self._report(
++                            reports.node_communication_command_unsuccessful(
++                                node,
++                                response.request.action,
++                                report_data["report_text"]
++                            )
++                        )
++        except (ValueError, LookupError, TypeError):
++            self._report(reports.invalid_response_format(
++                node,
++                severity=ReportItemSeverity.WARNING,
++            ))
++
++        return self._get_next_list()
++
++    def on_complete(self) -> Tuple[bool, str]:
++        # Usually, reports.unable_to_perform_operation_on_any_node is reported
++        # when the operation was unsuccessful and failed on at least one node.
++        # The only use case in which this communication command is used does
++        # not need that report, and on top of that the report causes confusing
++        # output for the user. The report may be added in the future if needed.
++        return self._was_successful, self._cluster_status
+diff --git a/pcs/lib/dr/__init__.py b/pcs/lib/dr/__init__.py
+new file mode 100644
+index 00000000..e69de29b
+diff --git a/pcs/lib/dr/config/__init__.py b/pcs/lib/dr/config/__init__.py
+new file mode 100644
+index 00000000..e69de29b
+diff --git a/pcs/lib/dr/config/facade.py b/pcs/lib/dr/config/facade.py
+new file mode 100644
+index 00000000..f3187ba5
+--- /dev/null
++++ b/pcs/lib/dr/config/facade.py
+@@ -0,0 +1,49 @@
++from typing import (
++    Iterable,
++    List,
++    NamedTuple,
++)
++
++from pcs.common.dr import DrRole
++from pcs.lib.interface.config import FacadeInterface
++
++
++class DrSite(NamedTuple):
++    role: DrRole
++    node_name_list: List[str]
++
++
++class Facade(FacadeInterface):
++    @classmethod
++    def create(cls, local_role: DrRole) -> "Facade":
++        return cls(dict(
++            local=dict(
++                role=local_role.value,
++            ),
++            remote_sites=[],
++        ))
++
++    @classmethod
++    def empty(cls) -> "Facade":
++        return cls(dict())
++
++    @property
++    def local_role(self) -> DrRole:
++        return DrRole(self._config["local"]["role"])
++
++    def add_site(self, role: DrRole, node_list: Iterable[str]) -> None:
++        self._config["remote_sites"].append(
++            dict(
++                role=role.value,
++                nodes=[dict(name=node) for node in node_list],
++            )
++        )
++
++    def get_remote_site_list(self) -> List[DrSite]:
++        return [
++            DrSite(
++                DrRole(conf_site["role"]),
++                [node["name"] for node in conf_site["nodes"]]
++            )
++            for conf_site in self._config.get("remote_sites", [])
++        ]
+diff --git a/pcs/lib/dr/env.py b/pcs/lib/dr/env.py
+new file mode 100644
+index 00000000..c73ee622
+--- /dev/null
++++ b/pcs/lib/dr/env.py
+@@ -0,0 +1,28 @@
++from pcs.common import file_type_codes
++
++from pcs.lib.file.instance import FileInstance
++from pcs.lib.file.toolbox import (
++    for_file_type as get_file_toolbox,
++    FileToolbox,
++)
++
++from .config.facade import (
++    DrRole,
++    Facade,
++)
++
++class DrEnv:
++    def __init__(self):
++        self._config_file = FileInstance.for_dr_config()
++
++    @staticmethod
++    def create_facade(role: DrRole) -> Facade:
++        return Facade.create(role)
++
++    @property
++    def config(self) -> FileInstance:
++        return self._config_file
++
++    @staticmethod
++    def get_config_toolbox() -> FileToolbox:
++        return get_file_toolbox(file_type_codes.PCS_DR_CONFIG)
+diff --git a/pcs/lib/env.py b/pcs/lib/env.py
+index 66f7b1a4..0b12103e 100644
+--- a/pcs/lib/env.py
++++ b/pcs/lib/env.py
+@@ -3,11 +3,13 @@ from typing import (
+ )
+ from xml.etree.ElementTree import Element
+ 
++from pcs.common import file_type_codes
+ from pcs.common.node_communicator import Communicator, NodeCommunicatorFactory
+ from pcs.common.tools import Version
+ from pcs.lib import reports
+ from pcs.lib.booth.env import BoothEnv
+ from pcs.lib.cib.tools import get_cib_crm_feature_set
++from pcs.lib.dr.env import DrEnv
+ from pcs.lib.node import get_existing_nodes_names
+ from pcs.lib.communication import qdevice
+ from pcs.lib.communication.corosync import (
+@@ -89,6 +91,7 @@ class LibraryEnvironment:
+             self._request_timeout
+         )
+         self.__loaded_booth_env = None
++        self.__loaded_dr_env = None
+ 
+         self.__timeout_cache = {}
+ 
+@@ -108,6 +111,15 @@ class LibraryEnvironment:
+     def user_groups(self):
+         return self._user_groups
+ 
++    @property
++    def ghost_file_codes(self):
++        codes = set()
++        if not self.is_cib_live:
++            codes.add(file_type_codes.CIB)
++        if not self.is_corosync_conf_live:
++            codes.add(file_type_codes.COROSYNC_CONF)
++        return codes
++
+     def get_cib(self, minimal_version: Optional[Version] = None) -> Element:
+         if self.__loaded_cib_diff_source is not None:
+             raise AssertionError("CIB has already been loaded")
+@@ -412,3 +424,8 @@ class LibraryEnvironment:
+         if self.__loaded_booth_env is None:
+             self.__loaded_booth_env = BoothEnv(name, self._booth_files_data)
+         return self.__loaded_booth_env
++
++    def get_dr_env(self) -> DrEnv:
++        if self.__loaded_dr_env is None:
++            self.__loaded_dr_env = DrEnv()
++        return self.__loaded_dr_env
+diff --git a/pcs/lib/file/instance.py b/pcs/lib/file/instance.py
+index da6b760c..f0812c2d 100644
+--- a/pcs/lib/file/instance.py
++++ b/pcs/lib/file/instance.py
+@@ -51,18 +51,27 @@ class FileInstance():
+         """
+         Factory for known-hosts file
+         """
+-        file_type_code = file_type_codes.PCS_KNOWN_HOSTS
+-        return cls(
+-            raw_file.RealFile(metadata.for_file_type(file_type_code)),
+-            toolbox.for_file_type(file_type_code)
+-        )
++        return cls._for_common(file_type_codes.PCS_KNOWN_HOSTS)
+ 
+     @classmethod
+     def for_pacemaker_key(cls):
+         """
+         Factory for pacemaker key file
+         """
+-        file_type_code = file_type_codes.PACEMAKER_AUTHKEY
++        return cls._for_common(file_type_codes.PACEMAKER_AUTHKEY)
++
++    @classmethod
++    def for_dr_config(cls) -> "FileInstance":
++        """
++        Factory for disaster-recovery config file
++        """
++        return cls._for_common(file_type_codes.PCS_DR_CONFIG)
++
++    @classmethod
++    def _for_common(
++        cls,
++        file_type_code: file_type_codes.FileTypeCode,
++    ) -> "FileInstance":
+         return cls(
+             raw_file.RealFile(metadata.for_file_type(file_type_code)),
+             toolbox.for_file_type(file_type_code)
+diff --git a/pcs/lib/file/metadata.py b/pcs/lib/file/metadata.py
+index 175e5ac1..72701aed 100644
+--- a/pcs/lib/file/metadata.py
++++ b/pcs/lib/file/metadata.py
+@@ -50,6 +50,14 @@ _metadata = {
+         permissions=0o600,
+         is_binary=False,
+     ),
++    code.PCS_DR_CONFIG: lambda: FileMetadata(
++        file_type_code=code.PCS_DR_CONFIG,
++        path=settings.pcsd_dr_config_location,
++        owner_user_name="root",
++        owner_group_name="root",
++        permissions=0o600,
++        is_binary=False,
++    )
+ }
+ 
+ def for_file_type(file_type_code, *args, **kwargs):
+diff --git a/pcs/lib/file/toolbox.py b/pcs/lib/file/toolbox.py
+index 5d827887..db852617 100644
+--- a/pcs/lib/file/toolbox.py
++++ b/pcs/lib/file/toolbox.py
+@@ -1,4 +1,9 @@
+-from collections import namedtuple
++from typing import (
++    Any,
++    Dict,
++    NamedTuple,
++    Type,
++)
+ import json
+ 
+ from pcs.common import file_type_codes as code
+@@ -8,6 +13,8 @@ from pcs.lib.booth.config_parser import (
+     Exporter as BoothConfigExporter,
+     Parser as BoothConfigParser,
+ )
++from pcs.lib.dr.config.facade import Facade as DrConfigFacade
++from pcs.lib.errors import ReportItemList
+ from pcs.lib.interface.config import (
+     ExporterInterface,
+     FacadeInterface,
+@@ -16,27 +23,23 @@ from pcs.lib.interface.config import (
+ )
+ 
+ 
+-FileToolbox = namedtuple(
+-    "FileToolbox",
+-    [
+-        # File type code the toolbox belongs to
+-        "file_type_code",
+-        # Provides an easy access for reading and modifying data
+-        "facade",
+-        # Turns raw data into a structure which the facade is able to process
+-        "parser",
+-        # Turns a structure produced by the parser and the facade to raw data
+-        "exporter",
+-        # Checks that the structure is valid
+-        "validator",
+-        # Provides means for file syncing based on the file's version
+-        "version_controller",
+-    ]
+-)
++class FileToolbox(NamedTuple):
++    # File type code the toolbox belongs to
++    file_type_code: code.FileTypeCode
++    # Provides an easy access for reading and modifying data
++    facade: Type[FacadeInterface]
++    # Turns raw data into a structure which the facade is able to process
++    parser: Type[ParserInterface]
++    # Turns a structure produced by the parser and the facade to raw data
++    exporter: Type[ExporterInterface]
++    # Checks that the structure is valid
++    validator: None # TBI
++    # Provides means for file syncing based on the file's version
++    version_controller: None # TBI
+ 
+ 
+ class JsonParserException(ParserErrorException):
+-    def __init__(self, json_exception):
++    def __init__(self, json_exception: json.JSONDecodeError):
+         super().__init__()
+         self.json_exception = json_exception
+ 
+@@ -45,7 +48,7 @@ class JsonParser(ParserInterface):
+     Adapts standard json parser to our interfaces
+     """
+     @staticmethod
+-    def parse(raw_file_data):
++    def parse(raw_file_data: bytes) -> Dict[str, Any]:
+         try:
+             # json.loads handles bytes, it expects utf-8, 16 or 32 encoding
+             return json.loads(raw_file_data)
+@@ -54,8 +57,12 @@ class JsonParser(ParserInterface):
+ 
+     @staticmethod
+     def exception_to_report_list(
+-        exception, file_type_code, file_path, force_code, is_forced_or_warning
+-    ):
++        exception: JsonParserException,
++        file_type_code: code.FileTypeCode,
++        file_path: str,
++        force_code: str, # TODO: fix
++        is_forced_or_warning: bool
++    ) -> ReportItemList:
+         report_creator = reports.get_problem_creator(
+             force_code=force_code, is_forced=is_forced_or_warning
+         )
+@@ -80,7 +87,7 @@ class JsonExporter(ExporterInterface):
+     Adapts standard json exporter to our interfaces
+     """
+     @staticmethod
+-    def export(config_structure):
+    def export(config_structure: Dict[str, Any]) -> bytes:
+         return json.dumps(
+             config_structure, indent=4, sort_keys=True,
+         ).encode("utf-8")
+@@ -88,23 +95,27 @@ class JsonExporter(ExporterInterface):
+ 
+ class NoopParser(ParserInterface):
+     @staticmethod
+-    def parse(raw_file_data):
++    def parse(raw_file_data: bytes) -> bytes:
+         return raw_file_data
+ 
+     @staticmethod
+     def exception_to_report_list(
+-        exception, file_type_code, file_path, force_code, is_forced_or_warning
+-    ):
++        exception: ParserErrorException,
++        file_type_code: code.FileTypeCode,
++        file_path: str,
++        force_code: str, # TODO: fix
++        is_forced_or_warning: bool
++    ) -> ReportItemList:
+         return []
+ 
+ class NoopExporter(ExporterInterface):
+     @staticmethod
+-    def export(config_structure):
++    def export(config_structure: bytes) -> bytes:
+         return config_structure
+ 
+ class NoopFacade(FacadeInterface):
+     @classmethod
+-    def create(cls):
++    def create(cls) -> "NoopFacade":
+         return cls(bytes())
+ 
+ 
+@@ -135,7 +146,16 @@ _toolboxes = {
+     ),
+     code.PCS_KNOWN_HOSTS: FileToolbox(
+         file_type_code=code.PCS_KNOWN_HOSTS,
+-        facade=None, # TODO needed for 'auth' and 'deauth' commands
++        # TODO needed for 'auth' and 'deauth' commands
++        facade=None, # type: ignore
++        parser=JsonParser,
++        exporter=JsonExporter,
++        validator=None, # TODO needed for files syncing
++        version_controller=None, # TODO needed for files syncing
++    ),
++    code.PCS_DR_CONFIG: FileToolbox(
++        file_type_code=code.PCS_DR_CONFIG,
++        facade=DrConfigFacade,
+         parser=JsonParser,
+         exporter=JsonExporter,
+         validator=None, # TODO needed for files syncing
+@@ -143,5 +163,5 @@ _toolboxes = {
+     ),
+ }
+ 
+-def for_file_type(file_type_code):
++def for_file_type(file_type_code: code.FileTypeCode) -> FileToolbox:
+     return _toolboxes[file_type_code]
+diff --git a/pcs/lib/node.py b/pcs/lib/node.py
+index 1930ffa8..09543c8e 100644
+--- a/pcs/lib/node.py
++++ b/pcs/lib/node.py
+@@ -1,5 +1,6 @@
+ from typing import (
+     Iterable,
++    List,
+     Optional,
+     Tuple,
+ )
+@@ -18,7 +19,7 @@ def get_existing_nodes_names(
+     corosync_conf: Optional[CorosyncConfigFacade] = None,
+     cib: Optional[Element] = None,
+     error_on_missing_name: bool = False
+-) -> Tuple[Iterable[str], ReportItemList]:
++) -> Tuple[List[str], ReportItemList]:
+     return __get_nodes_names(
+         *__get_nodes(corosync_conf, cib),
+         error_on_missing_name
+@@ -56,7 +57,7 @@ def __get_nodes_names(
+     corosync_nodes: Iterable[CorosyncNode],
+     remote_and_guest_nodes: Iterable[PacemakerNode],
+     error_on_missing_name: bool = False
+-) -> Tuple[Iterable[str], ReportItemList]:
++) -> Tuple[List[str], ReportItemList]:
+     report_list = []
+     corosync_names = []
+     name_missing_in_corosync = False
+diff --git a/pcs/lib/node_communication_format.py b/pcs/lib/node_communication_format.py
+index 6134c66d..1cef35b4 100644
+--- a/pcs/lib/node_communication_format.py
++++ b/pcs/lib/node_communication_format.py
+@@ -1,5 +1,9 @@
+ import base64
+ from collections import namedtuple
++from typing import (
++    Any,
++    Dict,
++)
+ 
+ from pcs.lib import reports
+ from pcs.lib.errors import LibraryError
+@@ -55,6 +59,18 @@ def corosync_conf_file(corosync_conf_content):
+         "corosync.conf": corosync_conf_format(corosync_conf_content)
+     }
+ 
++def pcs_dr_config_format(dr_conf_content: bytes) -> Dict[str, Any]:
++    return {
++        "type": "pcs_disaster_recovery_conf",
++        "data": base64.b64encode(dr_conf_content).decode("utf-8"),
++        "rewrite_existing": True,
++    }
++
++def pcs_dr_config_file(dr_conf_content: bytes) -> Dict[str, Any]:
++    return {
++        "disaster-recovery config": pcs_dr_config_format(dr_conf_content)
++    }
++
+ def pcs_settings_conf_format(content):
+     return {
+         "data": content,
+diff --git a/pcs/lib/reports.py b/pcs/lib/reports.py
+index e83737b0..1f081007 100644
+--- a/pcs/lib/reports.py
++++ b/pcs/lib/reports.py
+@@ -4221,3 +4221,34 @@ def resource_disable_affects_other_resources(
+             "crm_simulate_plaintext_output": crm_simulate_plaintext_output,
+         }
+     )
++
++
++def dr_config_already_exist():
++    """
++    Disaster recovery config exists when the opposite was expected
++    """
++    return ReportItem.error(
++        report_codes.DR_CONFIG_ALREADY_EXIST,
++    )
++
++def dr_config_does_not_exist():
++    """
++    Disaster recovery config does not exist when the opposite was expected
++    """
++    return ReportItem.error(
++        report_codes.DR_CONFIG_DOES_NOT_EXIST,
++    )
++
++def node_in_local_cluster(node):
++    """
++    Node is part of local cluster and it cannot be used for example to set up
++    disaster-recovery site
++
++    node -- node which is part of local cluster
++    """
++    return ReportItem.error(
++        report_codes.NODE_IN_LOCAL_CLUSTER,
++        info=dict(
++            node=node,
++        ),
++    )
+diff --git a/pcs/pcs.8 b/pcs/pcs.8
+index 5765c6b5..651fda83 100644
+--- a/pcs/pcs.8
++++ b/pcs/pcs.8
+@@ -75,6 +75,9 @@ alert
+ .TP
+ client
+  Manage pcsd client configuration.
++.TP
++dr
++ Manage disaster recovery configuration.
+ .SS "resource"
+ .TP
+ [status [\fB\-\-hide\-inactive\fR]]
+@@ -887,7 +890,7 @@ stop
+ Stop booth arbitrator service.
+ .SS "status"
+ .TP
+-[status] [\fB\-\-full\fR | \fB\-\-hide\-inactive\fR]
++[status] [\fB\-\-full\fR] [\fB\-\-hide\-inactive\fR]
+ View all information about the cluster and resources (\fB\-\-full\fR provides more details, \fB\-\-hide\-inactive\fR hides inactive resources).
+ .TP
+ resources [\fB\-\-hide\-inactive\fR]
+@@ -1015,6 +1018,19 @@ Remove specified recipients.
+ .TP
+ local-auth [<pcsd\-port>] [\-u <username>] [\-p <password>]
+ Authenticate current user to local pcsd. This is required to run some pcs commands which may require permissions of root user such as 'pcs cluster start'.
++.SS "dr"
++.TP
++config
++Display disaster-recovery configuration from the local node.
++.TP
++status [\fB\-\-full\fR] [\fB\-\-hide\-inactive\fR]
++Display status of the local and the remote site cluster (\fB\-\-full\fR provides more details, \fB\-\-hide\-inactive\fR hides inactive resources).
++.TP
++set\-recovery\-site <recovery site node>
++Set up disaster\-recovery with the local cluster being the primary site. The recovery site is defined by a name of one of its nodes.
++.TP
++destroy
++Permanently destroy disaster-recovery configuration on all sites.
+ .SH EXAMPLES
+ .TP
+ Show all resources
+diff --git a/pcs/pcs_internal.py b/pcs/pcs_internal.py
+index fecdc8d5..d956d71e 100644
+--- a/pcs/pcs_internal.py
++++ b/pcs/pcs_internal.py
+@@ -22,6 +22,7 @@ SUPPORTED_COMMANDS = {
+     "cluster.setup",
+     "cluster.add_nodes",
+     "cluster.remove_nodes",
++    "status.full_cluster_status_plaintext",
+ }
+ 
+ 
+diff --git a/pcs/settings_default.py b/pcs/settings_default.py
+index ab61b20b..6d8f33ac 100644
+--- a/pcs/settings_default.py
++++ b/pcs/settings_default.py
+@@ -50,6 +50,7 @@ pcsd_users_conf_location = os.path.join(pcsd_var_location, "pcs_users.conf")
+ pcsd_settings_conf_location = os.path.join(
+     pcsd_var_location, "pcs_settings.conf"
+ )
++pcsd_dr_config_location = os.path.join(pcsd_var_location, "disaster-recovery")
+ pcsd_exec_location = "/usr/lib/pcsd/"
+ pcsd_log_location = "/var/log/pcsd/pcsd.log"
+ pcsd_default_port = 2224
+diff --git a/pcs/usage.py b/pcs/usage.py
+index 0b16289e..e4f5af32 100644
+--- a/pcs/usage.py
++++ b/pcs/usage.py
+@@ -22,6 +22,7 @@ def full_usage():
+     out += strip_extras(host([], False))
+     out += strip_extras(alert([], False))
+     out += strip_extras(client([], False))
++    out += strip_extras(dr([], False))
+     print(out.strip())
+     print("Examples:\n" + examples.replace(r" \ ", ""))
+ 
+@@ -124,6 +125,7 @@ def generate_completion_tree_from_usage():
+     tree["alert"] = generate_tree(alert([], False))
+     tree["booth"] = generate_tree(booth([], False))
+     tree["client"] = generate_tree(client([], False))
++    tree["dr"] = generate_tree(dr([], False))
+     return tree
+ 
+ def generate_tree(usage_txt):
+@@ -194,6 +196,7 @@ Commands:
+     node        Manage cluster nodes.
+     alert       Manage pacemaker alerts.
+     client      Manage pcsd client configuration.
++    dr          Manage disaster recovery configuration.
+ """
+ # Advanced usage to possibly add later
+ #  --corosync_conf=<corosync file> Specify alternative corosync.conf file
+@@ -1517,7 +1520,7 @@ def status(args=(), pout=True):
+ Usage: pcs status [commands]...
+ View current cluster and resource status
+ Commands:
+-    [status] [--full | --hide-inactive]
++    [status] [--full] [--hide-inactive]
+         View all information about the cluster and resources (--full provides
+         more details, --hide-inactive hides inactive resources).
+ 
+@@ -2019,6 +2022,32 @@ Commands:
+     return output
+ 
+ 
++def dr(args=(), pout=True):
++    output = """
++Usage: pcs dr <command>
++Manage disaster recovery configuration.
++
++Commands:
++    config
++        Display disaster-recovery configuration from the local node.
++
++    status [--full] [--hide-inactive]
++        Display status of the local and the remote site cluster (--full
++        provides more details, --hide-inactive hides inactive resources).
++
++    set-recovery-site <recovery site node>
++        Set up disaster-recovery with the local cluster being the primary site.
++        The recovery site is defined by a name of one of its nodes.
++
++    destroy
++        Permanently destroy disaster-recovery configuration on all sites.
++"""
++    if pout:
++        print(sub_usage(args, output))
++        return None
++    return output
++
++
+ def show(main_usage_name, rest_usage_names):
+     usage_map = {
+         "acl": acl,
+@@ -2028,6 +2057,7 @@ def show(main_usage_name, rest_usage_names):
+         "cluster": cluster,
+         "config": config,
+         "constraint": constraint,
++        "dr": dr,
+         "host": host,
+         "node": node,
+         "pcsd": pcsd,
+diff --git a/pcs_test/tier0/cli/common/test_console_report.py b/pcs_test/tier0/cli/common/test_console_report.py
+index 2deb896d..0d0c2457 100644
+--- a/pcs_test/tier0/cli/common/test_console_report.py
++++ b/pcs_test/tier0/cli/common/test_console_report.py
+@@ -4489,3 +4489,27 @@ class ResourceDisableAffectsOtherResources(NameBuildTest):
+                 "crm_simulate output",
+             )
+         )
++
++
++class DrConfigAlreadyExist(NameBuildTest):
++    def test_success(self):
++        self.assert_message_from_report(
++            "Disaster-recovery already configured",
++            reports.dr_config_already_exist()
++        )
++
++
++class DrConfigDoesNotExist(NameBuildTest):
++    def test_success(self):
++        self.assert_message_from_report(
++            "Disaster-recovery is not configured",
++            reports.dr_config_does_not_exist()
++        )
++
++
++class NodeInLocalCluster(NameBuildTest):
++    def test_success(self):
++        self.assert_message_from_report(
++            "Node 'node-name' is part of local cluster",
++            reports.node_in_local_cluster("node-name")
++        )
+diff --git a/pcs_test/tier0/cli/test_dr.py b/pcs_test/tier0/cli/test_dr.py
+new file mode 100644
+index 00000000..4422cdc4
+--- /dev/null
++++ b/pcs_test/tier0/cli/test_dr.py
+@@ -0,0 +1,293 @@
++from textwrap import dedent
++from unittest import mock, TestCase
++
++from pcs_test.tools.misc import dict_to_modifiers
++
++from pcs.common import report_codes
++
++from pcs.cli import dr
++from pcs.cli.common.errors import CmdLineInputError
++
++
++@mock.patch("pcs.cli.dr.print")
++class Config(TestCase):
++    def setUp(self):
++        self.lib = mock.Mock(spec_set=["dr"])
++        self.lib.dr = mock.Mock(spec_set=["get_config"])
++
++    def _call_cmd(self, argv=None):
++        dr.config(self.lib, argv or [], dict_to_modifiers({}))
++
++    def test_argv(self, mock_print):
++        with self.assertRaises(CmdLineInputError) as cm:
++            self._call_cmd(["x"])
++        self.assertIsNone(cm.exception.message)
++        mock_print.assert_not_called()
++
++    def test_success(self, mock_print):
++        self.lib.dr.get_config.return_value = {
++            "local_site": {
++                "node_list": [],
++                "site_role": "RECOVERY",
++            },
++            "remote_site_list": [
++                {
++                    "node_list": [
++                        {"name": "nodeA2"},
++                        {"name": "nodeA1"},
++                    ],
++                    "site_role": "PRIMARY",
++                },
++                {
++                    "node_list": [
++                        {"name": "nodeB1"},
++                    ],
++                    "site_role": "RECOVERY",
++                }
++            ],
++        }
++        self._call_cmd([])
++        self.lib.dr.get_config.assert_called_once_with()
++        mock_print.assert_called_once_with(dedent("""\
++            Local site:
++              Role: Recovery
++            Remote site:
++              Role: Primary
++              Nodes:
++                nodeA1
++                nodeA2
++            Remote site:
++              Role: Recovery
++              Nodes:
++                nodeB1"""))
++
++    @mock.patch("pcs.cli.common.console_report.sys.stderr.write")
++    def test_invalid_response(self, mock_stderr, mock_print):
++        self.lib.dr.get_config.return_value = [
++            "wrong response",
++            {"x": "y"},
++        ]
++        with self.assertRaises(SystemExit) as cm:
++            self._call_cmd([])
++        self.assertEqual(cm.exception.code, 1)
++        self.lib.dr.get_config.assert_called_once_with()
++        mock_print.assert_not_called()
++        mock_stderr.assert_called_once_with(
++            "Error: Unable to communicate with pcsd, received response:\n"
++                "['wrong response', {'x': 'y'}]\n"
++        )
++
++
++class SetRecoverySite(TestCase):
++    def setUp(self):
++        self.lib = mock.Mock(spec_set=["dr"])
++        self.dr = mock.Mock(spec_set=["set_recovery_site"])
++        self.lib.dr = self.dr
++
++    def call_cmd(self, argv):
++        dr.set_recovery_site(self.lib, argv, dict_to_modifiers({}))
++
++    def test_no_node(self):
++        with self.assertRaises(CmdLineInputError) as cm:
++            self.call_cmd([])
++        self.assertIsNone(cm.exception.message)
++
++    def test_multiple_nodes(self):
++        with self.assertRaises(CmdLineInputError) as cm:
++            self.call_cmd(["node1", "node2"])
++        self.assertIsNone(cm.exception.message)
++
++    def test_success(self):
++        node = "node"
++        self.call_cmd([node])
++        self.dr.set_recovery_site.assert_called_once_with(node)
++
++
++@mock.patch("pcs.cli.dr.print")
++class Status(TestCase):
++    def setUp(self):
++        self.lib = mock.Mock(spec_set=["dr"])
++        self.lib.dr = mock.Mock(spec_set=["status_all_sites_plaintext"])
++
++    def _call_cmd(self, argv, modifiers=None):
++        dr.status(self.lib, argv, dict_to_modifiers(modifiers or {}))
++
++    def _fixture_response(self, local_success=True, remote_success=True):
++        self.lib.dr.status_all_sites_plaintext.return_value = [
++            {
++                "local_site": True,
++                "site_role": "PRIMARY",
++                "status_plaintext": (
++                    "local cluster\nstatus" if local_success
++                    else "this should never be displayed"
++                ),
++                "status_successfully_obtained": local_success,
++            },
++            {
++                "local_site": False,
++                "site_role": "RECOVERY",
++                "status_plaintext": (
++                    "remote cluster\nstatus" if remote_success
++                    else "this should never be displayed"
++                ),
++                "status_successfully_obtained": remote_success,
++            },
++        ]
++
++    @staticmethod
++    def _fixture_print():
++        return dedent("""\
++            --- Local cluster - Primary site ---
++            local cluster
++            status
++
++
++            --- Remote cluster - Recovery site ---
++            remote cluster
++            status"""
++        )
++
++    def test_argv(self, mock_print):
++        with self.assertRaises(CmdLineInputError) as cm:
++            self._call_cmd(["x"])
++        self.assertIsNone(cm.exception.message)
++        mock_print.assert_not_called()
++
++    def test_success(self, mock_print):
++        self._fixture_response()
++        self._call_cmd([])
++        self.lib.dr.status_all_sites_plaintext.assert_called_once_with(
++            hide_inactive_resources=False, verbose=False
++        )
++        mock_print.assert_called_once_with(self._fixture_print())
++
++    def test_success_full(self, mock_print):
++        self._fixture_response()
++        self._call_cmd([], {"full": True})
++        self.lib.dr.status_all_sites_plaintext.assert_called_once_with(
++            hide_inactive_resources=False, verbose=True
++        )
++        mock_print.assert_called_once_with(self._fixture_print())
++
++    def test_success_hide_inactive(self, mock_print):
++        self._fixture_response()
++        self._call_cmd([], {"hide-inactive": True})
++        self.lib.dr.status_all_sites_plaintext.assert_called_once_with(
++            hide_inactive_resources=True, verbose=False
++        )
++        mock_print.assert_called_once_with(self._fixture_print())
++
++    def test_success_all_flags(self, mock_print):
++        self._fixture_response()
++        self._call_cmd([], {"full": True, "hide-inactive": True})
++        self.lib.dr.status_all_sites_plaintext.assert_called_once_with(
++            hide_inactive_resources=True, verbose=True
++        )
++        mock_print.assert_called_once_with(self._fixture_print())
++
++    @mock.patch("pcs.cli.common.console_report.sys.stderr.write")
++    def test_error_local(self, mock_stderr, mock_print):
++        self._fixture_response(local_success=False)
++        with self.assertRaises(SystemExit) as cm:
++            self._call_cmd([])
++        self.assertEqual(cm.exception.code, 1)
++        self.lib.dr.status_all_sites_plaintext.assert_called_once_with(
++            hide_inactive_resources=False, verbose=False
++        )
++        mock_print.assert_called_once_with(dedent("""\
++            --- Local cluster - Primary site ---
++            Error: Unable to get status of the cluster from any node
++
++            --- Remote cluster - Recovery site ---
++            remote cluster
++            status"""
++        ))
++        mock_stderr.assert_called_once_with(
++            "Error: Unable to get status of all sites\n"
++        )
++
++    @mock.patch("pcs.cli.common.console_report.sys.stderr.write")
++    def test_error_remote(self, mock_stderr, mock_print):
++        self._fixture_response(remote_success=False)
++        with self.assertRaises(SystemExit) as cm:
++            self._call_cmd([])
++        self.assertEqual(cm.exception.code, 1)
++        self.lib.dr.status_all_sites_plaintext.assert_called_once_with(
++            hide_inactive_resources=False, verbose=False
++        )
++        mock_print.assert_called_once_with(dedent("""\
++            --- Local cluster - Primary site ---
++            local cluster
++            status
++
++
++            --- Remote cluster - Recovery site ---
++            Error: Unable to get status of the cluster from any node"""
++        ))
++        mock_stderr.assert_called_once_with(
++            "Error: Unable to get status of all sites\n"
++        )
++
++    @mock.patch("pcs.cli.common.console_report.sys.stderr.write")
++    def test_error_both(self, mock_stderr, mock_print):
++        self._fixture_response(local_success=False, remote_success=False)
++        with self.assertRaises(SystemExit) as cm:
++            self._call_cmd([])
++        self.assertEqual(cm.exception.code, 1)
++        self.lib.dr.status_all_sites_plaintext.assert_called_once_with(
++            hide_inactive_resources=False, verbose=False
++        )
++        mock_print.assert_called_once_with(dedent("""\
++            --- Local cluster - Primary site ---
++            Error: Unable to get status of the cluster from any node
++
++            --- Remote cluster - Recovery site ---
++            Error: Unable to get status of the cluster from any node"""
++        ))
++        mock_stderr.assert_called_once_with(
++            "Error: Unable to get status of all sites\n"
++        )
++
++    @mock.patch("pcs.cli.common.console_report.sys.stderr.write")
++    def test_invalid_response(self, mock_stderr, mock_print):
++        self.lib.dr.status_all_sites_plaintext.return_value = [
++            "wrong response",
++            {"x": "y"},
++        ]
++        with self.assertRaises(SystemExit) as cm:
++            self._call_cmd([])
++        self.assertEqual(cm.exception.code, 1)
++        self.lib.dr.status_all_sites_plaintext.assert_called_once_with(
++            hide_inactive_resources=False, verbose=False
++        )
++        mock_print.assert_not_called()
++        mock_stderr.assert_called_once_with(
++            "Error: Unable to communicate with pcsd, received response:\n"
++                "['wrong response', {'x': 'y'}]\n"
++        )
++
++
++class Destroy(TestCase):
++    def setUp(self):
++        self.lib = mock.Mock(spec_set=["dr"])
++        self.dr = mock.Mock(spec_set=["destroy"])
++        self.lib.dr = self.dr
++
++    def call_cmd(self, argv, modifiers=None):
++        modifiers = modifiers or {}
++        dr.destroy(self.lib, argv, dict_to_modifiers(modifiers))
++
++    def test_some_args(self):
++        with self.assertRaises(CmdLineInputError) as cm:
++            self.call_cmd(["arg"])
++        self.assertIsNone(cm.exception.message)
++
++    def test_success(self):
++        self.call_cmd([])
++        self.dr.destroy.assert_called_once_with(force_flags=[])
++
++    def test_skip_offline(self):
++        self.call_cmd([], modifiers={"skip-offline": True})
++        self.dr.destroy.assert_called_once_with(
++            force_flags=[report_codes.SKIP_OFFLINE_NODES]
++        )
+diff --git a/pcs_test/tier0/common/test_dr.py b/pcs_test/tier0/common/test_dr.py
+new file mode 100644
+index 00000000..2ef12855
+--- /dev/null
++++ b/pcs_test/tier0/common/test_dr.py
+@@ -0,0 +1,167 @@
++from unittest import TestCase
++
++from pcs.common import dr
++
++
++class DrConfigNodeDto(TestCase):
++    def setUp(self):
++        self.name = "node-name"
++
++    def _fixture_dto(self):
++        return dr.DrConfigNodeDto(self.name)
++
++    def _fixture_dict(self):
++        return dict(name=self.name)
++
++    def test_to_dict(self):
++        self.assertEqual(
++            self._fixture_dict(),
++            self._fixture_dto().to_dict()
++        )
++
++    def test_from_dict(self):
++        dto = dr.DrConfigNodeDto.from_dict(self._fixture_dict())
++        self.assertEqual(dto.name, self.name)
++
++
++class DrConfigSiteDto(TestCase):
++    def setUp(self):
++        self.role = dr.DrRole.PRIMARY
++        self.node_name_list = ["node1", "node2"]
++
++    def _fixture_dto(self):
++        return dr.DrConfigSiteDto(
++            self.role,
++            [dr.DrConfigNodeDto(name) for name in self.node_name_list]
++        )
++
++    def _fixture_dict(self):
++        return dict(
++            site_role=self.role,
++            node_list=[dict(name=name) for name in self.node_name_list]
++        )
++
++    def test_to_dict(self):
++        self.assertEqual(
++            self._fixture_dict(),
++            self._fixture_dto().to_dict()
++        )
++
++    def test_from_dict(self):
++        dto = dr.DrConfigSiteDto.from_dict(self._fixture_dict())
++        self.assertEqual(dto.site_role, self.role)
++        self.assertEqual(len(dto.node_list), len(self.node_name_list))
++        for i, dto_node in enumerate(dto.node_list):
++            self.assertEqual(
++                dto_node.name,
++                self.node_name_list[i],
++                f"index: {i}"
++            )
++
++
++class DrConfig(TestCase):
++    @staticmethod
++    def _fixture_site_dto(role, node_name_list):
++        return dr.DrConfigSiteDto(
++            role,
++            [dr.DrConfigNodeDto(name) for name in node_name_list]
++        )
++
++    @staticmethod
++    def _fixture_dict():
++        return {
++            "local_site": {
++                "node_list": [],
++                "site_role": "RECOVERY",
++            },
++            "remote_site_list": [
++                {
++                    "node_list": [
++                        {"name": "nodeA1"},
++                        {"name": "nodeA2"},
++                    ],
++                    "site_role": "PRIMARY",
++                },
++                {
++                    "node_list": [
++                        {"name": "nodeB1"},
++                    ],
++                    "site_role": "RECOVERY",
++                }
++            ],
++        }
++
++    def test_to_dict(self):
++        self.assertEqual(
++            self._fixture_dict(),
++            dr.DrConfigDto(
++                self._fixture_site_dto(dr.DrRole.RECOVERY, []),
++                [
++                    self._fixture_site_dto(
++                        dr.DrRole.PRIMARY,
++                        ["nodeA1", "nodeA2"]
++                    ),
++                    self._fixture_site_dto(
++                        dr.DrRole.RECOVERY,
++                        ["nodeB1"]
++                    ),
++                ]
++            ).to_dict()
++        )
++
++    def test_from_dict(self):
++        dto = dr.DrConfigDto.from_dict(self._fixture_dict())
++        self.assertEqual(
++            dto.local_site.to_dict(),
++            self._fixture_site_dto(dr.DrRole.RECOVERY, []).to_dict()
++        )
++        self.assertEqual(len(dto.remote_site_list), 2)
++        self.assertEqual(
++            dto.remote_site_list[0].to_dict(),
++            self._fixture_site_dto(
++                dr.DrRole.PRIMARY, ["nodeA1", "nodeA2"]
++            ).to_dict()
++        )
++        self.assertEqual(
++            dto.remote_site_list[1].to_dict(),
++            self._fixture_site_dto(dr.DrRole.RECOVERY, ["nodeB1"]).to_dict()
++        )
++
++class DrSiteStatusDto(TestCase):
++    def setUp(self):
++        self.local = False
++        self.role = dr.DrRole.PRIMARY
++        self.status_plaintext = "plaintext status"
++        self.status_successfully_obtained = True
++
++    def dto_fixture(self):
++        return dr.DrSiteStatusDto(
++            self.local,
++            self.role,
++            self.status_plaintext,
++            self.status_successfully_obtained,
++        )
++
++    def dict_fixture(self):
++        return dict(
++            local_site=self.local,
++            site_role=self.role.value,
++            status_plaintext=self.status_plaintext,
++            status_successfully_obtained=self.status_successfully_obtained,
++        )
++
++    def test_to_dict(self):
++        self.assertEqual(
++            self.dict_fixture(),
++            self.dto_fixture().to_dict()
++        )
++
++    def test_from_dict(self):
++        dto = dr.DrSiteStatusDto.from_dict(self.dict_fixture())
++        self.assertEqual(dto.local_site, self.local)
++        self.assertEqual(dto.site_role, self.role)
++        self.assertEqual(dto.status_plaintext, self.status_plaintext)
++        self.assertEqual(
++            dto.status_successfully_obtained,
++            self.status_successfully_obtained
++        )
+diff --git a/pcs_test/tier0/lib/commands/cluster/test_add_nodes.py b/pcs_test/tier0/lib/commands/cluster/test_add_nodes.py
+index a570d67e..295c1e6a 100644
+--- a/pcs_test/tier0/lib/commands/cluster/test_add_nodes.py
++++ b/pcs_test/tier0/lib/commands/cluster/test_add_nodes.py
+@@ -470,6 +470,11 @@ class LocalConfig():
+                 return_value=False,
+                 name=f"{local_prefix}fs.isfile.pacemaker_authkey"
+             )
++            .fs.isfile(
++                settings.pcsd_dr_config_location,
++                return_value=False,
++                name=f"{local_prefix}fs.isfile.pcsd_disaster_recovery"
++            )
+             .fs.isfile(
+                 settings.pcsd_settings_conf_location,
+                 return_value=False,
+@@ -480,10 +485,12 @@ class LocalConfig():
+     def files_sync(self, node_labels):
+         corosync_authkey_content = b"corosync authfile"
+         pcmk_authkey_content = b"pcmk authfile"
+-        pcs_settings_content = "pcs_settigns.conf data"
++        pcs_disaster_recovery_content = b"disaster recovery config data"
++        pcs_settings_content = "pcs_settings.conf data"
+         file_list = [
+             "corosync authkey",
+             "pacemaker authkey",
++            "disaster-recovery config",
+             "pcs_settings.conf",
+         ]
+         local_prefix = "local.files_sync."
+@@ -512,6 +519,19 @@ class LocalConfig():
+                 mode="rb",
+                 name=f"{local_prefix}fs.open.pcmk_authkey_read",
+             )
++            .fs.isfile(
++                settings.pcsd_dr_config_location,
++                return_value=True,
++                name=f"{local_prefix}fs.isfile.pcsd_disaster_recovery"
++            )
++            .fs.open(
++                settings.pcsd_dr_config_location,
++                return_value=(
++                    mock.mock_open(read_data=pcs_disaster_recovery_content)()
++                ),
++                mode="rb",
++                name=f"{local_prefix}fs.open.pcsd_disaster_recovery_read",
++            )
+             .fs.isfile(
+                 settings.pcsd_settings_conf_location,
+                 return_value=True,
+@@ -526,6 +546,7 @@ class LocalConfig():
+                 node_labels=node_labels,
+                 pcmk_authkey=pcmk_authkey_content,
+                 corosync_authkey=corosync_authkey_content,
++                pcs_disaster_recovery_conf=pcs_disaster_recovery_content,
+                 pcs_settings_conf=pcs_settings_content,
+                 name=f"{local_prefix}http.files.put_files",
+             )
+@@ -2105,13 +2126,16 @@ class FailureFilesDistribution(TestCase):
+         self.expected_reports = []
+         self.pcmk_authkey_content = b"pcmk authkey content"
+         self.corosync_authkey_content = b"corosync authkey content"
++        self.pcsd_dr_config_content = b"disaster recovery config data"
+         self.pcmk_authkey_file_id = "pacemaker_remote authkey"
+         self.corosync_authkey_file_id = "corosync authkey"
++        self.pcsd_dr_config_file_id = "disaster-recovery config"
+         self.unsuccessful_nodes = self.new_nodes[:1]
+         self.successful_nodes = self.new_nodes[1:]
+         self.err_msg = "an error message"
+         self.corosync_key_open_before_position = "fs.isfile.pacemaker_authkey"
+-        self.pacemaker_key_open_before_position = "fs.isfile.pcsd_settings"
++        self.pacemaker_key_open_before_position = "fs.isfile.pcsd_dr_config"
++        self.pcsd_dr_config_open_before_position = "fs.isfile.pcsd_settings"
+         patch_getaddrinfo(self, self.new_nodes)
+         self.existing_corosync_nodes = [
+             node_fixture(node, node_id)
+@@ -2149,9 +2173,14 @@ class FailureFilesDistribution(TestCase):
+             )
+             # open will be inserted here
+             .fs.isfile(
+-                settings.pcsd_settings_conf_location, return_value=False,
++                settings.pcsd_dr_config_location, return_value=True,
+                 name=self.pacemaker_key_open_before_position
+             )
++            # open will be inserted here
++            .fs.isfile(
++                settings.pcsd_settings_conf_location, return_value=False,
++                name=self.pcsd_dr_config_open_before_position
++            )
+         )
+         self.expected_reports.extend(
+             [
+@@ -2165,7 +2194,11 @@ class FailureFilesDistribution(TestCase):
+         self.distribution_started_reports = [
+             fixture.info(
+                 report_codes.FILES_DISTRIBUTION_STARTED,
+-                file_list=["corosync authkey", "pacemaker authkey"],
++                file_list=[
++                    self.corosync_authkey_file_id,
++                    "pacemaker authkey",
++                    self.pcsd_dr_config_file_id,
++                ],
+                 node_list=self.new_nodes,
+             )
+         ]
+@@ -2181,6 +2214,12 @@ class FailureFilesDistribution(TestCase):
+                 node=node,
+                 file_description="pacemaker authkey",
+             ) for node in self.successful_nodes
++        ] + [
++            fixture.info(
++                report_codes.FILE_DISTRIBUTION_SUCCESS,
++                node=node,
++                file_description=self.pcsd_dr_config_file_id,
++            ) for node in self.successful_nodes
+         ]
+ 
+     def _add_nodes_with_lib_error(self):
+@@ -2210,6 +2249,15 @@ class FailureFilesDistribution(TestCase):
+             name="fs.open.pacemaker_authkey",
+             before=self.pacemaker_key_open_before_position,
+         )
++        self.config.fs.open(
++            settings.pcsd_dr_config_location,
++            mode="rb",
++            side_effect=EnvironmentError(
++                1, self.err_msg, settings.pcsd_dr_config_location
++            ),
++            name="fs.open.pcsd_dr_config",
++            before=self.pcsd_dr_config_open_before_position,
++        )
+ 
+         self._add_nodes_with_lib_error()
+ 
+@@ -2236,7 +2284,17 @@ class FailureFilesDistribution(TestCase):
+                         f"{self.err_msg}: '{settings.pacemaker_authkey_file}'"
+                     ),
+                     operation=RawFileError.ACTION_READ,
+-                )
++                ),
++                fixture.error(
++                    report_codes.FILE_IO_ERROR,
++                    force_code=report_codes.SKIP_FILE_DISTRIBUTION_ERRORS,
++                    file_type_code=file_type_codes.PCS_DR_CONFIG,
++                    file_path=settings.pcsd_dr_config_location,
++                    reason=(
++                        f"{self.err_msg}: '{settings.pcsd_dr_config_location}'"
++                    ),
++                    operation=RawFileError.ACTION_READ,
++                ),
+             ]
+         )
+ 
+@@ -2260,6 +2318,15 @@ class FailureFilesDistribution(TestCase):
+                 name="fs.open.pacemaker_authkey",
+                 before=self.pacemaker_key_open_before_position,
+             )
++            .fs.open(
++                settings.pcsd_dr_config_location,
++                mode="rb",
++                side_effect=EnvironmentError(
++                    1, self.err_msg, settings.pcsd_dr_config_location
++                ),
++                name="fs.open.pcsd_dr_config",
++                before=self.pcsd_dr_config_open_before_position,
++            )
+             .local.distribute_and_reload_corosync_conf(
+                 corosync_conf_fixture(
+                     self.existing_corosync_nodes + [
+@@ -2301,7 +2368,16 @@ class FailureFilesDistribution(TestCase):
+                         f"{self.err_msg}: '{settings.pacemaker_authkey_file}'"
+                     ),
+                     operation=RawFileError.ACTION_READ,
+-                )
++                ),
++                fixture.warn(
++                    report_codes.FILE_IO_ERROR,
++                    file_type_code=file_type_codes.PCS_DR_CONFIG,
++                    file_path=settings.pcsd_dr_config_location,
++                    reason=(
++                        f"{self.err_msg}: '{settings.pcsd_dr_config_location}'"
++                    ),
++                    operation=RawFileError.ACTION_READ,
++                ),
+             ]
+         )
+ 
+@@ -2325,9 +2401,19 @@ class FailureFilesDistribution(TestCase):
+                 name="fs.open.pacemaker_authkey",
+                 before=self.pacemaker_key_open_before_position,
+             )
++            .fs.open(
++                settings.pcsd_dr_config_location,
++                return_value=mock.mock_open(
++                    read_data=self.pcsd_dr_config_content
++                )(),
++                mode="rb",
++                name="fs.open.pcsd_dr_config",
++                before=self.pcsd_dr_config_open_before_position,
++            )
+             .http.files.put_files(
+                 pcmk_authkey=self.pcmk_authkey_content,
+                 corosync_authkey=self.corosync_authkey_content,
++                pcs_disaster_recovery_conf=self.pcsd_dr_config_content,
+                 communication_list=[
+                     dict(
+                         label=node,
+@@ -2339,7 +2425,11 @@ class FailureFilesDistribution(TestCase):
+                             self.pcmk_authkey_file_id: dict(
+                                 code="unexpected",
+                                 message=self.err_msg
+-                            )
++                            ),
++                            self.pcsd_dr_config_file_id: dict(
++                                code="unexpected",
++                                message=self.err_msg
++                            ),
+                         }))
+                     ) for node in self.unsuccessful_nodes
+                 ] + [
+@@ -2374,6 +2464,15 @@ class FailureFilesDistribution(TestCase):
+                     reason=self.err_msg,
+                 ) for node in self.unsuccessful_nodes
+             ]
++            +
++            [
++                fixture.error(
++                    report_codes.FILE_DISTRIBUTION_ERROR,
++                    node=node,
++                    file_description=self.pcsd_dr_config_file_id,
++                    reason=self.err_msg,
++                ) for node in self.unsuccessful_nodes
++            ]
+         )
+ 
+     def test_communication_failure(self):
+@@ -2396,9 +2495,19 @@ class FailureFilesDistribution(TestCase):
+                 name="fs.open.pacemaker_authkey",
+                 before=self.pacemaker_key_open_before_position,
+             )
++            .fs.open(
++                settings.pcsd_dr_config_location,
++                return_value=mock.mock_open(
++                    read_data=self.pcsd_dr_config_content
++                )(),
++                mode="rb",
++                name="fs.open.pcsd_dr_config",
++                before=self.pcsd_dr_config_open_before_position,
++            )
+             .http.files.put_files(
+                 pcmk_authkey=self.pcmk_authkey_content,
+                 corosync_authkey=self.corosync_authkey_content,
++                pcs_disaster_recovery_conf=self.pcsd_dr_config_content,
+                 communication_list=[
+                     dict(
+                         label=node,
+@@ -2450,9 +2559,19 @@ class FailureFilesDistribution(TestCase):
+                 name="fs.open.pacemaker_authkey",
+                 before=self.pacemaker_key_open_before_position,
+             )
++            .fs.open(
++                settings.pcsd_dr_config_location,
++                return_value=mock.mock_open(
++                    read_data=self.pcsd_dr_config_content
++                )(),
++                mode="rb",
++                name="fs.open.pcsd_dr_config",
++                before=self.pcsd_dr_config_open_before_position,
++            )
+             .http.files.put_files(
+                 pcmk_authkey=self.pcmk_authkey_content,
+                 corosync_authkey=self.corosync_authkey_content,
++                pcs_disaster_recovery_conf=self.pcsd_dr_config_content,
+                 communication_list=[
+                     dict(
+                         label=node,
+@@ -2501,9 +2620,19 @@ class FailureFilesDistribution(TestCase):
+                 name="fs.open.pacemaker_authkey",
+                 before=self.pacemaker_key_open_before_position,
+             )
++            .fs.open(
++                settings.pcsd_dr_config_location,
++                return_value=mock.mock_open(
++                    read_data=self.pcsd_dr_config_content
++                )(),
++                mode="rb",
++                name="fs.open.pcsd_dr_config",
++                before=self.pcsd_dr_config_open_before_position,
++            )
+             .http.files.put_files(
+                 pcmk_authkey=self.pcmk_authkey_content,
+                 corosync_authkey=self.corosync_authkey_content,
++                pcs_disaster_recovery_conf=self.pcsd_dr_config_content,
+                 communication_list=[
+                     dict(
+                         label=node,
+diff --git a/pcs_test/tier0/lib/commands/dr/__init__.py b/pcs_test/tier0/lib/commands/dr/__init__.py
+new file mode 100644
+index 00000000..e69de29b
+diff --git a/pcs_test/tier0/lib/commands/dr/test_destroy.py b/pcs_test/tier0/lib/commands/dr/test_destroy.py
+new file mode 100644
+index 00000000..de50b21c
+--- /dev/null
++++ b/pcs_test/tier0/lib/commands/dr/test_destroy.py
+@@ -0,0 +1,342 @@
++import json
++from unittest import TestCase
++
++from pcs_test.tools import fixture
++from pcs_test.tools.command_env import get_env_tools
++
++from pcs import settings
++from pcs.common import (
++    file_type_codes,
++    report_codes,
++)
++from pcs.common.file import RawFileError
++from pcs.lib.commands import dr
++
++
++DR_CONF = "pcs disaster-recovery config"
++REASON = "error msg"
++
++
++def generate_nodes(nodes_num, prefix=""):
++    return [f"{prefix}node{i}" for i in range(1, nodes_num + 1)]
++
++
++class CheckLive(TestCase):
++    def setUp(self):
++        self.env_assist, self.config = get_env_tools(self)
++
++    def assert_live_required(self, forbidden_options):
++        self.env_assist.assert_raise_library_error(
++            lambda: dr.destroy(self.env_assist.get_env()),
++            [
++                fixture.error(
++                    report_codes.LIVE_ENVIRONMENT_REQUIRED,
++                    forbidden_options=forbidden_options
++                )
++            ],
++            expected_in_processor=False
++        )
++
++    def test_mock_corosync(self):
++        self.config.env.set_corosync_conf_data("corosync conf data")
++        self.assert_live_required([file_type_codes.COROSYNC_CONF])
++
++    def test_mock_cib(self):
++        self.config.env.set_cib_data("<cib />")
++        self.assert_live_required([file_type_codes.CIB])
++
++    def test_mock(self):
++        self.config.env.set_corosync_conf_data("corosync conf data")
++        self.config.env.set_cib_data("<cib />")
++        self.assert_live_required([
++            file_type_codes.CIB,
++            file_type_codes.COROSYNC_CONF,
++        ])
++
++
++class FixtureMixin:
++    def _fixture_load_configs(self):
++        self.config.raw_file.exists(
++            file_type_codes.PCS_DR_CONFIG,
++            settings.pcsd_dr_config_location,
++        )
++        self.config.raw_file.read(
++            file_type_codes.PCS_DR_CONFIG,
++            settings.pcsd_dr_config_location,
++            content="""
++                {{
++                    "local": {{
++                        "role": "PRIMARY"
++                    }},
++                    "remote_sites": [
++                        {{
++                            "nodes": [{nodes}],
++                            "role": "RECOVERY"
++                        }}
++                    ]
++                }}
++            """.format(
++                nodes=", ".join([
++                    json.dumps(dict(name=node))
++                    for node in self.remote_nodes
++                ])
++            )
++        )
++        self.config.corosync_conf.load(node_name_list=self.local_nodes)
++
++    def _success_reports(self):
++        return [
++            fixture.info(
++                report_codes.FILES_REMOVE_FROM_NODES_STARTED,
++                file_list=[DR_CONF],
++                node_list=self.remote_nodes + self.local_nodes,
++            )
++        ] + [
++            fixture.info(
++                report_codes.FILE_REMOVE_FROM_NODE_SUCCESS,
++                file_description=DR_CONF,
++                node=node,
++            ) for node in (self.remote_nodes + self.local_nodes)
++        ]
++
++
++class Success(FixtureMixin, TestCase):
++    def setUp(self):
++        self.env_assist, self.config = get_env_tools(self)
++        self.local_nodes = generate_nodes(5)
++        self.remote_nodes = generate_nodes(3, prefix="remote-")
++        self.config.env.set_known_nodes(self.local_nodes + self.remote_nodes)
++
++    def test_minimal(self):
++        self._fixture_load_configs()
++        self.config.http.files.remove_files(
++            node_labels=self.remote_nodes + self.local_nodes,
++            pcs_disaster_recovery_conf=True,
++        )
++        dr.destroy(self.env_assist.get_env())
++        self.env_assist.assert_reports(self._success_reports())
++
++
++class FatalConfigIssue(FixtureMixin, TestCase):
++    def setUp(self):
++        self.env_assist, self.config = get_env_tools(self)
++        self.local_nodes = generate_nodes(5)
++        self.remote_nodes = generate_nodes(3, prefix="remote-")
++
++    def test_config_missing(self):
++        self.config.raw_file.exists(
++            file_type_codes.PCS_DR_CONFIG,
++            settings.pcsd_dr_config_location,
++            exists=False,
++        )
++
++        self.env_assist.assert_raise_library_error(
++            lambda: dr.destroy(self.env_assist.get_env()),
++        )
++        self.env_assist.assert_reports([
++            fixture.error(
++                report_codes.DR_CONFIG_DOES_NOT_EXIST,
++            ),
++        ])
++
++    def test_config_read_error(self):
++        self.config.raw_file.exists(
++            file_type_codes.PCS_DR_CONFIG,
++            settings.pcsd_dr_config_location,
++        )
++        self.config.raw_file.read(
++            file_type_codes.PCS_DR_CONFIG,
++            settings.pcsd_dr_config_location,
++            exception_msg=REASON,
++        )
++
++        self.env_assist.assert_raise_library_error(
++            lambda: dr.destroy(self.env_assist.get_env()),
++        )
++        self.env_assist.assert_reports([
++            fixture.error(
++                report_codes.FILE_IO_ERROR,
++                file_type_code=file_type_codes.PCS_DR_CONFIG,
++                file_path=settings.pcsd_dr_config_location,
++                operation=RawFileError.ACTION_READ,
++                reason=REASON,
++            ),
++        ])
++
++    def test_config_parse_error(self):
++        self.config.raw_file.exists(
++            file_type_codes.PCS_DR_CONFIG,
++            settings.pcsd_dr_config_location,
++        )
++        self.config.raw_file.read(
++            file_type_codes.PCS_DR_CONFIG,
++            settings.pcsd_dr_config_location,
++            content="bad content",
++        )
++
++        self.env_assist.assert_raise_library_error(
++            lambda: dr.destroy(self.env_assist.get_env()),
++        )
++        self.env_assist.assert_reports([
++            fixture.error(
++                report_codes.PARSE_ERROR_JSON_FILE,
++                file_type_code=file_type_codes.PCS_DR_CONFIG,
++                file_path=settings.pcsd_dr_config_location,
++                line_number=1,
++                column_number=1,
++                position=0,
++                reason="Expecting value",
++                full_msg="Expecting value: line 1 column 1 (char 0)",
++            ),
++        ])
++
++    def test_corosync_conf_read_error(self):
++        self._fixture_load_configs()
++        self.config.corosync_conf.load_content(
++            "", exception_msg=REASON, instead="corosync_conf.load"
++        )
++        self.env_assist.assert_raise_library_error(
++            lambda: dr.destroy(self.env_assist.get_env()),
++            [
++                fixture.error(
++                    report_codes.UNABLE_TO_READ_COROSYNC_CONFIG,
++                    path=settings.corosync_conf_file,
++                    reason=REASON,
++                ),
++            ],
++            expected_in_processor=False
++        )
++
++    def test_corosync_conf_parse_error(self):
++        self._fixture_load_configs()
++        self.config.corosync_conf.load_content(
++            "wrong {\n  corosync", instead="corosync_conf.load"
++        )
++        self.env_assist.assert_raise_library_error(
++            lambda: dr.destroy(self.env_assist.get_env()),
++            [
++                fixture.error(
++                    report_codes
++                    .PARSE_ERROR_COROSYNC_CONF_LINE_IS_NOT_SECTION_NOR_KEY_VALUE
++                ),
++            ],
++            expected_in_processor=False
++        )
++
++
++class CommunicationIssue(FixtureMixin, TestCase):
++    def setUp(self):
++        self.env_assist, self.config = get_env_tools(self)
++        self.local_nodes = generate_nodes(5)
++        self.remote_nodes = generate_nodes(3, prefix="remote-")
++
++    def test_unknown_node(self):
++        self.config.env.set_known_nodes(
++            self.local_nodes[1:] + self.remote_nodes[1:]
++        )
++        self._fixture_load_configs()
++        self.env_assist.assert_raise_library_error(
++            lambda: dr.destroy(self.env_assist.get_env())
++        )
++        self.env_assist.assert_reports([
++            fixture.error(
++                report_codes.HOST_NOT_FOUND,
++                host_list=self.local_nodes[:1] + self.remote_nodes[:1],
++                force_code=report_codes.SKIP_OFFLINE_NODES,
++            ),
++        ])
++
++    def test_unknown_node_force(self):
++        existing_nodes = self.remote_nodes[1:] + self.local_nodes[1:]
++        self.config.env.set_known_nodes(existing_nodes)
++        self._fixture_load_configs()
++        self.config.http.files.remove_files(
++            node_labels=existing_nodes,
++            pcs_disaster_recovery_conf=True,
++        )
++        dr.destroy(
++            self.env_assist.get_env(),
++            force_flags=[report_codes.SKIP_OFFLINE_NODES],
++        )
++        self.env_assist.assert_reports([
++            fixture.warn(
++                report_codes.HOST_NOT_FOUND,
++                host_list=self.local_nodes[:1] + self.remote_nodes[:1],
++            ),
++        ] + [
++            fixture.info(
++                report_codes.FILES_REMOVE_FROM_NODES_STARTED,
++                file_list=[DR_CONF],
++                node_list=existing_nodes,
++            )
++        ] + [
++            fixture.info(
++                report_codes.FILE_REMOVE_FROM_NODE_SUCCESS,
++                file_description=DR_CONF,
++                node=node,
++            ) for node in existing_nodes
++        ])
++
++    def test_node_issues(self):
++        self.config.env.set_known_nodes(self.local_nodes + self.remote_nodes)
++        self._fixture_load_configs()
++        self.config.http.files.remove_files(
++            pcs_disaster_recovery_conf=True,
++            communication_list=[
++                dict(label=node) for node in self.remote_nodes
++            ] + [
++                dict(
++                    label=self.local_nodes[0],
++                    was_connected=False,
++                    error_msg=REASON,
++                ),
++                dict(
++                    label=self.local_nodes[1],
++                    output="invalid data",
++                ),
++                dict(
++                    label=self.local_nodes[2],
++                    output=json.dumps(dict(files={
++                        DR_CONF: dict(
++                            code="unexpected",
++                            message=REASON,
++                        ),
++                    })),
++                ),
++            ] + [
++                dict(label=node) for node in self.local_nodes[3:]
++            ]
++        )
++
++        self.env_assist.assert_raise_library_error(
++            lambda: dr.destroy(self.env_assist.get_env())
++        )
++        self.env_assist.assert_reports([
++            fixture.info(
++                report_codes.FILES_REMOVE_FROM_NODES_STARTED,
++                file_list=[DR_CONF],
++                node_list=self.remote_nodes + self.local_nodes,
++            ),
++            fixture.error(
++                report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
++                command="remote/remove_file",
++                node=self.local_nodes[0],
++                reason=REASON,
++            ),
++            fixture.error(
++                report_codes.INVALID_RESPONSE_FORMAT,
++                node=self.local_nodes[1],
++            ),
++            fixture.error(
++                report_codes.FILE_REMOVE_FROM_NODE_ERROR,
++                file_description=DR_CONF,
++                reason=REASON,
++                node=self.local_nodes[2],
++            ),
++        ] + [
++            fixture.info(
++                report_codes.FILE_REMOVE_FROM_NODE_SUCCESS,
++                file_description=DR_CONF,
++                node=node,
++            ) for node in self.local_nodes[3:] + self.remote_nodes
++        ])
+diff --git a/pcs_test/tier0/lib/commands/dr/test_get_config.py b/pcs_test/tier0/lib/commands/dr/test_get_config.py
+new file mode 100644
+index 00000000..b2297c8a
+--- /dev/null
++++ b/pcs_test/tier0/lib/commands/dr/test_get_config.py
+@@ -0,0 +1,134 @@
++from unittest import TestCase
++
++from pcs import settings
++from pcs.common import (
++    file_type_codes,
++    report_codes,
++)
++from pcs.common.file import RawFileError
++from pcs.lib.commands import dr
++
++from pcs_test.tools.command_env import get_env_tools
++from pcs_test.tools import fixture
++
++REASON = "error msg"
++
++class Config(TestCase):
++    def setUp(self):
++        self.env_assist, self.config = get_env_tools(self)
++
++    def test_success(self):
++        (self.config
++            .raw_file.exists(
++                file_type_codes.PCS_DR_CONFIG,
++                settings.pcsd_dr_config_location,
++            )
++            .raw_file.read(
++                file_type_codes.PCS_DR_CONFIG,
++                settings.pcsd_dr_config_location,
++                content="""
++                    {
++                        "local": {
++                            "role": "PRIMARY"
++                        },
++                        "remote_sites": [
++                            {
++                                "nodes": [
++                                    {
++                                        "name": "recovery-node"
++                                    }
++                                ],
++                                "role": "RECOVERY"
++                            }
++                        ]
++                    }
++                """,
++            )
++        )
++        self.assertEqual(
++            dr.get_config(self.env_assist.get_env()),
++            {
++                "local_site": {
++                    "node_list": [],
++                    "site_role": "PRIMARY",
++                },
++                 "remote_site_list": [
++                    {
++                        "node_list": [
++                            {"name": "recovery-node"},
++                        ],
++                       "site_role": "RECOVERY",
++                    },
++                ],
++            }
++        )
++
++    def test_config_missing(self):
++        (self.config
++            .raw_file.exists(
++                file_type_codes.PCS_DR_CONFIG,
++                settings.pcsd_dr_config_location,
++                exists=False,
++            )
++        )
++        self.env_assist.assert_raise_library_error(
++            lambda: dr.get_config(self.env_assist.get_env()),
++        )
++        self.env_assist.assert_reports([
++            fixture.error(
++                report_codes.DR_CONFIG_DOES_NOT_EXIST,
++            ),
++        ])
++
++    def test_config_read_error(self):
++        (self.config
++            .raw_file.exists(
++                file_type_codes.PCS_DR_CONFIG,
++                settings.pcsd_dr_config_location,
++            )
++            .raw_file.read(
++                file_type_codes.PCS_DR_CONFIG,
++                settings.pcsd_dr_config_location,
++                exception_msg=REASON,
++            )
++        )
++        self.env_assist.assert_raise_library_error(
++            lambda: dr.get_config(self.env_assist.get_env()),
++        )
++        self.env_assist.assert_reports([
++            fixture.error(
++                report_codes.FILE_IO_ERROR,
++                file_type_code=file_type_codes.PCS_DR_CONFIG,
++                file_path=settings.pcsd_dr_config_location,
++                operation=RawFileError.ACTION_READ,
++                reason=REASON,
++            ),
++        ])
++
++    def test_config_parse_error(self):
++        (self.config
++            .raw_file.exists(
++                file_type_codes.PCS_DR_CONFIG,
++                settings.pcsd_dr_config_location,
++            )
++            .raw_file.read(
++                file_type_codes.PCS_DR_CONFIG,
++                settings.pcsd_dr_config_location,
++                content="bad content",
++            )
++        )
++        self.env_assist.assert_raise_library_error(
++            lambda: dr.get_config(self.env_assist.get_env()),
++        )
++        self.env_assist.assert_reports([
++            fixture.error(
++                report_codes.PARSE_ERROR_JSON_FILE,
++                file_type_code=file_type_codes.PCS_DR_CONFIG,
++                file_path=settings.pcsd_dr_config_location,
++                line_number=1,
++                column_number=1,
++                position=0,
++                reason="Expecting value",
++                full_msg="Expecting value: line 1 column 1 (char 0)",
++            ),
++        ])
+diff --git a/pcs_test/tier0/lib/commands/dr/test_set_recovery_site.py b/pcs_test/tier0/lib/commands/dr/test_set_recovery_site.py
+new file mode 100644
+index 00000000..06d80df1
+--- /dev/null
++++ b/pcs_test/tier0/lib/commands/dr/test_set_recovery_site.py
+@@ -0,0 +1,702 @@
++import json
++from unittest import TestCase
++
++from pcs_test.tools import fixture
++from pcs_test.tools.command_env import get_env_tools
++
++from pcs import settings
++from pcs.common import (
++    file_type_codes,
++    report_codes,
++)
++from pcs.lib.dr.config.facade import DrRole
++from pcs.lib.commands import dr
++
++DR_CFG_DESC = "disaster-recovery config"
++
++COROSYNC_CONF_TEMPLATE = """\
++totem {{
++    version: 2
++    cluster_name: cluster_name
++}}
++
++nodelist {{
++{node_list}}}
++"""
++
++NODE_TEMPLATE_NO_NAME = """\
++    node {{
++        ring0_addr: {node}
++        nodeid: {id}
++    }}
++"""
++
++NODE_TEMPLATE = """\
++    node {{
++        ring0_addr: {node}
++        name: {node}
++        nodeid: {id}
++    }}
++"""
++
++
++def export_cfg(cfg_struct):
++    return json.dumps(cfg_struct, indent=4, sort_keys=True).encode("utf-8")
++
++def dr_cfg_fixture(local_role, remote_role, nodes):
++    return export_cfg(dict(
++        local=dict(
++            role=local_role.value,
++        ),
++        remote_sites=[
++            dict(
++                role=remote_role.value,
++                nodes=[dict(name=node) for node in nodes],
++            ),
++        ]
++    ))
++
++def corosync_conf_fixture(node_list):
++    return COROSYNC_CONF_TEMPLATE.format(
++        node_list="\n".join(node_list_fixture(node_list)),
++    )
++
++def node_list_fixture(node_list):
++    return [
++        NODE_TEMPLATE.format(node=node, id=i)
++        for i, node in enumerate(node_list, start=1)
++    ]
++
++
++def generate_nodes(nodes_num, prefix=""):
++    return [f"{prefix}node{i}" for i in range(1, nodes_num + 1)]
++
++
++class CheckLive(TestCase):
++    def setUp(self):
++        self.env_assist, self.config = get_env_tools(self)
++
++    def assert_live_required(self, forbidden_options):
++        self.env_assist.assert_raise_library_error(
++            lambda: dr.set_recovery_site(self.env_assist.get_env(), "node"),
++            [
++                fixture.error(
++                    report_codes.LIVE_ENVIRONMENT_REQUIRED,
++                    forbidden_options=forbidden_options
++                )
++            ],
++            expected_in_processor=False
++        )
++
++    def test_mock_corosync(self):
++        self.config.env.set_corosync_conf_data(
++            corosync_conf_fixture(generate_nodes(3))
++        )
++        self.assert_live_required([file_type_codes.COROSYNC_CONF])
++
++    def test_mock_cib(self):
++        self.config.env.set_cib_data("<cib />")
++        self.assert_live_required([file_type_codes.CIB])
++
++    def test_mock(self):
++        self.config.env.set_corosync_conf_data(
++            corosync_conf_fixture(generate_nodes(3))
++        )
++        self.config.env.set_cib_data("<cib />")
++        self.assert_live_required([
++            file_type_codes.CIB,
++            file_type_codes.COROSYNC_CONF,
++        ])
++
++
++class SetRecoverySiteSuccess(TestCase):
++    def setUp(self):
++        self.env_assist, self.config = get_env_tools(self)
++
++    def _test_minimal(self, local_cluster_size, recovery_cluster_size):
++        local_nodes = generate_nodes(local_cluster_size)
++        remote_nodes = generate_nodes(recovery_cluster_size, prefix="recovery-")
++        orig_node = remote_nodes[-1]
++        cfg = self.config
++        cfg.env.set_known_nodes(local_nodes + remote_nodes)
++        cfg.raw_file.exists(
++            file_type_codes.PCS_DR_CONFIG,
++            settings.pcsd_dr_config_location,
++            exists=False,
++        )
++        cfg.corosync_conf.load_content(corosync_conf_fixture(local_nodes))
++        cfg.http.corosync.get_corosync_conf(
++            corosync_conf_fixture(remote_nodes), node_labels=[orig_node]
++        )
++        cfg.http.files.put_files(
++            node_labels=remote_nodes,
++            pcs_disaster_recovery_conf=dr_cfg_fixture(
++                DrRole.RECOVERY, DrRole.PRIMARY, local_nodes
++            ),
++            name="distribute_remote",
++        )
++        cfg.http.files.put_files(
++            node_labels=local_nodes,
++            pcs_disaster_recovery_conf=dr_cfg_fixture(
++                DrRole.PRIMARY, DrRole.RECOVERY, remote_nodes
++            ),
++            name="distribute_local",
++        )
++        dr.set_recovery_site(self.env_assist.get_env(), orig_node)
++        self.env_assist.assert_reports(
++            [
++                fixture.info(
++                    report_codes.FILES_DISTRIBUTION_STARTED,
++                    file_list=[DR_CFG_DESC],
++                    node_list=remote_nodes,
++                )
++            ] + [
++                fixture.info(
++                    report_codes.FILE_DISTRIBUTION_SUCCESS,
++                    file_description=DR_CFG_DESC,
++                    node=node,
++                ) for node in remote_nodes
++            ] + [
++                fixture.info(
++                    report_codes.FILES_DISTRIBUTION_STARTED,
++                    file_list=[DR_CFG_DESC],
++                    node_list=local_nodes,
++                )
++            ] + [
++                fixture.info(
++                    report_codes.FILE_DISTRIBUTION_SUCCESS,
++                    file_description=DR_CFG_DESC,
++                    node=node,
++                ) for node in local_nodes
++            ]
++        )
++
++    def test_minimal_local_1_remote_1(self):
++        self._test_minimal(1, 1)
++
++    def test_minimal_local_1_remote_2(self):
++        self._test_minimal(1, 2)
++
++    def test_minimal_local_1_remote_3(self):
++        self._test_minimal(1, 3)
++
++    def test_minimal_local_2_remote_1(self):
++        self._test_minimal(2, 1)
++
++    def test_minimal_local_2_remote_2(self):
++        self._test_minimal(2, 2)
++
++    def test_minimal_local_2_remote_3(self):
++        self._test_minimal(2, 3)
++
++    def test_minimal_local_3_remote_1(self):
++        self._test_minimal(3, 1)
++
++    def test_minimal_local_3_remote_2(self):
++        self._test_minimal(3, 2)
++
++    def test_minimal_local_3_remote_3(self):
++        self._test_minimal(3, 3)
++
++
++class FailureValidations(TestCase):
++    def setUp(self):
++        self.env_assist, self.config = get_env_tools(self)
++        self.local_nodes = generate_nodes(4)
++
++    def test_dr_cfg_exist(self):
++        orig_node = "node"
++        cfg = self.config
++        cfg.env.set_known_nodes(self.local_nodes + [orig_node])
++        cfg.raw_file.exists(
++            file_type_codes.PCS_DR_CONFIG,
++            settings.pcsd_dr_config_location,
++            exists=True,
++        )
++        cfg.corosync_conf.load_content(corosync_conf_fixture(self.local_nodes))
++        self.env_assist.assert_raise_library_error(
++            lambda: dr.set_recovery_site(self.env_assist.get_env(), orig_node),
++        )
++        self.env_assist.assert_reports([
++            fixture.error(
++                report_codes.DR_CONFIG_ALREADY_EXIST,
++            )
++        ])
++
++    def test_local_nodes_name_missing(self):
++        orig_node = "node"
++        cfg = self.config
++        cfg.env.set_known_nodes(self.local_nodes + [orig_node])
++        cfg.raw_file.exists(
++            file_type_codes.PCS_DR_CONFIG,
++            settings.pcsd_dr_config_location,
++            exists=False,
++        )
++        cfg.corosync_conf.load_content(
++            COROSYNC_CONF_TEMPLATE.format(
++                node_list="\n".join(
++                    [
++                        NODE_TEMPLATE_NO_NAME.format(
++                            node=self.local_nodes[0], id=len(self.local_nodes)
++                        )
++                    ] + node_list_fixture(self.local_nodes[1:])
++                )
++            )
++        )
++        self.env_assist.assert_raise_library_error(
++            lambda: dr.set_recovery_site(self.env_assist.get_env(), orig_node),
++        )
++        self.env_assist.assert_reports([
++            fixture.error(
++                report_codes.COROSYNC_CONFIG_MISSING_NAMES_OF_NODES,
++                fatal=True,
++            )
++        ])
++
++    def test_node_part_of_local_cluster(self):
++        orig_node = self.local_nodes[-1]
++        cfg = self.config
++        cfg.env.set_known_nodes(self.local_nodes + [orig_node])
++        cfg.raw_file.exists(
++            file_type_codes.PCS_DR_CONFIG,
++            settings.pcsd_dr_config_location,
++            exists=False,
++        )
++        cfg.corosync_conf.load_content(corosync_conf_fixture(self.local_nodes))
++        self.env_assist.assert_raise_library_error(
++            lambda: dr.set_recovery_site(self.env_assist.get_env(), orig_node),
++        )
++        self.env_assist.assert_reports([
++            fixture.error(
++                report_codes.NODE_IN_LOCAL_CLUSTER,
++                node=orig_node,
++            )
++        ])
++
++    def test_tokens_missing_for_local_nodes(self):
++        orig_node = "node"
++        cfg = self.config
++        cfg.env.set_known_nodes(self.local_nodes[:-1] + [orig_node])
++        cfg.raw_file.exists(
++            file_type_codes.PCS_DR_CONFIG,
++            settings.pcsd_dr_config_location,
++            exists=False,
++        )
++        cfg.corosync_conf.load_content(corosync_conf_fixture(self.local_nodes))
++        self.env_assist.assert_raise_library_error(
++            lambda: dr.set_recovery_site(self.env_assist.get_env(), orig_node),
++        )
++        self.env_assist.assert_reports([
++            fixture.error(
++                report_codes.HOST_NOT_FOUND,
++                host_list=self.local_nodes[-1:],
++            )
++        ])
++
++    def test_token_missing_for_node(self):
++        orig_node = "node"
++        cfg = self.config
++        cfg.env.set_known_nodes(self.local_nodes)
++        cfg.raw_file.exists(
++            file_type_codes.PCS_DR_CONFIG,
++            settings.pcsd_dr_config_location,
++            exists=False,
++        )
++        cfg.corosync_conf.load_content(corosync_conf_fixture(self.local_nodes))
++        self.env_assist.assert_raise_library_error(
++            lambda: dr.set_recovery_site(self.env_assist.get_env(), orig_node),
++        )
++        self.env_assist.assert_reports([
++            fixture.error(
++                report_codes.HOST_NOT_FOUND,
++                host_list=[orig_node],
++            )
++        ])
++
++    def test_tokens_missing_for_remote_cluster(self):
++        remote_nodes = generate_nodes(3, prefix="recovery-")
++        orig_node = remote_nodes[0]
++        cfg = self.config
++        cfg.env.set_known_nodes(self.local_nodes + remote_nodes[:-1])
++        cfg.raw_file.exists(
++            file_type_codes.PCS_DR_CONFIG,
++            settings.pcsd_dr_config_location,
++            exists=False,
++        )
++        cfg.corosync_conf.load_content(corosync_conf_fixture(self.local_nodes))
++        cfg.http.corosync.get_corosync_conf(
++            corosync_conf_fixture(remote_nodes), node_labels=[orig_node]
++        )
++        self.env_assist.assert_raise_library_error(
++            lambda: dr.set_recovery_site(self.env_assist.get_env(), orig_node),
++        )
++        self.env_assist.assert_reports([
++            fixture.error(
++                report_codes.HOST_NOT_FOUND,
++                host_list=remote_nodes[-1:],
++            )
++        ])
++
++
++REASON = "error msg"
++
++
++class FailureRemoteCorocyncConf(TestCase):
++    def setUp(self):
++        self.env_assist, self.config = get_env_tools(self)
++        self.local_nodes = generate_nodes(4)
++        self.remote_nodes = generate_nodes(3, prefix="recovery-")
++        self.node = self.remote_nodes[0]
++
++        self.config.env.set_known_nodes(self.local_nodes + self.remote_nodes)
++        self.config.raw_file.exists(
++            file_type_codes.PCS_DR_CONFIG,
++            settings.pcsd_dr_config_location,
++            exists=False,
++        )
++        self.config.corosync_conf.load_content(
++            corosync_conf_fixture(self.local_nodes)
++        )
++
++    def test_network_issue(self):
++        self.config.http.corosync.get_corosync_conf(
++            communication_list=[
++                dict(
++                    label=self.node,
++                    was_connected=False,
++                    error_msg=REASON,
++                )
++            ]
++        )
++        self.env_assist.assert_raise_library_error(
++            lambda: dr.set_recovery_site(self.env_assist.get_env(), self.node),
++        )
++        self.env_assist.assert_reports([
++            fixture.warn(
++                report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
++                node=self.node,
++                command="remote/get_corosync_conf",
++                reason=REASON,
++
++            ),
++            fixture.error(report_codes.UNABLE_TO_PERFORM_OPERATION_ON_ANY_NODE)
++        ])
++
++    def test_file_does_not_exist(self):
++        self.config.http.corosync.get_corosync_conf(
++            communication_list=[
++                dict(
++                    label=self.node,
++                    response_code=400,
++                    output=REASON,
++                )
++            ]
++        )
++        self.env_assist.assert_raise_library_error(
++            lambda: dr.set_recovery_site(self.env_assist.get_env(), self.node),
++        )
++        self.env_assist.assert_reports([
++            fixture.warn(
++                report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
++                node=self.node,
++                command="remote/get_corosync_conf",
++                reason=REASON,
++
++            ),
++            fixture.error(report_codes.UNABLE_TO_PERFORM_OPERATION_ON_ANY_NODE)
++        ])
++
++    def test_node_names_missing(self):
++        self.config.http.corosync.get_corosync_conf(
++            COROSYNC_CONF_TEMPLATE.format(
++                node_list="\n".join(
++                    [
++                        NODE_TEMPLATE_NO_NAME.format(
++                            node=self.remote_nodes[-1],
++                            id=len(self.remote_nodes),
++                        )
++                    ] + node_list_fixture(self.remote_nodes[:-1])
++                )
++            ),
++            node_labels=[self.node],
++        )
++        self.env_assist.assert_raise_library_error(
++            lambda: dr.set_recovery_site(self.env_assist.get_env(), self.node),
++        )
++        self.env_assist.assert_reports([
++            fixture.error(
++                report_codes.COROSYNC_CONFIG_MISSING_NAMES_OF_NODES,
++                fatal=True,
++            )
++        ])
++
++
++class FailureRemoteDrCfgDistribution(TestCase):
++    # pylint: disable=too-many-instance-attributes
++    def setUp(self):
++        self.env_assist, self.config = get_env_tools(self)
++        self.local_nodes = generate_nodes(4)
++        self.remote_nodes = generate_nodes(3, prefix="recovery-")
++        self.node = self.remote_nodes[0]
++        self.failed_nodes = self.remote_nodes[-1:]
++        successful_nodes = self.remote_nodes[:-1]
++
++        self.config.env.set_known_nodes(self.local_nodes + self.remote_nodes)
++        self.config.raw_file.exists(
++            file_type_codes.PCS_DR_CONFIG,
++            settings.pcsd_dr_config_location,
++            exists=False,
++        )
++        self.config.corosync_conf.load_content(
++            corosync_conf_fixture(self.local_nodes)
++        )
++        self.config.http.corosync.get_corosync_conf(
++            corosync_conf_fixture(self.remote_nodes), node_labels=[self.node]
++        )
++
++        self.success_communication = [
++            dict(label=node) for node in successful_nodes
++        ]
++        self.expected_reports = [
++            fixture.info(
++                report_codes.FILES_DISTRIBUTION_STARTED,
++                file_list=[DR_CFG_DESC],
++                node_list=self.remote_nodes,
++            )
++        ] + [
++            fixture.info(
++                report_codes.FILE_DISTRIBUTION_SUCCESS,
++                file_description=DR_CFG_DESC,
++                node=node,
++            ) for node in successful_nodes
++        ]
++
++    def test_write_failure(self):
++        self.config.http.files.put_files(
++            communication_list=self.success_communication + [
++                dict(
++                    label=node,
++                    output=json.dumps(dict(files={
++                        DR_CFG_DESC: dict(
++                            code="unexpected",
++                            message=REASON
++                        ),
++                    }))
++                ) for node in self.failed_nodes
++            ],
++            pcs_disaster_recovery_conf=dr_cfg_fixture(
++                DrRole.RECOVERY, DrRole.PRIMARY, self.local_nodes
++            ),
++        )
++        self.env_assist.assert_raise_library_error(
++            lambda: dr.set_recovery_site(self.env_assist.get_env(), self.node),
++        )
++        self.env_assist.assert_reports(
++             self.expected_reports + [
++                fixture.error(
++                    report_codes.FILE_DISTRIBUTION_ERROR,
++                    file_description=DR_CFG_DESC,
++                    reason=REASON,
++                    node=node,
++                ) for node in self.failed_nodes
++            ]
++        )
++
++    def test_network_failure(self):
++        self.config.http.files.put_files(
++            communication_list=self.success_communication + [
++                dict(
++                    label=node,
++                    was_connected=False,
++                    error_msg=REASON,
++                ) for node in self.failed_nodes
++            ],
++            pcs_disaster_recovery_conf=dr_cfg_fixture(
++                DrRole.RECOVERY, DrRole.PRIMARY, self.local_nodes
++            ),
++        )
++        self.env_assist.assert_raise_library_error(
++            lambda: dr.set_recovery_site(self.env_assist.get_env(), self.node),
++        )
++        self.env_assist.assert_reports(
++             self.expected_reports + [
++                fixture.error(
++                    report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
++                    command="remote/put_file",
++                    reason=REASON,
++                    node=node,
++                ) for node in self.failed_nodes
++            ]
++        )
++
++    def test_communication_error(self):
++        self.config.http.files.put_files(
++            communication_list=self.success_communication + [
++                dict(
++                    label=node,
++                    response_code=400,
++                    output=REASON,
++                ) for node in self.failed_nodes
++            ],
++            pcs_disaster_recovery_conf=dr_cfg_fixture(
++                DrRole.RECOVERY, DrRole.PRIMARY, self.local_nodes
++            ),
++        )
++        self.env_assist.assert_raise_library_error(
++            lambda: dr.set_recovery_site(self.env_assist.get_env(), self.node),
++        )
++        self.env_assist.assert_reports(
++             self.expected_reports + [
++                fixture.error(
++                    report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
++                    command="remote/put_file",
++                    reason=REASON,
++                    node=node,
++                ) for node in self.failed_nodes
++            ]
++        )
++
++
++class FailureLocalDrCfgDistribution(TestCase):
++    # pylint: disable=too-many-instance-attributes
++    def setUp(self):
++        self.env_assist, self.config = get_env_tools(self)
++        local_nodes = generate_nodes(4)
++        self.remote_nodes = generate_nodes(3, prefix="recovery-")
++        self.node = self.remote_nodes[0]
++        self.failed_nodes = local_nodes[-1:]
++        successful_nodes = local_nodes[:-1]
++
++        self.config.env.set_known_nodes(local_nodes + self.remote_nodes)
++        self.config.raw_file.exists(
++            file_type_codes.PCS_DR_CONFIG,
++            settings.pcsd_dr_config_location,
++            exists=False,
++        )
++        self.config.corosync_conf.load_content(
++            corosync_conf_fixture(local_nodes)
++        )
++        self.config.http.corosync.get_corosync_conf(
++            corosync_conf_fixture(self.remote_nodes), node_labels=[self.node]
++        )
++        self.config.http.files.put_files(
++            node_labels=self.remote_nodes,
++            pcs_disaster_recovery_conf=dr_cfg_fixture(
++                DrRole.RECOVERY, DrRole.PRIMARY, local_nodes
++            ),
++            name="distribute_remote",
++        )
++
++        self.success_communication = [
++            dict(label=node) for node in successful_nodes
++        ]
++        self.expected_reports = [
++            fixture.info(
++                report_codes.FILES_DISTRIBUTION_STARTED,
++                file_list=[DR_CFG_DESC],
++                node_list=self.remote_nodes,
++            )
++        ] + [
++            fixture.info(
++                report_codes.FILE_DISTRIBUTION_SUCCESS,
++                file_description=DR_CFG_DESC,
++                node=node,
++            ) for node in self.remote_nodes
++        ] + [
++            fixture.info(
++                report_codes.FILES_DISTRIBUTION_STARTED,
++                file_list=[DR_CFG_DESC],
++                node_list=local_nodes,
++            )
++        ] + [
++            fixture.info(
++                report_codes.FILE_DISTRIBUTION_SUCCESS,
++                file_description=DR_CFG_DESC,
++                node=node,
++            ) for node in successful_nodes
++        ]
++
++    def test_write_failure(self):
++        self.config.http.files.put_files(
++            communication_list=self.success_communication + [
++                dict(
++                    label=node,
++                    output=json.dumps(dict(files={
++                        DR_CFG_DESC: dict(
++                            code="unexpected",
++                            message=REASON
++                        ),
++                    }))
++                ) for node in self.failed_nodes
++            ],
++            pcs_disaster_recovery_conf=dr_cfg_fixture(
++                DrRole.PRIMARY, DrRole.RECOVERY, self.remote_nodes
++            ),
++        )
++        self.env_assist.assert_raise_library_error(
++            lambda: dr.set_recovery_site(self.env_assist.get_env(), self.node),
++        )
++        self.env_assist.assert_reports(
++             self.expected_reports + [
++                fixture.error(
++                    report_codes.FILE_DISTRIBUTION_ERROR,
++                    file_description=DR_CFG_DESC,
++                    reason=REASON,
++                    node=node,
++                ) for node in self.failed_nodes
++            ]
++        )
++
++    def test_network_failure(self):
++        self.config.http.files.put_files(
++            communication_list=self.success_communication + [
++                dict(
++                    label=node,
++                    was_connected=False,
++                    error_msg=REASON,
++                ) for node in self.failed_nodes
++            ],
++            pcs_disaster_recovery_conf=dr_cfg_fixture(
++                DrRole.PRIMARY, DrRole.RECOVERY, self.remote_nodes
++            ),
++        )
++        self.env_assist.assert_raise_library_error(
++            lambda: dr.set_recovery_site(self.env_assist.get_env(), self.node),
++        )
++        self.env_assist.assert_reports(
++             self.expected_reports + [
++                fixture.error(
++                    report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
++                    command="remote/put_file",
++                    reason=REASON,
++                    node=node,
++                ) for node in self.failed_nodes
++            ]
++        )
++
++    def test_communication_error(self):
++        self.config.http.files.put_files(
++            communication_list=self.success_communication + [
++                dict(
++                    label=node,
++                    response_code=400,
++                    output=REASON,
++                ) for node in self.failed_nodes
++            ],
++            pcs_disaster_recovery_conf=dr_cfg_fixture(
++                DrRole.PRIMARY, DrRole.RECOVERY, self.remote_nodes
++            ),
++        )
++        self.env_assist.assert_raise_library_error(
++            lambda: dr.set_recovery_site(self.env_assist.get_env(), self.node),
++        )
++        self.env_assist.assert_reports(
++             self.expected_reports + [
++                fixture.error(
++                    report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
++                    command="remote/put_file",
++                    reason=REASON,
++                    node=node,
++                ) for node in self.failed_nodes
++            ]
++        )
+diff --git a/pcs_test/tier0/lib/commands/dr/test_status.py b/pcs_test/tier0/lib/commands/dr/test_status.py
+new file mode 100644
+index 00000000..b46eb757
+--- /dev/null
++++ b/pcs_test/tier0/lib/commands/dr/test_status.py
+@@ -0,0 +1,756 @@
++import json
++import re
++from unittest import TestCase
++
++from pcs import settings
++from pcs.common import (
++    file_type_codes,
++    report_codes,
++)
++from pcs.common.dr import DrRole
++from pcs.common.file import RawFileError
++from pcs.lib.commands import dr
++
++from pcs_test.tools.command_env import get_env_tools
++from pcs_test.tools import fixture
++
++
++REASON = "error msg"
++
++class CheckLive(TestCase):
++    def setUp(self):
++        self.env_assist, self.config = get_env_tools(self)
++
++    def assert_live_required(self, forbidden_options):
++        self.env_assist.assert_raise_library_error(
++            lambda: dr.status_all_sites_plaintext(self.env_assist.get_env()),
++            [
++                fixture.error(
++                    report_codes.LIVE_ENVIRONMENT_REQUIRED,
++                    forbidden_options=forbidden_options
++                )
++            ],
++            expected_in_processor=False
++        )
++
++    def test_mock_corosync(self):
++        self.config.env.set_corosync_conf_data("corosync conf")
++        self.assert_live_required([file_type_codes.COROSYNC_CONF])
++
++    def test_mock_cib(self):
++        self.config.env.set_cib_data("<cib />")
++        self.assert_live_required([file_type_codes.CIB])
++
++    def test_mock(self):
++        self.config.env.set_corosync_conf_data("corosync conf")
++        self.config.env.set_cib_data("<cib />")
++        self.assert_live_required([
++            file_type_codes.CIB,
++            file_type_codes.COROSYNC_CONF,
++        ])
++
++class FixtureMixin():
++    def _set_up(self, local_node_count=2):
++        self.local_node_name_list = [
++            f"node{i}" for i in range(1, local_node_count + 1)
++        ]
++        self.remote_node_name_list = ["recovery-node"]
++        self.config.env.set_known_nodes(
++            self.local_node_name_list + self.remote_node_name_list
++        )
++        self.local_status = "local cluster\nstatus\n"
++        self.remote_status = "remote cluster\nstatus\n"
++
++    def _fixture_load_configs(self):
++        (self.config
++            .raw_file.exists(
++                file_type_codes.PCS_DR_CONFIG,
++                settings.pcsd_dr_config_location,
++            )
++            .raw_file.read(
++                file_type_codes.PCS_DR_CONFIG,
++                settings.pcsd_dr_config_location,
++                content="""
++                    {
++                        "local": {
++                            "role": "PRIMARY"
++                        },
++                        "remote_sites": [
++                            {
++                                "nodes": [
++                                    {
++                                        "name": "recovery-node"
++                                    }
++                                ],
++                                "role": "RECOVERY"
++                            }
++                        ]
++                    }
++                """,
++            )
++            .corosync_conf.load(node_name_list=self.local_node_name_list)
++        )
++
++    def _fixture_result(self, local_success=True, remote_success=True):
++        return [
++            {
++                "local_site": True,
++                "site_role": DrRole.PRIMARY,
++                "status_plaintext": self.local_status if local_success else "",
++                "status_successfully_obtained": local_success,
++            },
++            {
++                "local_site": False,
++                "site_role": DrRole.RECOVERY,
++                "status_plaintext": (
++                    self.remote_status if remote_success else ""
++                ),
++                "status_successfully_obtained": remote_success,
++            }
++        ]
++
++class Success(FixtureMixin, TestCase):
++    def setUp(self):
++        self.env_assist, self.config = get_env_tools(self)
++        self._set_up()
++
++    def _assert_success(self, hide_inactive_resources, verbose):
++        self._fixture_load_configs()
++        (self.config
++            .http.status.get_full_cluster_status_plaintext(
++                name="http.status.get_full_cluster_status_plaintext.local",
++                node_labels=self.local_node_name_list[:1],
++                hide_inactive_resources=hide_inactive_resources,
++                verbose=verbose,
++                cluster_status_plaintext=self.local_status,
++            )
++            .http.status.get_full_cluster_status_plaintext(
++                name="http.status.get_full_cluster_status_plaintext.remote",
++                node_labels=self.remote_node_name_list[:1],
++                hide_inactive_resources=hide_inactive_resources,
++                verbose=verbose,
++                cluster_status_plaintext=self.remote_status,
++            )
++        )
++        result = dr.status_all_sites_plaintext(
++            self.env_assist.get_env(),
++            hide_inactive_resources=hide_inactive_resources,
++            verbose=verbose,
++        )
++        self.assertEqual(result, self._fixture_result())
++
++    def test_success_minimal(self):
++        self._assert_success(False, False)
++
++    def test_success_full(self):
++        self._assert_success(False, True)
++
++    def test_success_hide_inactive(self):
++        self._assert_success(True, False)
++
++    def test_success_all_flags(self):
++        self._assert_success(True, True)
++
++    def test_local_not_running_first_node(self):
++        self._fixture_load_configs()
++        (self.config
++            .http.status.get_full_cluster_status_plaintext(
++                name="http.status.get_full_cluster_status_plaintext.local",
++                cluster_status_plaintext=self.local_status,
++                communication_list=[
++                    [dict(
++                        label=self.local_node_name_list[0],
++                        output=json.dumps(dict(
++                            status="error",
++                            status_msg="",
++                            data=None,
++                            report_list=[
++                                {
++                                    "severity": "ERROR",
++                                    "code": "CRM_MON_ERROR",
++                                    "info": {
++                                        "reason": REASON,
++                                    },
++                                    "forceable": None,
++                                    "report_text": "translated report",
++                                }
++                            ]
++                        )),
++                    )],
++                    [dict(
++                        label=self.local_node_name_list[1],
++                    )],
++                ]
++            )
++            .http.status.get_full_cluster_status_plaintext(
++                name="http.status.get_full_cluster_status_plaintext.remote",
++                node_labels=self.remote_node_name_list[:1],
++                cluster_status_plaintext=self.remote_status,
++            )
++        )
++        result = dr.status_all_sites_plaintext(self.env_assist.get_env())
++        self.assertEqual(result, self._fixture_result())
++        self.env_assist.assert_reports([
++            fixture.error(
++                report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
++                node=self.local_node_name_list[0],
++                command="remote/cluster_status_plaintext",
++                reason="translated report",
++            ),
++        ])
++
++    def test_local_not_running(self):
++        self._fixture_load_configs()
++        (self.config
++            .http.status.get_full_cluster_status_plaintext(
++                name="http.status.get_full_cluster_status_plaintext.local",
++                cmd_status="error",
++                cmd_status_msg="",
++                cluster_status_plaintext="",
++                report_list=[
++                    {
++                        "severity": "ERROR",
++                        "code": "CRM_MON_ERROR",
++                        "info": {
++                            "reason": REASON,
++                        },
++                        "forceable": None,
++                        "report_text": "translated report",
++                    }
++                ],
++                communication_list=[
++                    [dict(
++                        label=self.local_node_name_list[0],
++                    )],
++                    [dict(
++                        label=self.local_node_name_list[1],
++                    )],
++                ]
++            )
++            .http.status.get_full_cluster_status_plaintext(
++                name="http.status.get_full_cluster_status_plaintext.remote",
++                node_labels=self.remote_node_name_list[:1],
++                cluster_status_plaintext=self.remote_status,
++            )
++        )
++        result = dr.status_all_sites_plaintext(self.env_assist.get_env())
++        self.assertEqual(result, self._fixture_result(local_success=False))
++        self.env_assist.assert_reports(
++            [
++                fixture.error(
++                    report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
++                    node=node,
++                    command="remote/cluster_status_plaintext",
++                    reason="translated report",
++                )
++                for node in self.local_node_name_list
++            ]
++        )
++
++    def test_remote_not_running(self):
++        self._fixture_load_configs()
++        (self.config
++            .http.status.get_full_cluster_status_plaintext(
++                name="http.status.get_full_cluster_status_plaintext.local",
++                node_labels=self.local_node_name_list[:1],
++                cluster_status_plaintext=self.local_status,
++            )
++            .http.status.get_full_cluster_status_plaintext(
++                name="http.status.get_full_cluster_status_plaintext.remote",
++                node_labels=self.remote_node_name_list[:1],
++                cmd_status="error",
++                cmd_status_msg="",
++                cluster_status_plaintext="",
++                report_list=[
++                    {
++                        "severity": "ERROR",
++                        "code": "CRM_MON_ERROR",
++                        "info": {
++                            "reason": REASON,
++                        },
++                        "forceable": None,
++                        "report_text": "translated report",
++                    }
++                ],
++            )
++        )
++        result = dr.status_all_sites_plaintext(self.env_assist.get_env())
++        self.assertEqual(result, self._fixture_result(remote_success=False))
++        self.env_assist.assert_reports(
++            [
++                fixture.error(
++                    report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
++                    node=node,
++                    command="remote/cluster_status_plaintext",
++                    reason="translated report",
++                )
++                for node in self.remote_node_name_list
++            ]
++        )
++
++    def test_both_not_running(self):
++        self._fixture_load_configs()
++        (self.config
++            .http.status.get_full_cluster_status_plaintext(
++                name="http.status.get_full_cluster_status_plaintext.local",
++                cmd_status="error",
++                cmd_status_msg="",
++                cluster_status_plaintext="",
++                report_list=[
++                    {
++                        "severity": "ERROR",
++                        "code": "CRM_MON_ERROR",
++                        "info": {
++                            "reason": REASON,
++                        },
++                        "forceable": None,
++                        "report_text": "translated report",
++                    }
++                ],
++                communication_list=[
++                    [dict(
++                        label=self.local_node_name_list[0],
++                    )],
++                    [dict(
++                        label=self.local_node_name_list[1],
++                    )],
++                ]
++            )
++            .http.status.get_full_cluster_status_plaintext(
++                name="http.status.get_full_cluster_status_plaintext.remote",
++                node_labels=self.remote_node_name_list[:1],
++                cmd_status="error",
++                cmd_status_msg="",
++                cluster_status_plaintext="",
++                report_list=[
++                    {
++                        "severity": "ERROR",
++                        "code": "CRM_MON_ERROR",
++                        "info": {
++                            "reason": REASON,
++                        },
++                        "forceable": None,
++                        "report_text": "translated report",
++                    }
++                ],
++            )
++        )
++        result = dr.status_all_sites_plaintext(self.env_assist.get_env())
++        self.assertEqual(result, self._fixture_result(
++            local_success=False, remote_success=False
++        ))
++        self.env_assist.assert_reports(
++            [
++                fixture.error(
++                    report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
++                    node=node,
++                    command="remote/cluster_status_plaintext",
++                    reason="translated report",
++                )
++                for node in (
++                    self.local_node_name_list + self.remote_node_name_list
++                )
++            ]
++        )
++
++
++class CommunicationIssue(FixtureMixin, TestCase):
++    def setUp(self):
++        self.env_assist, self.config = get_env_tools(self)
++        self._set_up()
++
++    def test_unknown_node(self):
++        self.config.env.set_known_nodes(
++            self.local_node_name_list[1:] + self.remote_node_name_list
++        )
++        self._fixture_load_configs()
++        (self.config
++            .http.status.get_full_cluster_status_plaintext(
++                name="http.status.get_full_cluster_status_plaintext.local",
++                node_labels=self.local_node_name_list[1:],
++                cluster_status_plaintext=self.local_status,
++            )
++            .http.status.get_full_cluster_status_plaintext(
++                name="http.status.get_full_cluster_status_plaintext.remote",
++                node_labels=self.remote_node_name_list[:1],
++                cluster_status_plaintext=self.remote_status,
++            )
++        )
++        result = dr.status_all_sites_plaintext(self.env_assist.get_env())
++        self.assertEqual(result, self._fixture_result())
++        self.env_assist.assert_reports([
++            fixture.warn(
++                report_codes.HOST_NOT_FOUND,
++                host_list=["node1"],
++            ),
++        ])
++
++    def test_unknown_all_nodes_in_site(self):
++        self.config.env.set_known_nodes(
++            self.local_node_name_list
++        )
++        self._fixture_load_configs()
++        self.env_assist.assert_raise_library_error(
++            lambda: dr.status_all_sites_plaintext(self.env_assist.get_env()),
++        )
++        self.env_assist.assert_reports([
++            fixture.warn(
++                report_codes.HOST_NOT_FOUND,
++                host_list=self.remote_node_name_list,
++            ),
++            fixture.error(
++                report_codes.NONE_HOST_FOUND,
++            ),
++        ])
++
++    def test_missing_node_names(self):
++        self._fixture_load_configs()
++        coro_call = self.config.calls.get("corosync_conf.load")
++        (self.config
++            .http.status.get_full_cluster_status_plaintext(
++                name="http.status.get_full_cluster_status_plaintext.local",
++                node_labels=[],
++            )
++            .http.status.get_full_cluster_status_plaintext(
++                name="http.status.get_full_cluster_status_plaintext.remote",
++                node_labels=self.remote_node_name_list[:1],
++                cluster_status_plaintext=self.remote_status,
++            )
++        )
++        coro_call.content = re.sub(r"name: node\d", "", coro_call.content)
++        result = dr.status_all_sites_plaintext(self.env_assist.get_env())
++        self.assertEqual(result, self._fixture_result(local_success=False))
++        self.env_assist.assert_reports([
++            fixture.warn(
++                report_codes.COROSYNC_CONFIG_MISSING_NAMES_OF_NODES,
++                fatal=False,
++            ),
++        ])
++
++    def test_node_issues(self):
++        self._set_up(local_node_count=7)
++        self._fixture_load_configs()
++        (self.config
++            .http.status.get_full_cluster_status_plaintext(
++                name="http.status.get_full_cluster_status_plaintext.local",
++                cluster_status_plaintext=self.local_status,
++                communication_list=[
++                    [dict(
++                        label=self.local_node_name_list[0],
++                        was_connected=False,
++                    )],
++                    [dict(
++                        label=self.local_node_name_list[1],
++                        response_code=401,
++                    )],
++                    [dict(
++                        label=self.local_node_name_list[2],
++                        response_code=500,
++                    )],
++                    [dict(
++                        label=self.local_node_name_list[3],
++                        response_code=404,
++                    )],
++                    [dict(
++                        label=self.local_node_name_list[4],
++                        output="invalid data",
++                    )],
++                    [dict(
++                        label=self.local_node_name_list[5],
++                        output=json.dumps(dict(status="success"))
++                    )],
++                    [dict(
++                        label=self.local_node_name_list[6],
++                    )],
++                ]
++            )
++            .http.status.get_full_cluster_status_plaintext(
++                name="http.status.get_full_cluster_status_plaintext.remote",
++                node_labels=self.remote_node_name_list[:1],
++                cluster_status_plaintext=self.remote_status,
++            )
++        )
++        result = dr.status_all_sites_plaintext(self.env_assist.get_env())
++        self.assertEqual(result, self._fixture_result())
++        self.env_assist.assert_reports([
++            fixture.warn(
++                report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
++                command="remote/cluster_status_plaintext",
++                node="node1",
++                reason=None,
++            ),
++            fixture.warn(
++                report_codes.NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED,
++                command="remote/cluster_status_plaintext",
++                node="node2",
++                reason="HTTP error: 401",
++            ),
++            fixture.warn(
++                report_codes.NODE_COMMUNICATION_ERROR,
++                command="remote/cluster_status_plaintext",
++                node="node3",
++                reason="HTTP error: 500",
++            ),
++            fixture.warn(
++                report_codes.NODE_COMMUNICATION_ERROR_UNSUPPORTED_COMMAND,
++                command="remote/cluster_status_plaintext",
++                node="node4",
++                reason="HTTP error: 404",
++            ),
++            fixture.warn(
++                report_codes.INVALID_RESPONSE_FORMAT,
++                node="node5",
++            ),
++            fixture.warn(
++                report_codes.INVALID_RESPONSE_FORMAT,
++                node="node6",
++            ),
++        ])
++
++    def test_local_site_down(self):
++        self._fixture_load_configs()
++        (self.config
++            .http.status.get_full_cluster_status_plaintext(
++                name="http.status.get_full_cluster_status_plaintext.local",
++                cluster_status_plaintext=self.local_status,
++                communication_list=[
++                    [dict(
++                        label=self.local_node_name_list[0],
++                        was_connected=False,
++                    )],
++                    [dict(
++                        label=self.local_node_name_list[1],
++                        was_connected=False,
++                    )],
++                ]
++            )
++            .http.status.get_full_cluster_status_plaintext(
++                name="http.status.get_full_cluster_status_plaintext.remote",
++                node_labels=self.remote_node_name_list[:1],
++                cluster_status_plaintext=self.remote_status,
++            )
++        )
++        result = dr.status_all_sites_plaintext(self.env_assist.get_env())
++        self.assertEqual(result, self._fixture_result(local_success=False))
++        self.env_assist.assert_reports([
++            fixture.warn(
++                report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
++                command="remote/cluster_status_plaintext",
++                node="node1",
++                reason=None,
++            ),
++            fixture.warn(
++                report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
++                command="remote/cluster_status_plaintext",
++                node="node2",
++                reason=None,
++            ),
++        ])
++
++    def test_remote_site_down(self):
++        self._fixture_load_configs()
++        (self.config
++            .http.status.get_full_cluster_status_plaintext(
++                name="http.status.get_full_cluster_status_plaintext.local",
++                node_labels=self.local_node_name_list[:1],
++                cluster_status_plaintext=self.local_status,
++            )
++            .http.status.get_full_cluster_status_plaintext(
++                name="http.status.get_full_cluster_status_plaintext.remote",
++                cluster_status_plaintext=self.remote_status,
++                communication_list=[
++                    [dict(
++                        label=self.remote_node_name_list[0],
++                        was_connected=False,
++                    )],
++                ]
++            )
++        )
++        result = dr.status_all_sites_plaintext(self.env_assist.get_env())
++        self.assertEqual(result, self._fixture_result(remote_success=False))
++        self.env_assist.assert_reports([
++            fixture.warn(
++                report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
++                command="remote/cluster_status_plaintext",
++                node="recovery-node",
++                reason=None,
++            ),
++        ])
++
++    def test_both_sites_down(self):
++        self._fixture_load_configs()
++        (self.config
++            .http.status.get_full_cluster_status_plaintext(
++                name="http.status.get_full_cluster_status_plaintext.local",
++                cluster_status_plaintext=self.local_status,
++                communication_list=[
++                    [dict(
++                        label=self.local_node_name_list[0],
++                        was_connected=False,
++                    )],
++                    [dict(
++                        label=self.local_node_name_list[1],
++                        was_connected=False,
++                    )],
++                ]
++            )
++            .http.status.get_full_cluster_status_plaintext(
++                name="http.status.get_full_cluster_status_plaintext.remote",
++                cluster_status_plaintext=self.remote_status,
++                communication_list=[
++                    [dict(
++                        label=self.remote_node_name_list[0],
++                        was_connected=False,
++                    )],
++                ]
++            )
++        )
++        result = dr.status_all_sites_plaintext(self.env_assist.get_env())
++        self.assertEqual(
++            result,
++            self._fixture_result(local_success=False, remote_success=False)
++        )
++        self.env_assist.assert_reports([
++            fixture.warn(
++                report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
++                command="remote/cluster_status_plaintext",
++                node="node1",
++                reason=None,
++            ),
++            fixture.warn(
++                report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
++                command="remote/cluster_status_plaintext",
++                node="node2",
++                reason=None,
++            ),
++            fixture.warn(
++                report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
++                command="remote/cluster_status_plaintext",
++                node="recovery-node",
++                reason=None,
++            ),
++        ])
++
++
++class FatalConfigIssue(TestCase):
++    def setUp(self):
++        self.env_assist, self.config = get_env_tools(self)
++
++    def test_config_missing(self):
++        (self.config
++            .raw_file.exists(
++                file_type_codes.PCS_DR_CONFIG,
++                settings.pcsd_dr_config_location,
++                exists=False,
++            )
++        )
++        self.env_assist.assert_raise_library_error(
++            lambda: dr.status_all_sites_plaintext(self.env_assist.get_env()),
++        )
++        self.env_assist.assert_reports([
++            fixture.error(
++                report_codes.DR_CONFIG_DOES_NOT_EXIST,
++            ),
++        ])
++
++    def test_config_read_error(self):
++        (self.config
++            .raw_file.exists(
++                file_type_codes.PCS_DR_CONFIG,
++                settings.pcsd_dr_config_location,
++            )
++            .raw_file.read(
++                file_type_codes.PCS_DR_CONFIG,
++                settings.pcsd_dr_config_location,
++                exception_msg=REASON,
++            )
++        )
++        self.env_assist.assert_raise_library_error(
++            lambda: dr.status_all_sites_plaintext(self.env_assist.get_env()),
++        )
++        self.env_assist.assert_reports([
++            fixture.error(
++                report_codes.FILE_IO_ERROR,
++                file_type_code=file_type_codes.PCS_DR_CONFIG,
++                file_path=settings.pcsd_dr_config_location,
++                operation=RawFileError.ACTION_READ,
++                reason=REASON,
++            ),
++        ])
++
++    def test_config_parse_error(self):
++        (self.config
++            .raw_file.exists(
++                file_type_codes.PCS_DR_CONFIG,
++                settings.pcsd_dr_config_location,
++            )
++            .raw_file.read(
++                file_type_codes.PCS_DR_CONFIG,
++                settings.pcsd_dr_config_location,
++                content="bad content",
++            )
++        )
++        self.env_assist.assert_raise_library_error(
++            lambda: dr.status_all_sites_plaintext(self.env_assist.get_env()),
++        )
++        self.env_assist.assert_reports([
++            fixture.error(
++                report_codes.PARSE_ERROR_JSON_FILE,
++                file_type_code=file_type_codes.PCS_DR_CONFIG,
++                file_path=settings.pcsd_dr_config_location,
++                line_number=1,
++                column_number=1,
++                position=0,
++                reason="Expecting value",
++                full_msg="Expecting value: line 1 column 1 (char 0)",
++            ),
++        ])
++
++    def test_corosync_conf_read_error(self):
++        (self.config
++            .raw_file.exists(
++                file_type_codes.PCS_DR_CONFIG,
++                settings.pcsd_dr_config_location,
++            )
++            .raw_file.read(
++                file_type_codes.PCS_DR_CONFIG,
++                settings.pcsd_dr_config_location,
++                content="{}",
++            )
++            .corosync_conf.load_content("", exception_msg=REASON)
++        )
++        self.env_assist.assert_raise_library_error(
++            lambda: dr.status_all_sites_plaintext(self.env_assist.get_env()),
++            [
++                fixture.error(
++                    report_codes.UNABLE_TO_READ_COROSYNC_CONFIG,
++                    path=settings.corosync_conf_file,
++                    reason=REASON,
++                ),
++            ],
++            expected_in_processor=False
++        )
++
++    def test_corosync_conf_parse_error(self):
++        (self.config
++            .raw_file.exists(
++                file_type_codes.PCS_DR_CONFIG,
++                settings.pcsd_dr_config_location,
++            )
++            .raw_file.read(
++                file_type_codes.PCS_DR_CONFIG,
++                settings.pcsd_dr_config_location,
++                content="{}",
++            )
++            .corosync_conf.load_content("wrong {\n  corosync")
++        )
++        self.env_assist.assert_raise_library_error(
++            lambda: dr.status_all_sites_plaintext(self.env_assist.get_env()),
++            [
++                fixture.error(
++                    report_codes
++                    .PARSE_ERROR_COROSYNC_CONF_LINE_IS_NOT_SECTION_NOR_KEY_VALUE
++                ),
++            ],
++            expected_in_processor=False
++        )
+diff --git a/pcs_test/tier0/lib/communication/test_status.py b/pcs_test/tier0/lib/communication/test_status.py
+new file mode 100644
+index 00000000..b8db7a73
+--- /dev/null
++++ b/pcs_test/tier0/lib/communication/test_status.py
+@@ -0,0 +1,7 @@
++from unittest import TestCase
++
++class GetFullClusterStatusPlaintext(TestCase):
++    """
++    tested in:
++        pcs_test.tier0.lib.commands.dr.test_status
++    """
+diff --git a/pcs_test/tier0/lib/dr/__init__.py b/pcs_test/tier0/lib/dr/__init__.py
+new file mode 100644
+index 00000000..e69de29b
+diff --git a/pcs_test/tier0/lib/dr/test_facade.py b/pcs_test/tier0/lib/dr/test_facade.py
+new file mode 100644
+index 00000000..baa17b1e
+--- /dev/null
++++ b/pcs_test/tier0/lib/dr/test_facade.py
+@@ -0,0 +1,138 @@
++from unittest import TestCase
++
++from pcs.common.dr import DrRole
++from pcs.lib.dr.config import facade
++
++
++class Facade(TestCase):
++    def test_create(self):
++        for role in DrRole:
++            with self.subTest(local_role=role.value):
++                self.assertEqual(
++                    dict(
++                        local=dict(
++                            role=role.value,
++                        ),
++                        remote_sites=[],
++                    ),
++                    facade.Facade.create(role).config,
++                )
++
++    def test_local_role(self):
++        for role in DrRole:
++            with self.subTest(local_role=role.value):
++                cfg = facade.Facade({
++                    "local": {
++                        "role": role.value,
++                    },
++                    "remote_sites": [
++                    ],
++                })
++                self.assertEqual(cfg.local_role, role)
++
++    def test_add_site(self):
++        node_list = [f"node{i}" for i in range(4)]
++        cfg = facade.Facade.create(DrRole.PRIMARY)
++        cfg.add_site(DrRole.RECOVERY, node_list)
++        self.assertEqual(
++            dict(
++                local=dict(
++                    role=DrRole.PRIMARY.value,
++                ),
++                remote_sites=[
++                    dict(
++                        role=DrRole.RECOVERY.value,
++                        nodes=[dict(name=node) for node in node_list],
++                    ),
++                ]
++            ),
++            cfg.config
++        )
++
++class GetRemoteSiteList(TestCase):
++    def test_no_sites(self):
++        cfg = facade.Facade({
++            "local": {
++                "role": DrRole.PRIMARY.value,
++            },
++            "remote_sites": [
++            ],
++        })
++        self.assertEqual(
++            cfg.get_remote_site_list(),
++            []
++        )
++
++    def test_one_site(self):
++        cfg = facade.Facade({
++            "local": {
++                "role": DrRole.PRIMARY.value,
++            },
++            "remote_sites": [
++                {
++                    "role": DrRole.RECOVERY.value,
++                    "nodes": [
++                        {"name": "node1"},
++                    ],
++                },
++            ],
++        })
++        self.assertEqual(
++            cfg.get_remote_site_list(),
++            [
++                facade.DrSite(role=DrRole.RECOVERY, node_name_list=["node1"]),
++            ]
++        )
++
++    def test_more_sites(self):
++        cfg = facade.Facade({
++            "local": {
++                "role": DrRole.RECOVERY.value,
++            },
++            "remote_sites": [
++                {
++                    "role": DrRole.PRIMARY.value,
++                    "nodes": [
++                        {"name": "nodeA1"},
++                        {"name": "nodeA2"},
++                    ],
++                },
++                {
++                    "role": DrRole.RECOVERY.value,
++                    "nodes": [
++                        {"name": "nodeB1"},
++                        {"name": "nodeB2"},
++                    ],
++                },
++            ],
++        })
++        self.assertEqual(
++            cfg.get_remote_site_list(),
++            [
++                facade.DrSite(
++                    role=DrRole.PRIMARY, node_name_list=["nodeA1", "nodeA2"]
++                ),
++                facade.DrSite(
++                    role=DrRole.RECOVERY, node_name_list=["nodeB1", "nodeB2"]
++                ),
++            ]
++        )
++
++    def test_no_nodes(self):
++        cfg = facade.Facade({
++            "local": {
++                "role": DrRole.PRIMARY.value,
++            },
++            "remote_sites": [
++                {
++                    "role": DrRole.RECOVERY.value,
++                    "nodes": [],
++                },
++            ],
++        })
++        self.assertEqual(
++            cfg.get_remote_site_list(),
++            [
++                facade.DrSite(role=DrRole.RECOVERY, node_name_list=[]),
++            ]
++        )
+diff --git a/pcs_test/tier0/lib/test_env.py b/pcs_test/tier0/lib/test_env.py
+index edab9dc6..5c1c6a39 100644
+--- a/pcs_test/tier0/lib/test_env.py
++++ b/pcs_test/tier0/lib/test_env.py
+@@ -9,7 +9,7 @@ from pcs_test.tools.misc import (
+     get_test_resource as rc,
+ )
+ 
+-from pcs.common import report_codes
++from pcs.common import file_type_codes, report_codes
+ from pcs.lib.env import LibraryEnvironment
+ from pcs.lib.errors import ReportItemSeverity as severity
+ 
+@@ -57,6 +57,46 @@ class LibraryEnvironmentTest(TestCase):
+         env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+         self.assertEqual([], env.user_groups)
+ 
++class GhostFileCodes(TestCase):
++    def setUp(self):
++        self.mock_logger = mock.MagicMock(logging.Logger)
++        self.mock_reporter = MockLibraryReportProcessor()
++
++    def _fixture_get_env(self, cib_data=None, corosync_conf_data=None):
++        return LibraryEnvironment(
++            self.mock_logger,
++            self.mock_reporter,
++            cib_data=cib_data,
++            corosync_conf_data=corosync_conf_data
++        )
++
++    def test_nothing(self):
++        self.assertEqual(
++            self._fixture_get_env().ghost_file_codes,
++            set()
++        )
++
++    def test_corosync(self):
++        self.assertEqual(
++            self._fixture_get_env(corosync_conf_data="x").ghost_file_codes,
++            set([file_type_codes.COROSYNC_CONF])
++        )
++
++    def test_cib(self):
++        self.assertEqual(
++            self._fixture_get_env(cib_data="x").ghost_file_codes,
++            set([file_type_codes.CIB])
++        )
++
++    def test_all(self):
++        self.assertEqual(
++            self._fixture_get_env(
++                cib_data="x",
++                corosync_conf_data="x",
++            ).ghost_file_codes,
++            set([file_type_codes.COROSYNC_CONF, file_type_codes.CIB])
++        )
++
+ @patch_env("CommandRunner")
+ class CmdRunner(TestCase):
+     def setUp(self):
+diff --git a/pcs_test/tools/command_env/config_corosync_conf.py b/pcs_test/tools/command_env/config_corosync_conf.py
+index 3db57cee..a0bd9f33 100644
+--- a/pcs_test/tools/command_env/config_corosync_conf.py
++++ b/pcs_test/tools/command_env/config_corosync_conf.py
+@@ -9,9 +9,14 @@ class CorosyncConf:
+         self.__calls = call_collection
+ 
+     def load_content(
+-        self, content, name="corosync_conf.load_content", instead=None
++        self, content, name="corosync_conf.load_content", instead=None,
++        exception_msg=None
+     ):
+-        self.__calls.place(name, Call(content), instead=instead)
++        self.__calls.place(
++            name,
++            Call(content, exception_msg=exception_msg),
++            instead=instead
++        )
+ 
+     def load(
+         self, node_name_list=None, name="corosync_conf.load",
+diff --git a/pcs_test/tools/command_env/config_http.py b/pcs_test/tools/command_env/config_http.py
+index 6827c2b1..911a82df 100644
+--- a/pcs_test/tools/command_env/config_http.py
++++ b/pcs_test/tools/command_env/config_http.py
+@@ -7,6 +7,7 @@ from pcs_test.tools.command_env.config_http_files import FilesShortcuts
+ from pcs_test.tools.command_env.config_http_host import HostShortcuts
+ from pcs_test.tools.command_env.config_http_pcmk import PcmkShortcuts
+ from pcs_test.tools.command_env.config_http_sbd import SbdShortcuts
++from pcs_test.tools.command_env.config_http_status import StatusShortcuts
+ from pcs_test.tools.command_env.mock_node_communicator import(
+     place_communication,
+     place_requests,
+@@ -34,6 +35,7 @@ def _mutual_exclusive(param_names, **kwargs):
+ 
+ 
+ class HttpConfig:
++    # pylint: disable=too-many-instance-attributes
+     def __init__(self, call_collection, wrap_helper):
+         self.__calls = call_collection
+ 
+@@ -43,6 +45,7 @@ class HttpConfig:
+         self.host = wrap_helper(HostShortcuts(self.__calls))
+         self.pcmk = wrap_helper(PcmkShortcuts(self.__calls))
+         self.sbd = wrap_helper(SbdShortcuts(self.__calls))
++        self.status = wrap_helper(StatusShortcuts(self.__calls))
+ 
+     def add_communication(self, name, communication_list, **kwargs):
+         """
+diff --git a/pcs_test/tools/command_env/config_http_corosync.py b/pcs_test/tools/command_env/config_http_corosync.py
+index f7df73c1..3d89e649 100644
+--- a/pcs_test/tools/command_env/config_http_corosync.py
++++ b/pcs_test/tools/command_env/config_http_corosync.py
+@@ -29,6 +29,30 @@ class CorosyncShortcuts:
+             output='{"corosync":false}'
+         )
+ 
++    def get_corosync_conf(
++        self,
++        corosync_conf="",
++        node_labels=None,
++        communication_list=None,
++        name="http.corosync.get_corosync_conf",
++    ):
++        """
++        Create a call for loading corosync.conf text from remote nodes
++
++        string corosync_conf -- corosync.conf text to be loaded
++        list node_labels -- create success responses from these nodes
++        list communication_list -- create custom responses
++        string name -- the key of this call
++        """
++        place_multinode_call(
++            self.__calls,
++            name,
++            node_labels,
++            communication_list,
++            action="remote/get_corosync_conf",
++            output=corosync_conf,
++        )
++
+     def set_corosync_conf(
+         self, corosync_conf, node_labels=None, communication_list=None,
+         name="http.corosync.set_corosync_conf"
+diff --git a/pcs_test/tools/command_env/config_http_files.py b/pcs_test/tools/command_env/config_http_files.py
+index 8cc9b878..b4e93d64 100644
+--- a/pcs_test/tools/command_env/config_http_files.py
++++ b/pcs_test/tools/command_env/config_http_files.py
+@@ -11,9 +11,11 @@ class FilesShortcuts:
+ 
+     def put_files(
+         self, node_labels=None, pcmk_authkey=None, corosync_authkey=None,
+-        corosync_conf=None, pcs_settings_conf=None, communication_list=None,
++        corosync_conf=None, pcs_disaster_recovery_conf=None,
++        pcs_settings_conf=None, communication_list=None,
+         name="http.files.put_files",
+     ):
++        # pylint: disable=too-many-arguments
+         """
+         Create a call for the files distribution to the nodes.
+ 
+@@ -21,6 +23,7 @@ class FilesShortcuts:
+         pcmk_authkey bytes -- content of pacemaker authkey file
+         corosync_authkey bytes -- content of corosync authkey file
+         corosync_conf string -- content of corosync.conf
++        pcs_disaster_recovery_conf string -- content of pcs DR config
+         pcs_settings_conf string -- content of pcs_settings.conf
+         communication_list list -- create custom responses
+         name string -- the key of this call
+@@ -58,6 +61,17 @@ class FilesShortcuts:
+             )
+             output_data[file_id] = written_output_dict
+ 
++        if pcs_disaster_recovery_conf:
++            file_id = "disaster-recovery config"
++            input_data[file_id] = dict(
++                data=base64.b64encode(
++                    pcs_disaster_recovery_conf
++                ).decode("utf-8"),
++                type="pcs_disaster_recovery_conf",
++                rewrite_existing=True,
++            )
++            output_data[file_id] = written_output_dict
++
+         if pcs_settings_conf:
+             file_id = "pcs_settings.conf"
+             input_data[file_id] = dict(
+@@ -78,7 +92,8 @@ class FilesShortcuts:
+         )
+ 
+     def remove_files(
+-        self, node_labels=None, pcsd_settings=False, communication_list=None,
++        self, node_labels=None, pcsd_settings=False,
++        pcs_disaster_recovery_conf=False, communication_list=None,
+         name="http.files.remove_files"
+     ):
+         """
+@@ -86,6 +101,7 @@ class FilesShortcuts:
+ 
+         node_labels list -- create success responses from these nodes
+         pcsd_settings bool -- if True, remove file pcsd_settings
++        pcs_disaster_recovery_conf bool -- if True, remove pcs DR config
+         communication_list list -- create custom responses
+         name string -- the key of this call
+         """
+@@ -100,6 +116,14 @@ class FilesShortcuts:
+                 message="",
+             )
+ 
++        if pcs_disaster_recovery_conf:
++            file_id = "pcs disaster-recovery config"
++            input_data[file_id] = dict(type="pcs_disaster_recovery_conf")
++            output_data[file_id] = dict(
++                code="deleted",
++                message="",
++            )
++
+         place_multinode_call(
+             self.__calls,
+             name,
+diff --git a/pcs_test/tools/command_env/config_http_status.py b/pcs_test/tools/command_env/config_http_status.py
+new file mode 100644
+index 00000000..888b27bb
+--- /dev/null
++++ b/pcs_test/tools/command_env/config_http_status.py
+@@ -0,0 +1,52 @@
++import json
++
++from pcs_test.tools.command_env.mock_node_communicator import (
++    place_multinode_call,
++)
++
++class StatusShortcuts:
++    def __init__(self, calls):
++        self.__calls = calls
++
++    def get_full_cluster_status_plaintext(
++        self, node_labels=None, communication_list=None,
++        name="http.status.get_full_cluster_status_plaintext",
++        hide_inactive_resources=False, verbose=False,
++        cmd_status="success", cmd_status_msg="", report_list=None,
++        cluster_status_plaintext="",
++    ):
++        # pylint: disable=too-many-arguments
++        """
++        Create a call for getting cluster status in plaintext
++
++        node_labels list -- create success responses from these nodes
++        communication_list list -- create custom responses
++        name string -- the key of this call
++        bool hide_inactive_resources -- input flag
++        bool verbose -- input flag
++        string cmd_status -- did the command succeed?
++        string_cmd_status_msg -- details for cmd_status
++        iterable report_list -- reports from a remote node
++        string cluster_status_plaintext -- resulting cluster status
++        """
++        report_list = report_list or []
++        place_multinode_call(
++            self.__calls,
++            name,
++            node_labels,
++            communication_list,
++            action="remote/cluster_status_plaintext",
++            param_list=[(
++                "data_json",
++                json.dumps(dict(
++                    hide_inactive_resources=hide_inactive_resources,
++                    verbose=verbose,
++                ))
++            )],
++            output=json.dumps(dict(
++                status=cmd_status,
++                status_msg=cmd_status_msg,
++                data=cluster_status_plaintext,
++                report_list=report_list,
++            )),
++        )
+diff --git a/pcs_test/tools/command_env/mock_get_local_corosync_conf.py b/pcs_test/tools/command_env/mock_get_local_corosync_conf.py
+index 854cb8f0..01eca5f1 100644
+--- a/pcs_test/tools/command_env/mock_get_local_corosync_conf.py
++++ b/pcs_test/tools/command_env/mock_get_local_corosync_conf.py
+@@ -1,10 +1,15 @@
++from pcs import settings
++from pcs.lib import reports
++from pcs.lib.errors import LibraryError
++
+ CALL_TYPE_GET_LOCAL_COROSYNC_CONF = "CALL_TYPE_GET_LOCAL_COROSYNC_CONF"
+ 
+ class Call:
+     type = CALL_TYPE_GET_LOCAL_COROSYNC_CONF
+ 
+-    def __init__(self, content):
++    def __init__(self, content, exception_msg=None):
+         self.content = content
++        self.exception_msg = exception_msg
+ 
+     def __repr__(self):
+         return str("<GetLocalCorosyncConf>")
+@@ -13,5 +18,10 @@ class Call:
+ def get_get_local_corosync_conf(call_queue):
+     def get_local_corosync_conf():
+         _, expected_call = call_queue.take(CALL_TYPE_GET_LOCAL_COROSYNC_CONF)
++        if expected_call.exception_msg:
++            raise LibraryError(reports.corosync_config_read_error(
++                settings.corosync_conf_file,
++                expected_call.exception_msg,
++            ))
+         return expected_call.content
+     return get_local_corosync_conf
+diff --git a/pcsd/capabilities.xml b/pcsd/capabilities.xml
+index f9a76a22..1adb57ce 100644
+--- a/pcsd/capabilities.xml
++++ b/pcsd/capabilities.xml
+@@ -1696,6 +1696,18 @@
+ 
+ 
+ 
++    <capability id="pcs.disaster-recovery.essentials" in-pcs="1" in-pcsd="0">
++      <description>
++        Configure disaster-recovery with the local cluster as the primary site
++        and one recovery site. Display local disaster-recovery config. Display
++        status of all sites. Remove disaster-recovery config.
++
++        pcs commands: dr config, dr destroy, dr set-recovery-site, dr status
++      </description>
++    </capability>
++
++
++
+     <capability id="resource-agents.describe" in-pcs="1" in-pcsd="1">
+       <description>
+         Describe a resource agent - present its metadata.
+diff --git a/pcsd/pcsd_file.rb b/pcsd/pcsd_file.rb
+index 486b764d..d82b55d2 100644
+--- a/pcsd/pcsd_file.rb
++++ b/pcsd/pcsd_file.rb
+@@ -198,6 +198,20 @@ module PcsdFile
+     end
+   end
+ 
++  class PutPcsDrConf < PutFile
++    def full_file_name
++      @full_file_name ||= PCSD_DR_CONFIG_LOCATION
++    end
++
++    def binary?()
++      return true
++    end
++
++    def permissions()
++      return 0600
++    end
++  end
++
+   TYPES = {
+     "booth_authfile" => PutFileBoothAuthfile,
+     "booth_config" => PutFileBoothConfig,
+@@ -205,6 +219,7 @@ module PcsdFile
+     "corosync_authkey" => PutFileCorosyncAuthkey,
+     "corosync_conf" => PutFileCorosyncConf,
+     "pcs_settings_conf" => PutPcsSettingsConf,
++    "pcs_disaster_recovery_conf" => PutPcsDrConf,
+   }
+ end
+ 
+diff --git a/pcsd/pcsd_remove_file.rb b/pcsd/pcsd_remove_file.rb
+index 1038402d..ffaed8e3 100644
+--- a/pcsd/pcsd_remove_file.rb
++++ b/pcsd/pcsd_remove_file.rb
+@@ -41,8 +41,15 @@ module PcsdRemoveFile
+     end
+   end
+ 
++  class RemovePcsDrConf < RemoveFile
++    def full_file_name
++      @full_file_name ||= PCSD_DR_CONFIG_LOCATION
++    end
++  end
++
+   TYPES = {
+     "pcmk_remote_authkey" => RemovePcmkRemoteAuthkey,
+     "pcsd_settings" => RemovePcsdSettings,
++    "pcs_disaster_recovery_conf" => RemovePcsDrConf,
+   }
+ end
+diff --git a/pcsd/remote.rb b/pcsd/remote.rb
+index 6f454681..28b91382 100644
+--- a/pcsd/remote.rb
++++ b/pcsd/remote.rb
+@@ -27,6 +27,7 @@ def remote(params, request, auth_user)
+       :status => method(:node_status),
+       :status_all => method(:status_all),
+       :cluster_status => method(:cluster_status_remote),
++      :cluster_status_plaintext => method(:cluster_status_plaintext),
+       :auth => method(:auth),
+       :check_auth => method(:check_auth),
+       :cluster_setup => method(:cluster_setup),
+@@ -219,6 +220,18 @@ def cluster_status_remote(params, request, auth_user)
+   return JSON.generate(status)
+ end
+ 
++# get cluster status in plaintext (over-the-network version of 'pcs status')
++def cluster_status_plaintext(params, request, auth_user)
++  if not allowed_for_local_cluster(auth_user, Permissions::READ)
++    return 403, 'Permission denied'
++  end
++  return pcs_internal_proxy(
++    auth_user,
++    params.fetch(:data_json, ""),
++    "status.full_cluster_status_plaintext"
++  )
++end
++
+ def cluster_start(params, request, auth_user)
+   if params[:name]
+     code, response = send_request_with_token(
+@@ -444,7 +457,11 @@ def get_corosync_conf_remote(params, request, auth_user)
+   if not allowed_for_local_cluster(auth_user, Permissions::READ)
+     return 403, 'Permission denied'
+   end
+-  return get_corosync_conf()
++  begin
++    return get_corosync_conf()
++  rescue
++    return 400, 'Unable to read corosync.conf'
++  end
+ end
+ 
+ # deprecated, use /remote/put_file (note that put_file doesn't support backup
+diff --git a/pcsd/settings.rb b/pcsd/settings.rb
+index a6fd0a26..e8dc0c96 100644
+--- a/pcsd/settings.rb
++++ b/pcsd/settings.rb
+@@ -9,6 +9,7 @@ KEY_FILE = PCSD_VAR_LOCATION + 'pcsd.key'
+ KNOWN_HOSTS_FILE_NAME = 'known-hosts'
+ PCSD_SETTINGS_CONF_LOCATION = PCSD_VAR_LOCATION + "pcs_settings.conf"
+ PCSD_USERS_CONF_LOCATION = PCSD_VAR_LOCATION + "pcs_users.conf"
++PCSD_DR_CONFIG_LOCATION = PCSD_VAR_LOCATION + "disaster-recovery"
+ 
+ CRM_MON = "/usr/sbin/crm_mon"
+ CRM_NODE = "/usr/sbin/crm_node"
+diff --git a/pcsd/settings.rb.debian b/pcsd/settings.rb.debian
+index 5d830af9..daaae37b 100644
+--- a/pcsd/settings.rb.debian
++++ b/pcsd/settings.rb.debian
+@@ -9,6 +9,7 @@ KEY_FILE = PCSD_VAR_LOCATION + 'pcsd.key'
+ KNOWN_HOSTS_FILE_NAME = 'known-hosts'
+ PCSD_SETTINGS_CONF_LOCATION = PCSD_VAR_LOCATION + "pcs_settings.conf"
+ PCSD_USERS_CONF_LOCATION = PCSD_VAR_LOCATION + "pcs_users.conf"
++PCSD_DR_CONFIG_LOCATION = PCSD_VAR_LOCATION + "disaster-recovery"
+ 
+ CRM_MON = "/usr/sbin/crm_mon"
+ CRM_NODE = "/usr/sbin/crm_node"
+diff --git a/pylintrc b/pylintrc
+index 5fc4c200..9255a804 100644
+--- a/pylintrc
++++ b/pylintrc
+@@ -19,7 +19,7 @@ max-parents=10
+ min-public-methods=0
+ 
+ [BASIC]
+-good-names=e, i, op, ip, el, maxDiff, cm, ok, T
++good-names=e, i, op, ip, el, maxDiff, cm, ok, T, dr
+ 
+ [VARIABLES]
+ # A regular expression matching the name of dummy variables (i.e. expectedly
+-- 
+2.21.0
+
diff --git a/SOURCES/bz1676957-01-fix-crashes-in-pcs-host-auth.patch b/SOURCES/bz1676957-01-fix-crashes-in-pcs-host-auth.patch
deleted file mode 100644
index 4a69ea8..0000000
--- a/SOURCES/bz1676957-01-fix-crashes-in-pcs-host-auth.patch
+++ /dev/null
@@ -1,26 +0,0 @@
-From a6bcd7f5a387722b1cdec2d8cd8b9e3fafc36da4 Mon Sep 17 00:00:00 2001
-From: Tomas Jelinek <tojeline@redhat.com>
-Date: Mon, 17 Jun 2019 12:51:32 +0200
-Subject: [PATCH 1/3] fix crashes in 'pcs host auth'
-
----
- pcsd/cfgsync.rb | 3 ++-
- 1 file changed, 2 insertions(+), 1 deletion(-)
-
-diff --git a/pcsd/cfgsync.rb b/pcsd/cfgsync.rb
-index 703c0e39..f5d0ba86 100644
---- a/pcsd/cfgsync.rb
-+++ b/pcsd/cfgsync.rb
-@@ -826,7 +826,8 @@ module Cfgsync
-       PCSAuth.getSuperuserAuth(), [config_new.class], target_nodes,
-       cluster_name, new_hosts
-     )
--    fetched_hosts, _ = fetcher.fetch_all()[config_new.class.name]
-+    fetched_configs, _node_connected = fetcher.fetch_all()
-+    fetched_hosts = fetched_configs[config_new.class.name]
-     config_new = Cfgsync::merge_known_host_files(
-       config_old, fetched_hosts, new_hosts, remove_hosts_names
-     )
--- 
-2.21.0
-
diff --git a/SOURCES/bz1725183-01-fix-and-options-for-non-root-users.patch b/SOURCES/bz1725183-01-fix-and-options-for-non-root-users.patch
deleted file mode 100644
index 0aec508..0000000
--- a/SOURCES/bz1725183-01-fix-and-options-for-non-root-users.patch
+++ /dev/null
@@ -1,97 +0,0 @@
-From b667913a72f9516e3c9ae1452784874ff01a7688 Mon Sep 17 00:00:00 2001
-From: Tomas Jelinek <tojeline@redhat.com>
-Date: Tue, 2 Jul 2019 13:35:58 +0200
-Subject: [PATCH] fix - and -- options for non-root users
-
----
- pcs/app.py | 32 ++++++++++++++++++++++----------
- 1 file changed, 22 insertions(+), 10 deletions(-)
-
-diff --git a/pcs/app.py b/pcs/app.py
-index abb9277e..c13cb15c 100644
---- a/pcs/app.py
-+++ b/pcs/app.py
-@@ -37,12 +37,21 @@ from pcs.cli.routing import (
- from pcs.lib.errors import LibraryError
- 
- 
--def non_root_run(argv_cmd):
-+def _non_root_run(argv_cmd):
-     """
-     This function will run commands which has to be run as root for users which
-     are not root. If it required to run such command as root it will do that by
-     sending it to the local pcsd and then it will exit.
-     """
-+    # matching the commands both in here and in pcsd expects -o and --options
-+    # to be at the end of a command
-+    argv_and_options = argv_cmd[:]
-+    for option, value in utils.pcs_options.items():
-+        if parse_args.is_option_expecting_value(option):
-+            argv_and_options.extend([option, value])
-+        else:
-+            argv_and_options.append(option)
-+
-     # specific commands need to be run under root account, pass them to pcsd
-     # don't forget to allow each command in pcsd.rb in "post /run_pcs do"
-     root_command_list = [
-@@ -67,29 +76,29 @@ def non_root_run(argv_cmd):
-         ['status', 'quorum', '...'],
-         ['status', 'pcsd', '...'],
-     ]
--    orig_argv = argv_cmd[:]
-+
-     for root_cmd in root_command_list:
-         if (
--            (argv_cmd == root_cmd)
-+            (argv_and_options == root_cmd)
-             or
-             (
-                 root_cmd[-1] == "..."
-                 and
--                argv_cmd[:len(root_cmd)-1] == root_cmd[:-1]
-+                argv_and_options[:len(root_cmd)-1] == root_cmd[:-1]
-             )
-         ):
-             # handle interactivity of 'pcs cluster auth'
--            if argv_cmd[0:2] in [["cluster", "auth"], ["host", "auth"]]:
-+            if argv_and_options[0:2] in [["cluster", "auth"], ["host", "auth"]]:
-                 if "-u" not in utils.pcs_options:
-                     username = utils.get_terminal_input('Username: ')
--                    orig_argv.extend(["-u", username])
-+                    argv_and_options.extend(["-u", username])
-                 if "-p" not in utils.pcs_options:
-                     password = utils.get_terminal_password()
--                    orig_argv.extend(["-p", password])
-+                    argv_and_options.extend(["-p", password])
- 
-             # call the local pcsd
-             err_msgs, exitcode, std_out, std_err = utils.call_local_pcsd(
--                orig_argv
-+                argv_and_options
-             )
-             if err_msgs:
-                 for msg in err_msgs:
-@@ -105,7 +114,10 @@ logging.basicConfig()
- usefile = False
- filename = ""
- def main(argv=None):
--    # pylint: disable=too-many-locals, too-many-branches, too-many-statements, global-statement
-+    # pylint: disable=global-statement
-+    # pylint: disable=too-many-branches
-+    # pylint: disable=too-many-locals
-+    # pylint: disable=too-many-statements
-     if completion.has_applicable_environment(os.environ):
-         print(completion.make_suggestions(
-             os.environ,
-@@ -207,7 +219,7 @@ def main(argv=None):
-     logger.handlers = []
- 
-     if (os.getuid() != 0) and (argv and argv[0] != "help") and not usefile:
--        non_root_run(argv)
-+        _non_root_run(argv)
-     cmd_map = {
-         "resource": resource.resource_cmd,
-         "cluster": cluster.cluster_cmd,
--- 
-2.21.0
-
diff --git a/SOURCES/bz1740218-01-set-authkey-length-to-256-bytes.patch b/SOURCES/bz1740218-01-set-authkey-length-to-256-bytes.patch
deleted file mode 100644
index 7171520..0000000
--- a/SOURCES/bz1740218-01-set-authkey-length-to-256-bytes.patch
+++ /dev/null
@@ -1,79 +0,0 @@
-From 77cce4b737c8d242e3c550e3d14cb4893b4ad73c Mon Sep 17 00:00:00 2001
-From: Tomas Jelinek <tojeline@redhat.com>
-Date: Tue, 13 Aug 2019 10:06:29 +0200
-Subject: [PATCH] set authkey length to 256 bytes
-
----
- pcs/settings_default.py                                         | 7 +++++--
- pcs_test/tier0/lib/commands/remote_node/test_node_add_guest.py  | 4 ++--
- pcs_test/tier0/lib/commands/remote_node/test_node_add_remote.py | 4 ++--
- 3 files changed, 9 insertions(+), 6 deletions(-)
-
-diff --git a/pcs/settings_default.py b/pcs/settings_default.py
-index 60647f5d..07014e33 100644
---- a/pcs/settings_default.py
-+++ b/pcs/settings_default.py
-@@ -20,11 +20,14 @@ corosync_qdevice_net_client_certs_dir = os.path.join(
- )
- corosync_qdevice_net_client_ca_file_name = "qnetd-cacert.crt"
- corosync_authkey_file = os.path.join(corosync_conf_dir, "authkey")
--corosync_authkey_bytes = 384
-+# Must be set to 256 for corosync to work in FIPS environment.
-+corosync_authkey_bytes = 256
- corosync_log_file = "/var/log/cluster/corosync.log"
- pacemaker_authkey_file = "/etc/pacemaker/authkey"
--pacemaker_authkey_bytes = 384
-+# Using the same value as for corosync. Higher values MAY work in FIPS.
-+pacemaker_authkey_bytes = 256
- booth_authkey_file_mode = 0o600
-+# Booth does not support keys longer than 64 bytes.
- booth_authkey_bytes = 64
- cluster_conf_file = "/etc/cluster/cluster.conf"
- fence_agent_binaries = "/usr/sbin/"
-diff --git a/pcs_test/tier0/lib/commands/remote_node/test_node_add_guest.py b/pcs_test/tier0/lib/commands/remote_node/test_node_add_guest.py
-index 4a68c9d1..ee463c70 100644
---- a/pcs_test/tier0/lib/commands/remote_node/test_node_add_guest.py
-+++ b/pcs_test/tier0/lib/commands/remote_node/test_node_add_guest.py
-@@ -158,7 +158,7 @@ class AddGuest(TestCase):
-             .local.push_cib()
-         )
-         node_add_guest(self.env_assist.get_env())
--        generate_binary_key.assert_called_once_with(random_bytes_count=384)
-+        generate_binary_key.assert_called_once_with(random_bytes_count=256)
-         self.env_assist.assert_reports(
-             REPORTS
-                 .adapt(
-@@ -531,7 +531,7 @@ class AddGuest(TestCase):
-             .local.push_cib()
-         )
-         node_add_guest(self.env_assist.get_env(), skip_offline_nodes=True)
--        generate_binary_key.assert_called_once_with(random_bytes_count=384)
-+        generate_binary_key.assert_called_once_with(random_bytes_count=256)
-         self.env_assist.assert_reports(
-             fixture_reports_new_node_unreachable(NODE_NAME)
-             + [
-diff --git a/pcs_test/tier0/lib/commands/remote_node/test_node_add_remote.py b/pcs_test/tier0/lib/commands/remote_node/test_node_add_remote.py
-index bb2b6615..d34d7126 100644
---- a/pcs_test/tier0/lib/commands/remote_node/test_node_add_remote.py
-+++ b/pcs_test/tier0/lib/commands/remote_node/test_node_add_remote.py
-@@ -216,7 +216,7 @@ class AddRemote(TestCase):
-             .env.push_cib(resources=FIXTURE_RESOURCES)
-         )
-         node_add_remote(self.env_assist.get_env())
--        generate_binary_key.assert_called_once_with(random_bytes_count=384)
-+        generate_binary_key.assert_called_once_with(random_bytes_count=256)
-         self.env_assist.assert_reports(
-             REPORTS
-                 .adapt(
-@@ -511,7 +511,7 @@ class AddRemote(TestCase):
-             .env.push_cib(resources=FIXTURE_RESOURCES)
-         )
-         node_add_remote(self.env_assist.get_env(), skip_offline_nodes=True)
--        generate_binary_key.assert_called_once_with(random_bytes_count=384)
-+        generate_binary_key.assert_called_once_with(random_bytes_count=256)
-         self.env_assist.assert_reports(
-             fixture_reports_new_node_unreachable(NODE_NAME)
-             + [
--- 
-2.11.0
-
diff --git a/SOURCES/bz1743731-01-fix-error-msg-when-cluster-is-not-set-up.patch b/SOURCES/bz1743731-01-fix-error-msg-when-cluster-is-not-set-up.patch
new file mode 100644
index 0000000..06f551e
--- /dev/null
+++ b/SOURCES/bz1743731-01-fix-error-msg-when-cluster-is-not-set-up.patch
@@ -0,0 +1,130 @@
+From 8058591d0d79942bf6c61f105a180592bac7cf69 Mon Sep 17 00:00:00 2001
+From: Ondrej Mular <omular@redhat.com>
+Date: Thu, 28 Nov 2019 16:57:24 +0100
+Subject: [PATCH 2/3] fix error msg when cluster is not set up
+
+---
+ CHANGELOG.md                                |  4 +++
+ pcs/cluster.py                              |  3 +++
+ pcs/lib/commands/qdevice.py                 |  2 ++
+ pcs_test/tier0/lib/commands/test_qdevice.py | 27 +++++++++++++++++++--
+ 4 files changed, 34 insertions(+), 2 deletions(-)
+
+diff --git a/CHANGELOG.md b/CHANGELOG.md
+index 889436c3..5a7ec377 100644
+--- a/CHANGELOG.md
++++ b/CHANGELOG.md
+@@ -6,7 +6,11 @@
+ - It is possible to configure a disaster-recovery site and display its status
+   ([rhbz#1676431])
+ 
++### Fixed
++- Error messages in cases when cluster is not set up ([rhbz#1743731])
++
+ [rhbz#1676431]: https://bugzilla.redhat.com/show_bug.cgi?id=1676431
++[rhbz#1743731]: https://bugzilla.redhat.com/show_bug.cgi?id=1743731
+ 
+ 
+ ## [0.10.4] - 2019-11-28
+diff --git a/pcs/cluster.py b/pcs/cluster.py
+index 9473675f..0e9b3365 100644
+--- a/pcs/cluster.py
++++ b/pcs/cluster.py
+@@ -190,6 +190,9 @@ def start_cluster(argv):
+             wait_for_nodes_started(nodes, wait_timeout)
+         return
+ 
++    if not utils.hasCorosyncConf():
++        utils.err("cluster is not currently configured on this node")
++
+     print("Starting Cluster...")
+     service_list = ["corosync"]
+     if utils.need_to_handle_qdevice_service():
+diff --git a/pcs/lib/commands/qdevice.py b/pcs/lib/commands/qdevice.py
+index 3d7af234..41f7c296 100644
+--- a/pcs/lib/commands/qdevice.py
++++ b/pcs/lib/commands/qdevice.py
+@@ -81,6 +81,8 @@ def qdevice_start(lib_env, model):
+     start qdevice now on local host
+     """
+     _check_model(model)
++    if not qdevice_net.qdevice_initialized():
++        raise LibraryError(reports.qdevice_not_initialized(model))
+     _service_start(lib_env, qdevice_net.qdevice_start)
+ 
+ def qdevice_stop(lib_env, model, proceed_if_used=False):
+diff --git a/pcs_test/tier0/lib/commands/test_qdevice.py b/pcs_test/tier0/lib/commands/test_qdevice.py
+index b2c83ca4..af23db61 100644
+--- a/pcs_test/tier0/lib/commands/test_qdevice.py
++++ b/pcs_test/tier0/lib/commands/test_qdevice.py
+@@ -689,6 +689,7 @@ class QdeviceNetDisableTest(QdeviceTestCase):
+         )
+ 
+ 
++@mock.patch("pcs.lib.corosync.qdevice_net.qdevice_initialized")
+ @mock.patch("pcs.lib.external.start_service")
+ @mock.patch.object(
+     LibraryEnvironment,
+@@ -696,9 +697,11 @@ class QdeviceNetDisableTest(QdeviceTestCase):
+     lambda self: "mock_runner"
+ )
+ class QdeviceNetStartTest(QdeviceTestCase):
+-    def test_success(self, mock_net_start):
++    def test_success(self, mock_net_start, mock_qdevice_initialized):
++        mock_qdevice_initialized.return_value = True
+         lib.qdevice_start(self.lib_env, "net")
+         mock_net_start.assert_called_once_with("mock_runner", "corosync-qnetd")
++        mock_qdevice_initialized.assert_called_once_with()
+         assert_report_item_list_equal(
+             self.mock_reporter.report_item_list,
+             [
+@@ -719,11 +722,12 @@ class QdeviceNetStartTest(QdeviceTestCase):
+             ]
+         )
+ 
+-    def test_failed(self, mock_net_start):
++    def test_failed(self, mock_net_start, mock_qdevice_initialized):
+         mock_net_start.side_effect = StartServiceError(
+             "test service",
+             "test error"
+         )
++        mock_qdevice_initialized.return_value = True
+ 
+         assert_raise_library_error(
+             lambda: lib.qdevice_start(self.lib_env, "net"),
+@@ -737,6 +741,7 @@ class QdeviceNetStartTest(QdeviceTestCase):
+             )
+         )
+         mock_net_start.assert_called_once_with("mock_runner", "corosync-qnetd")
++        mock_qdevice_initialized.assert_called_once_with()
+         assert_report_item_list_equal(
+             self.mock_reporter.report_item_list,
+             [
+@@ -750,6 +755,24 @@ class QdeviceNetStartTest(QdeviceTestCase):
+             ]
+         )
+ 
++    def test_qdevice_not_initialized(
++        self, mock_net_start, mock_qdevice_initialized
++    ):
++        mock_qdevice_initialized.return_value = False
++
++        assert_raise_library_error(
++            lambda: lib.qdevice_start(self.lib_env, "net"),
++            (
++                severity.ERROR,
++                report_codes.QDEVICE_NOT_INITIALIZED,
++                {
++                    "model": "net",
++                }
++            )
++        )
++        mock_net_start.assert_not_called()
++        mock_qdevice_initialized.assert_called_once_with()
++
+ 
+ @mock.patch("pcs.lib.corosync.qdevice_net.qdevice_status_cluster_text")
+ @mock.patch("pcs.lib.external.stop_service")
+-- 
+2.21.0
+
diff --git a/SOURCES/bz1750427-01-link-to-sbd-man-page-from-sbd-enable-doc.patch b/SOURCES/bz1750427-01-link-to-sbd-man-page-from-sbd-enable-doc.patch
new file mode 100644
index 0000000..bfae069
--- /dev/null
+++ b/SOURCES/bz1750427-01-link-to-sbd-man-page-from-sbd-enable-doc.patch
@@ -0,0 +1,40 @@
+From e4ab588efe0f4cc6b5fcf0853293c93bd4f31604 Mon Sep 17 00:00:00 2001
+From: Ondrej Mular <omular@redhat.com>
+Date: Wed, 29 Jan 2020 13:13:45 +0100
+Subject: [PATCH 4/7] link to sbd man page from `sbd enable` doc
+
+---
+ pcs/pcs.8    | 2 +-
+ pcs/usage.py | 3 ++-
+ 2 files changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/pcs/pcs.8 b/pcs/pcs.8
+index 651fda83..ff2ba0b0 100644
+--- a/pcs/pcs.8
++++ b/pcs/pcs.8
+@@ -531,7 +531,7 @@ history update
+ Update fence history from all nodes.
+ .TP
+ sbd enable [watchdog=<path>[@<node>]]... [device=<path>[@<node>]]... [<SBD_OPTION>=<value>]... [\fB\-\-no\-watchdog\-validation\fR]
+-Enable SBD in cluster. Default path for watchdog device is /dev/watchdog. Allowed SBD options: SBD_WATCHDOG_TIMEOUT (default: 5), SBD_DELAY_START (default: no), SBD_STARTMODE (default: always) and SBD_TIMEOUT_ACTION. It is possible to specify up to 3 devices per node. If \fB\-\-no\-watchdog\-validation\fR is specified, validation of watchdogs will be skipped.
++Enable SBD in cluster. Default path for watchdog device is /dev/watchdog. Allowed SBD options: SBD_WATCHDOG_TIMEOUT (default: 5), SBD_DELAY_START (default: no), SBD_STARTMODE (default: always) and SBD_TIMEOUT_ACTION. SBD options are documented in sbd(8) man page. It is possible to specify up to 3 devices per node. If \fB\-\-no\-watchdog\-validation\fR is specified, validation of watchdogs will be skipped.
+ 
+ .B WARNING: Cluster has to be restarted in order to apply these changes.
+ 
+diff --git a/pcs/usage.py b/pcs/usage.py
+index e4f5af32..30c63964 100644
+--- a/pcs/usage.py
++++ b/pcs/usage.py
+@@ -1147,7 +1147,8 @@ Commands:
+         Enable SBD in cluster. Default path for watchdog device is
+         /dev/watchdog. Allowed SBD options: SBD_WATCHDOG_TIMEOUT (default: 5),
+         SBD_DELAY_START (default: no), SBD_STARTMODE (default: always) and
+-        SBD_TIMEOUT_ACTION. It is possible to specify up to 3 devices per node.
++        SBD_TIMEOUT_ACTION. SBD options are documented in sbd(8) man page. It
++        is possible to specify up to 3 devices per node.
+         If --no-watchdog-validation is specified, validation of watchdogs will
+         be skipped.
+ 
+-- 
+2.21.1
+
diff --git a/SOURCES/bz1781303-01-fix-safe-disabling-clones-groups-bundles.patch b/SOURCES/bz1781303-01-fix-safe-disabling-clones-groups-bundles.patch
new file mode 100644
index 0000000..02a5533
--- /dev/null
+++ b/SOURCES/bz1781303-01-fix-safe-disabling-clones-groups-bundles.patch
@@ -0,0 +1,636 @@
+From e56f42bf31ae0a52618fe8754fd0b2ae623e6a7a Mon Sep 17 00:00:00 2001
+From: Tomas Jelinek <tojeline@redhat.com>
+Date: Thu, 12 Dec 2019 14:46:44 +0100
+Subject: [PATCH 1/7] squash bz1781303 fix safe-disabling clones, groups,
+ bundles
+
+fix simulate_cib_error report
+
+Putting only one CIB in the report is not enough info. Both original and
+changed CIB as well as crm_simulate output would be needed. All that
+info can be seen in debug messages. So there is no need to put it in the
+report.
+---
+ pcs/cli/common/console_report.py              |   7 +-
+ pcs/lib/cib/resource/common.py                |  21 +-
+ pcs/lib/commands/resource.py                  |  27 +-
+ pcs/lib/pacemaker/live.py                     |   8 +-
+ pcs/lib/reports.py                            |   4 +-
+ .../tier0/cli/common/test_console_report.py   |  10 +-
+ .../tier0/lib/cib/test_resource_common.py     |  60 ++++-
+ .../resource/test_resource_enable_disable.py  | 242 +++++++++++++++++-
+ pcs_test/tier0/lib/pacemaker/test_live.py     |   7 -
+ 9 files changed, 350 insertions(+), 36 deletions(-)
+
+diff --git a/pcs/cli/common/console_report.py b/pcs/cli/common/console_report.py
+index d349c823..60dbb2a0 100644
+--- a/pcs/cli/common/console_report.py
++++ b/pcs/cli/common/console_report.py
+@@ -1269,8 +1269,11 @@ CODE_TO_MESSAGE_BUILDER_MAP = {
+     ,
+ 
+     codes.CIB_SIMULATE_ERROR: lambda info:
+-        "Unable to simulate changes in CIB: {reason}\n{cib}"
+-        .format(**info)
++        "Unable to simulate changes in CIB{_reason}"
++        .format(
++            _reason=format_optional(info["reason"], ": {0}"),
++            **info
++        )
+     ,
+ 
+     codes.CIB_PUSH_FORCED_FULL_DUE_TO_CRM_FEATURE_SET: lambda info:
+diff --git a/pcs/lib/cib/resource/common.py b/pcs/lib/cib/resource/common.py
+index f1891003..e30c5e69 100644
+--- a/pcs/lib/cib/resource/common.py
++++ b/pcs/lib/cib/resource/common.py
+@@ -1,8 +1,9 @@
+ from collections import namedtuple
+ from typing import (
+     cast,
++    List,
+     Optional,
+-    Sequence,
++    Set,
+ )
+ from xml.etree.ElementTree import Element
+ 
+@@ -114,7 +115,23 @@ def find_primitives(resource_el):
+         return [resource_el]
+     return []
+ 
+-def get_inner_resources(resource_el: Element) -> Sequence[Element]:
++def get_all_inner_resources(resource_el: Element) -> Set[Element]:
++    """
++    Return all inner resources (both direct and indirect) of a resource
++    Example: for a clone containing a group, this function will return both
++    the group and the resources inside the group
++
++    resource_el -- resource element to get its inner resources
++    """
++    all_inner: Set[Element] = set()
++    to_process = set([resource_el])
++    while to_process:
++        new_inner = get_inner_resources(to_process.pop())
++        to_process.update(set(new_inner) - all_inner)
++        all_inner.update(new_inner)
++    return all_inner
++
++def get_inner_resources(resource_el: Element) -> List[Element]:
+     """
+     Return list of inner resources (direct descendants) of a resource
+     specified as resource_el.
+diff --git a/pcs/lib/commands/resource.py b/pcs/lib/commands/resource.py
+index 1b652ea4..4f975c7f 100644
+--- a/pcs/lib/commands/resource.py
++++ b/pcs/lib/commands/resource.py
+@@ -802,7 +802,28 @@ def disable_safe(env, resource_ids, strict, wait):
+     with resource_environment(
+         env, wait, resource_ids, _ensure_disabled_after_wait(True)
+     ) as resources_section:
+-        _disable_validate_and_edit_cib(env, resources_section, resource_ids)
++        id_provider = IdProvider(resources_section)
++        resource_el_list = _find_resources_or_raise(
++            resources_section,
++            resource_ids
++        )
++        env.report_processor.process_list(
++            _resource_list_enable_disable(
++                resource_el_list,
++                resource.common.disable,
++                id_provider,
++                env.get_cluster_state()
++            )
++        )
++
++        inner_resources_names_set = set()
++        for resource_el in resource_el_list:
++            inner_resources_names_set.update({
++                inner_resource_el.get("id")
++                for inner_resource_el
++                    in resource.common.get_all_inner_resources(resource_el)
++            })
++
+         plaintext_status, transitions, dummy_cib = simulate_cib(
+             env.cmd_runner(),
+             get_root(resources_section)
+@@ -830,6 +851,10 @@ def disable_safe(env, resource_ids, strict, wait):
+                     exclude=resource_ids
+                 )
+             )
++
++        # Stopping a clone stops all its inner resources. That should not block
++        # stopping the clone.
++        other_affected = other_affected - inner_resources_names_set
+         if other_affected:
+             raise LibraryError(
+                 reports.resource_disable_affects_other_resources(
+diff --git a/pcs/lib/pacemaker/live.py b/pcs/lib/pacemaker/live.py
+index 83274af0..233f2e2d 100644
+--- a/pcs/lib/pacemaker/live.py
++++ b/pcs/lib/pacemaker/live.py
+@@ -271,7 +271,7 @@ def simulate_cib_xml(runner, cib_xml):
+         transitions_file = write_tmpfile(None)
+     except OSError as e:
+         raise LibraryError(
+-            reports.cib_simulate_error(format_os_error(e), cib_xml)
++            reports.cib_simulate_error(format_os_error(e))
+         )
+ 
+     cmd = [
+@@ -284,7 +284,7 @@ def simulate_cib_xml(runner, cib_xml):
+     stdout, stderr, retval = runner.run(cmd, stdin_string=cib_xml)
+     if retval != 0:
+         raise LibraryError(
+-            reports.cib_simulate_error(stderr.strip(), cib_xml)
++            reports.cib_simulate_error(stderr.strip())
+         )
+ 
+     try:
+@@ -297,7 +297,7 @@ def simulate_cib_xml(runner, cib_xml):
+         return stdout, transitions_xml, new_cib_xml
+     except OSError as e:
+         raise LibraryError(
+-            reports.cib_simulate_error(format_os_error(e), cib_xml)
++            reports.cib_simulate_error(format_os_error(e))
+         )
+ 
+ def simulate_cib(runner, cib):
+@@ -319,7 +319,7 @@ def simulate_cib(runner, cib):
+         )
+     except (etree.XMLSyntaxError, etree.DocumentInvalid) as e:
+         raise LibraryError(
+-            reports.cib_simulate_error(str(e), cib_xml)
++            reports.cib_simulate_error(str(e))
+         )
+ 
+ ### wait for idle
+diff --git a/pcs/lib/reports.py b/pcs/lib/reports.py
+index 1f081007..c9b4a25d 100644
+--- a/pcs/lib/reports.py
++++ b/pcs/lib/reports.py
+@@ -1935,18 +1935,16 @@ def cib_diff_error(reason, cib_old, cib_new):
+         }
+     )
+ 
+-def cib_simulate_error(reason, cib):
++def cib_simulate_error(reason):
+     """
+     cannot simulate effects a CIB would have on a live cluster
+ 
+     string reason -- error description
+-    string cib -- the CIB whose effects were to be simulated
+     """
+     return ReportItem.error(
+         report_codes.CIB_SIMULATE_ERROR,
+         info={
+             "reason": reason,
+-            "cib": cib,
+         }
+     )
+ 
+diff --git a/pcs_test/tier0/cli/common/test_console_report.py b/pcs_test/tier0/cli/common/test_console_report.py
+index 0d0c2457..29e9614d 100644
+--- a/pcs_test/tier0/cli/common/test_console_report.py
++++ b/pcs_test/tier0/cli/common/test_console_report.py
+@@ -2238,8 +2238,14 @@ class CibDiffError(NameBuildTest):
+ class CibSimulateError(NameBuildTest):
+     def test_success(self):
+         self.assert_message_from_report(
+-            "Unable to simulate changes in CIB: error message\n<cib />",
+-            reports.cib_simulate_error("error message", "<cib />")
++            "Unable to simulate changes in CIB: error message",
++            reports.cib_simulate_error("error message")
++        )
++
++    def test_empty_reason(self):
++        self.assert_message_from_report(
++            "Unable to simulate changes in CIB",
++            reports.cib_simulate_error("")
+         )
+ 
+ 
+diff --git a/pcs_test/tier0/lib/cib/test_resource_common.py b/pcs_test/tier0/lib/cib/test_resource_common.py
+index ebba09da..cd716ba2 100644
+--- a/pcs_test/tier0/lib/cib/test_resource_common.py
++++ b/pcs_test/tier0/lib/cib/test_resource_common.py
+@@ -200,10 +200,12 @@ class FindOneOrMoreResources(TestCase):
+ 
+ 
+ class FindResourcesMixin:
++    _iterable_type = list
++
+     def assert_find_resources(self, input_resource_id, output_resource_ids):
+         self.assertEqual(
+-            output_resource_ids,
+-            [
++            self._iterable_type(output_resource_ids),
++            self._iterable_type([
+                 element.get("id", "")
+                 for element in
+                 self._tested_fn(
+@@ -211,7 +213,7 @@ class FindResourcesMixin:
+                         './/*[@id="{0}"]'.format(input_resource_id)
+                     )
+                 )
+-            ]
++            ])
+         )
+ 
+     def test_group(self):
+@@ -235,6 +237,27 @@ class FindResourcesMixin:
+     def test_bundle_with_primitive(self):
+         self.assert_find_resources("H-bundle", ["H"])
+ 
++    def test_primitive(self):
++        raise NotImplementedError()
++
++    def test_primitive_in_clone(self):
++        raise NotImplementedError()
++
++    def test_primitive_in_master(self):
++        raise NotImplementedError()
++
++    def test_primitive_in_group(self):
++        raise NotImplementedError()
++
++    def test_primitive_in_bundle(self):
++        raise NotImplementedError()
++
++    def test_cloned_group(self):
++        raise NotImplementedError()
++
++    def test_mastered_group(self):
++        raise NotImplementedError()
++
+ 
+ class FindPrimitives(TestCase, FindResourcesMixin):
+     _tested_fn = staticmethod(common.find_primitives)
+@@ -266,6 +289,37 @@ class FindPrimitives(TestCase, FindResourcesMixin):
+         self.assert_find_resources("F-master", ["F1", "F2"])
+ 
+ 
++class GetAllInnerResources(TestCase, FindResourcesMixin):
++    _iterable_type = set
++    _tested_fn = staticmethod(common.get_all_inner_resources)
++
++    def test_primitive(self):
++        self.assert_find_resources("A", set())
++
++    def test_primitive_in_clone(self):
++        self.assert_find_resources("B", set())
++
++    def test_primitive_in_master(self):
++        self.assert_find_resources("C", set())
++
++    def test_primitive_in_group(self):
++        self.assert_find_resources("D1", set())
++        self.assert_find_resources("D2", set())
++        self.assert_find_resources("E1", set())
++        self.assert_find_resources("E2", set())
++        self.assert_find_resources("F1", set())
++        self.assert_find_resources("F2", set())
++
++    def test_primitive_in_bundle(self):
++        self.assert_find_resources("H", set())
++
++    def test_cloned_group(self):
++        self.assert_find_resources("E-clone", {"E", "E1", "E2"})
++
++    def test_mastered_group(self):
++        self.assert_find_resources("F-master", {"F", "F1", "F2"})
++
++
+ class GetInnerResources(TestCase, FindResourcesMixin):
+     _tested_fn = staticmethod(common.get_inner_resources)
+ 
+diff --git a/pcs_test/tier0/lib/commands/resource/test_resource_enable_disable.py b/pcs_test/tier0/lib/commands/resource/test_resource_enable_disable.py
+index 634f0f33..62899940 100644
+--- a/pcs_test/tier0/lib/commands/resource/test_resource_enable_disable.py
++++ b/pcs_test/tier0/lib/commands/resource/test_resource_enable_disable.py
+@@ -1729,12 +1729,6 @@ class DisableSimulate(TestCase):
+                 fixture.error(
+                     report_codes.CIB_SIMULATE_ERROR,
+                     reason="some stderr",
+-                    # curently, there is no way to normalize xml with our lxml
+-                    # version 4.2.3, so this never passes equality tests
+-                    # cib=self.config.calls.get(
+-                    #         "runner.pcmk.simulate_cib"
+-                    #     ).check_stdin.expected_stdin
+-                    # ,
+                 ),
+             ],
+             expected_in_processor=False
+@@ -1988,12 +1982,6 @@ class DisableSafeMixin():
+                 fixture.error(
+                     report_codes.CIB_SIMULATE_ERROR,
+                     reason="some stderr",
+-                    # curently, there is no way to normalize xml with our lxml
+-                    # version 4.2.3, so this never passes equality tests
+-                    # cib=self.config.calls.get(
+-                    #         "runner.pcmk.simulate_cib"
+-                    #     ).check_stdin.expected_stdin
+-                    # ,
+                 ),
+             ],
+             expected_in_processor=False
+@@ -2118,6 +2106,236 @@ class DisableSafeMixin():
+             fixture.report_resource_not_running("B"),
+         ])
+ 
++    def test_inner_resources(self, mock_write_tmpfile):
++        cib_xml = """
++            <resources>
++                <primitive id="A" />
++                <clone id="B-clone">
++                    <primitive id="B" />
++                </clone>
++                <master id="C-master">
++                    <primitive id="C" />
++                </master>
++                <group id="D">
++                    <primitive id="D1" />
++                    <primitive id="D2" />
++                </group>
++                <clone id="E-clone">
++                    <group id="E">
++                        <primitive id="E1" />
++                        <primitive id="E2" />
++                    </group>
++                </clone>
++                <master id="F-master">
++                    <group id="F">
++                        <primitive id="F1" />
++                        <primitive id="F2" />
++                    </group>
++                </master>
++                <bundle id="G-bundle" />
++                <bundle id="H-bundle">
++                    <primitive id="H" />
++                </bundle>
++            </resources>
++        """
++        status_xml = """
++            <resources>
++                <resource id="A" managed="true" />
++                <clone id="B-clone" managed="true" multi_state="false"
++                    unique="false"
++                >
++                    <resource id="B" managed="true" />
++                    <resource id="B" managed="true" />
++                </clone>
++                <clone id="C-master" managed="true" multi_state="true"
++                    unique="false"
++                >
++                    <resource id="C" managed="true" />
++                    <resource id="C" managed="true" />
++                </clone>
++                <group id="D" number_resources="2">
++                    <resource id="D1" managed="true" />
++                    <resource id="D2" managed="true" />
++                </group>
++                <clone id="E-clone" managed="true" multi_state="false"
++                    unique="false"
++                >
++                    <group id="E:0" number_resources="2">
++                        <resource id="E1" managed="true" />
++                        <resource id="E2" managed="true" />
++                    </group>
++                    <group id="E:1" number_resources="2">
++                        <resource id="E1" managed="true" />
++                        <resource id="E2" managed="true" />
++                    </group>
++                </clone>
++                <clone id="F-master" managed="true" multi_state="true"
++                    unique="false"
++                >
++                    <group id="F:0" number_resources="2">
++                        <resource id="F1" managed="true" />
++                        <resource id="F2" managed="true" />
++                    </group>
++                    <group id="F:1" number_resources="2">
++                        <resource id="F1" managed="true" />
++                        <resource id="F2" managed="true" />
++                    </group>
++                </clone>
++                <bundle id="H-bundle" type="docker" image="pcmktest:http"
++                    unique="false" managed="true" failed="false"
++                >
++                    <replica id="0">
++                        <resource id="H" />
++                    </replica>
++                    <replica id="1">
++                        <resource id="H" />
++                    </replica>
++                </bundle>
++            </resources>
++        """
++        synapses = []
++        index = 0
++        for res_name, is_clone in [
++            ("A", False),
++            ("B", True),
++            ("C", True),
++            ("D1", False),
++            ("D2", False),
++            ("E1", True),
++            ("E2", True),
++            ("F1", True),
++            ("F2", True),
++            ("H", False),
++        ]:
++            if is_clone:
++                synapses.append(f"""
++                  <synapse>
++                    <action_set>
++                      <rsc_op id="{index}" operation="stop" on_node="node1">
++                        <primitive id="{res_name}" long_id="{res_name}:0" />
++                      </rsc_op>
++                    </action_set>
++                  </synapse>
++                  <synapse>
++                    <action_set>
++                      <rsc_op id="{index + 1}" operation="stop" on_node="node2">
++                        <primitive id="{res_name}" long_id="{res_name}:1" />
++                      </rsc_op>
++                    </action_set>
++                  </synapse>
++                """)
++                index += 2
++            else:
++                synapses.append(f"""
++                  <synapse>
++                    <action_set>
++                      <rsc_op id="{index}" operation="stop" on_node="node1">
++                        <primitive id="{res_name}" />
++                      </rsc_op>
++                    </action_set>
++                  </synapse>
++                """)
++                index += 1
++        transitions_xml = (
++            "<transition_graph>" + "\n".join(synapses) + "</transition_graph>"
++        )
++
++        self.tmpfile_transitions.read.return_value = transitions_xml
++        mock_write_tmpfile.side_effect = [
++            self.tmpfile_new_cib, self.tmpfile_transitions,
++            AssertionError("No other write_tmpfile call expected")
++        ]
++        (self.config
++            .runner.cib.load(resources=cib_xml)
++            .runner.pcmk.load_state(resources=status_xml)
++        )
++        self.config.runner.pcmk.simulate_cib(
++            self.tmpfile_new_cib.name,
++            self.tmpfile_transitions.name,
++            stdout="simulate output",
++            resources="""
++                <resources>
++                    <primitive id="A" />
++                    <clone id="B-clone">
++                        <meta_attributes id="B-clone-meta_attributes">
++                            <nvpair name="target-role" value="Stopped"
++                                id="B-clone-meta_attributes-target-role"
++                            />
++                        </meta_attributes>
++                        <primitive id="B" />
++                    </clone>
++                    <master id="C-master">
++                        <meta_attributes id="C-master-meta_attributes">
++                            <nvpair name="target-role" value="Stopped"
++                                id="C-master-meta_attributes-target-role"
++                            />
++                        </meta_attributes>
++                        <primitive id="C" />
++                    </master>
++                    <group id="D">
++                        <meta_attributes id="D-meta_attributes">
++                            <nvpair name="target-role" value="Stopped"
++                                id="D-meta_attributes-target-role"
++                            />
++                        </meta_attributes>
++                        <primitive id="D1" />
++                        <primitive id="D2" />
++                    </group>
++                    <clone id="E-clone">
++                        <meta_attributes id="E-clone-meta_attributes">
++                            <nvpair name="target-role" value="Stopped"
++                                id="E-clone-meta_attributes-target-role"
++                            />
++                        </meta_attributes>
++                        <group id="E">
++                            <primitive id="E1" />
++                            <primitive id="E2" />
++                        </group>
++                    </clone>
++                    <master id="F-master">
++                        <meta_attributes id="F-master-meta_attributes">
++                            <nvpair name="target-role" value="Stopped"
++                                id="F-master-meta_attributes-target-role"
++                            />
++                        </meta_attributes>
++                        <group id="F">
++                            <primitive id="F1" />
++                            <primitive id="F2" />
++                        </group>
++                    </master>
++                    <bundle id="G-bundle" />
++                    <bundle id="H-bundle">
++                        <meta_attributes id="H-bundle-meta_attributes">
++                            <nvpair name="target-role" value="Stopped"
++                                id="H-bundle-meta_attributes-target-role"
++                            />
++                        </meta_attributes>
++                        <primitive id="H" />
++                    </bundle>
++                </resources>
++            """
++        )
++        self.env_assist.assert_raise_library_error(
++            lambda: resource.disable_safe(
++                self.env_assist.get_env(),
++                ["B-clone", "C-master", "D", "E-clone", "F-master", "H-bundle"],
++                self.strict,
++                False,
++            ),
++            [
++                fixture.error(
++                    report_codes.RESOURCE_DISABLE_AFFECTS_OTHER_RESOURCES,
++                    disabled_resource_list=[
++                        "B-clone", "C-master", "D", "E-clone", "F-master",
++                        "H-bundle"
++                    ],
++                    affected_resource_list=["A"],
++                    crm_simulate_plaintext_output="simulate output",
++                ),
++            ],
++            expected_in_processor=False
++        )
++
+ @mock.patch("pcs.lib.pacemaker.live.write_tmpfile")
+ class DisableSafe(DisableSafeMixin, TestCase):
+     strict = False
+diff --git a/pcs_test/tier0/lib/pacemaker/test_live.py b/pcs_test/tier0/lib/pacemaker/test_live.py
+index dfebcb17..1ea5454e 100644
+--- a/pcs_test/tier0/lib/pacemaker/test_live.py
++++ b/pcs_test/tier0/lib/pacemaker/test_live.py
+@@ -686,7 +686,6 @@ class SimulateCibXml(LibraryPacemakerTest):
+             fixture.error(
+                 report_codes.CIB_SIMULATE_ERROR,
+                 reason="some error",
+-                cib="<cib />",
+             ),
+         )
+         mock_runner.run.assert_not_called()
+@@ -703,7 +702,6 @@ class SimulateCibXml(LibraryPacemakerTest):
+             fixture.error(
+                 report_codes.CIB_SIMULATE_ERROR,
+                 reason="some error",
+-                cib="<cib />",
+             ),
+         )
+         mock_runner.run.assert_not_called()
+@@ -729,7 +727,6 @@ class SimulateCibXml(LibraryPacemakerTest):
+             fixture.error(
+                 report_codes.CIB_SIMULATE_ERROR,
+                 reason="some error",
+-                cib="<cib />",
+             ),
+         )
+ 
+@@ -755,7 +752,6 @@ class SimulateCibXml(LibraryPacemakerTest):
+             fixture.error(
+                 report_codes.CIB_SIMULATE_ERROR,
+                 reason="some error",
+-                cib="<cib />",
+             ),
+         )
+ 
+@@ -782,7 +778,6 @@ class SimulateCibXml(LibraryPacemakerTest):
+             fixture.error(
+                 report_codes.CIB_SIMULATE_ERROR,
+                 reason="some error",
+-                cib="<cib />",
+             ),
+         )
+ 
+@@ -819,7 +814,6 @@ class SimulateCib(TestCase):
+                     "Start tag expected, '<' not found, line 1, column 1 "
+                     "(<string>, line 1)"
+                 ),
+-                cib=self.cib_xml,
+             ),
+         )
+ 
+@@ -835,7 +829,6 @@ class SimulateCib(TestCase):
+                     "Start tag expected, '<' not found, line 1, column 1 "
+                     "(<string>, line 1)"
+                 ),
+-                cib=self.cib_xml,
+             ),
+         )
+ 
+-- 
+2.21.1
+
diff --git a/SOURCES/bz1783106-01-fix-sinatra-wrapper-performance-issue.patch b/SOURCES/bz1783106-01-fix-sinatra-wrapper-performance-issue.patch
new file mode 100644
index 0000000..d79e1a2
--- /dev/null
+++ b/SOURCES/bz1783106-01-fix-sinatra-wrapper-performance-issue.patch
@@ -0,0 +1,1295 @@
+From d54c102cee7a61dd3eccd62d60af218aa97a85fc Mon Sep 17 00:00:00 2001
+From: Ivan Devat <idevat@redhat.com>
+Date: Thu, 9 Jan 2020 15:53:37 +0100
+Subject: [PATCH 6/7] squash bz1783106 fix-sinatra-wrapper-performance-issue
+
+create prototype of tornado - thin communication
+
+put socket path to settings
+
+don't mix logs from threads in ruby daemon
+
+run ruby daemon via systemd units
+
+support trailing slash by gui urls e.g. /manage/
+
+decode body from ruby response for log
+
+configure ruby wrapper by socket path
+
+remove env values not used for ruby calls any more
+
+deal with ruby daemon communication issues
+
+fix tests
+
+cleanup ruby server code
+
+deal with errors from ruby daemon in python daemon
+
+remove unused cmdline wrapper
+
+add ruby daemon infrastructure to spec etc.
+
+stop logging to stderr from ruby daemon
+
+fix spec file
+
+* add missing cp for new rubygems
+* make sure to start the new ruby daemon on package upgrade
+* tests: give the new daemon enough time to start
+---
+ .gitlab-ci.yml                            |   7 +-
+ Makefile                                  |   6 +
+ pcs.spec.in                               |  30 ++++-
+ pcs/daemon/app/sinatra_ui.py              |   2 +-
+ pcs/daemon/env.py                         |  36 ------
+ pcs/daemon/ruby_pcsd.py                   | 136 +++++++++++-----------
+ pcs/daemon/run.py                         |   8 +-
+ pcs/settings_default.py                   |   1 +
+ pcs_test/tier0/daemon/app/fixtures_app.py |   3 +-
+ pcs_test/tier0/daemon/test_env.py         |  66 +----------
+ pcs_test/tier0/daemon/test_ruby_pcsd.py   |  13 +--
+ pcsd/Gemfile                              |   1 +
+ pcsd/Gemfile.lock                         |   7 ++
+ pcsd/Makefile                             |   3 +
+ pcsd/bootstrap.rb                         |  20 +++-
+ pcsd/cfgsync.rb                           |   6 +-
+ pcsd/pcs.rb                               |   9 +-
+ pcsd/pcsd-cli.rb                          |   3 +-
+ pcsd/pcsd-ruby.service                    |  20 ++++
+ pcsd/pcsd.conf                            |   4 +
+ pcsd/pcsd.rb                              |  31 ++---
+ pcsd/pcsd.service                         |   2 +
+ pcsd/pcsd.service-runner                  |  24 ++++
+ pcsd/remote.rb                            |   6 +-
+ pcsd/rserver.rb                           |  98 ++++++++++++++++
+ pcsd/settings.rb                          |   1 +
+ pcsd/settings.rb.debian                   |   1 +
+ pcsd/sinatra_cmdline_wrapper.rb           |  63 ----------
+ 28 files changed, 330 insertions(+), 277 deletions(-)
+ create mode 100644 pcsd/pcsd-ruby.service
+ create mode 100644 pcsd/pcsd.service-runner
+ create mode 100644 pcsd/rserver.rb
+ delete mode 100644 pcsd/sinatra_cmdline_wrapper.rb
+
+diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
+index 23ab56a9..92b32033 100644
+--- a/.gitlab-ci.yml
++++ b/.gitlab-ci.yml
+@@ -116,8 +116,11 @@ python_smoke_tests:
+         procps-ng
+         rpms/pcs-ci-*.rpm
+       "
+-    - /usr/sbin/pcsd & # start pcsd
+-    - sleep 10 # wait for pcsd to start up properly
++    - export GEM_HOME=/usr/lib/pcsd/vendor/bundle/ruby
++    - /usr/lib/pcsd/pcsd & # start pcsd (ruby - thin)
++    - sleep 10 # wait for pcsd (ruby - thin) to start up properly
++    - /usr/sbin/pcsd & # start pcsd (python - tornado)
++    - sleep 10 # wait for pcsd (python - tornado) to start up properly
+     - pcs_test/smoke.sh
+   artifacts:
+     paths:
+diff --git a/Makefile b/Makefile
+index f2b0d9b9..b9f64acd 100644
+--- a/Makefile
++++ b/Makefile
+@@ -267,7 +267,12 @@ ifeq ($(IS_DEBIAN)$(IS_SYSTEMCTL),truefalse)
+ else
+ 	install -d ${DEST_SYSTEMD_SYSTEM}
+ 	install -m 644 ${SYSTEMD_SERVICE_FILE} ${DEST_SYSTEMD_SYSTEM}/pcsd.service
++	install -m 644 pcsd/pcsd-ruby.service ${DEST_SYSTEMD_SYSTEM}/pcsd-ruby.service
+ endif
++	# ${DEST_LIB}/pcsd/pcsd holds the selinux context
++	install -m 755 pcsd/pcsd.service-runner ${DEST_LIB}/pcsd/pcsd
++	rm ${DEST_LIB}/pcsd/pcsd.service-runner
++	
+ 	install -m 700 -d ${DESTDIR}/var/lib/pcsd
+ 	install -m 644 -D pcsd/pcsd.logrotate ${DESTDIR}/etc/logrotate.d/pcsd
+ 	install -m644 -D pcsd/pcsd.8 ${DEST_MAN}/pcsd.8
+@@ -293,6 +298,7 @@ ifeq ($(IS_DEBIAN)$(IS_SYSTEMCTL),truefalse)
+ 	rm -f ${DEST_INIT}/pcsd
+ else
+ 	rm -f ${DEST_SYSTEMD_SYSTEM}/pcsd.service
++	rm -f ${DEST_SYSTEMD_SYSTEM}/pcsd-ruby.service
+ 	rm -f ${DEST_SYSTEMD_SYSTEM}/pcs_snmp_agent.service
+ endif
+ 	rm -f ${DESTDIR}/etc/pam.d/pcsd
+diff --git a/pcs.spec.in b/pcs.spec.in
+index 5195dc51..32fbf614 100644
+--- a/pcs.spec.in
++++ b/pcs.spec.in
+@@ -28,7 +28,9 @@ Summary: Pacemaker Configuration System
+ %global pyagentx_version   0.4.pcs.2
+ %global tornado_version    6.0.3
+ %global version_rubygem_backports  3.11.4
++%global version_rubygem_daemons  1.3.1
+ %global version_rubygem_ethon  0.11.0
++%global version_rubygem_eventmachine  1.2.7
+ %global version_rubygem_ffi  1.9.25
+ %global version_rubygem_json  2.1.0
+ %global version_rubygem_mustermann  1.0.3
+@@ -37,6 +39,7 @@ Summary: Pacemaker Configuration System
+ %global version_rubygem_rack_protection  2.0.4
+ %global version_rubygem_rack_test  1.0.0
+ %global version_rubygem_sinatra  2.0.4
++%global version_rubygem_thin  1.7.2
+ %global version_rubygem_tilt  2.0.9
+ 
+ # We do not use _libdir macro because upstream is not prepared for it.
+@@ -83,6 +86,9 @@ Source89: https://rubygems.org/downloads/rack-protection-%{version_rubygem_rack_
+ Source90: https://rubygems.org/downloads/rack-test-%{version_rubygem_rack_test}.gem
+ Source91: https://rubygems.org/downloads/sinatra-%{version_rubygem_sinatra}.gem
+ Source92: https://rubygems.org/downloads/tilt-%{version_rubygem_tilt}.gem
++Source93: https://rubygems.org/downloads/eventmachine-%{version_rubygem_eventmachine}.gem
++Source94: https://rubygems.org/downloads/daemons-%{version_rubygem_daemons}.gem
++Source95: https://rubygems.org/downloads/thin-%{version_rubygem_thin}.gem
+ 
+ Source100: https://github.com/idevat/pcs-web-ui/archive/%{ui_commit}/%{ui_src_name}.tar.gz
+ Source101: https://github.com/idevat/pcs-web-ui/releases/download/%{ui_commit}/pcs-web-ui-node-modules-%{ui_commit}.tar.xz
+@@ -164,7 +170,9 @@ Recommends: overpass-fonts
+ 
+ Provides: bundled(tornado) = %{tornado_version}
+ Provides: bundled(backports) = %{version_rubygem_backports}
++Provides: bundled(daemons) = %{version_rubygem_daemons}
+ Provides: bundled(ethon) = %{version_rubygem_ethon}
++Provides: bundled(eventmachine) = %{version_rubygem_eventmachine}
+ Provides: bundled(ffi) = %{version_rubygem_ffi}
+ Provides: bundled(json) = %{version_rubygem_json}
+ Provides: bundled(mustermann) = %{version_rubygem_mustermann}
+@@ -173,6 +181,7 @@ Provides: bundled(rack) = %{version_rubygem_rack}
+ Provides: bundled(rack) = %{version_rubygem_rack_protection}
+ Provides: bundled(rack) = %{version_rubygem_rack_test}
+ Provides: bundled(sinatra) = %{version_rubygem_sinatra}
++Provides: bundled(thin) = %{version_rubygem_thin}
+ Provides: bundled(tilt) = %{version_rubygem_tilt}
+ 
+ %description
+@@ -228,6 +237,9 @@ cp -f %SOURCE89 pcsd/vendor/cache
+ cp -f %SOURCE90 pcsd/vendor/cache
+ cp -f %SOURCE91 pcsd/vendor/cache
+ cp -f %SOURCE92 pcsd/vendor/cache
++cp -f %SOURCE93 pcsd/vendor/cache
++cp -f %SOURCE94 pcsd/vendor/cache
++cp -f %SOURCE95 pcsd/vendor/cache
+ 
+ 
+ # 3) dir for python bundles
+@@ -262,15 +274,18 @@ gem install \
+   --force --verbose -l --no-user-install %{gem_install_params} \
+   -i %{rubygem_bundle_dir} \
+   %{rubygem_cache_dir}/backports-%{version_rubygem_backports}.gem \
++  %{rubygem_cache_dir}/daemons-%{version_rubygem_daemons}.gem \
+   %{rubygem_cache_dir}/ethon-%{version_rubygem_ethon}.gem \
++  %{rubygem_cache_dir}/eventmachine-%{version_rubygem_eventmachine}.gem \
+   %{rubygem_cache_dir}/ffi-%{version_rubygem_ffi}.gem \
+   %{rubygem_cache_dir}/json-%{version_rubygem_json}.gem \
+   %{rubygem_cache_dir}/mustermann-%{version_rubygem_mustermann}.gem \
+   %{rubygem_cache_dir}/open4-%{version_rubygem_open4}.gem \
+-  %{rubygem_cache_dir}/rack-%{version_rubygem_rack}.gem \
+   %{rubygem_cache_dir}/rack-protection-%{version_rubygem_rack_protection}.gem \
+   %{rubygem_cache_dir}/rack-test-%{version_rubygem_rack_test}.gem \
++  %{rubygem_cache_dir}/rack-%{version_rubygem_rack}.gem \
+   %{rubygem_cache_dir}/sinatra-%{version_rubygem_sinatra}.gem \
++  %{rubygem_cache_dir}/thin-%{version_rubygem_thin}.gem \
+   %{rubygem_cache_dir}/tilt-%{version_rubygem_tilt}.gem \
+   -- '--with-ldflags=-Wl,-z,relro -Wl,-z,ibt -Wl,-z,now -Wl,--gc-sections' \
+      '--with-cflags=-O2 -ffunction-sections'
+@@ -324,20 +339,31 @@ rm -r -v ${pcsd_dir}/test
+ # remove javascript testing files
+ rm -r -v ${pcsd_dir}/public/js/dev
+ 
++%posttrans
++# Make sure the new version of the daemon is runnning.
++# Also, make sure to start pcsd-ruby if it hasn't been started or even
++# installed before. This is done by restarting pcsd.service.
++%{_bindir}/systemctl daemon-reload
++%{_bindir}/systemctl try-restart pcsd.service
++
++
+ %post
+ %systemd_post pcsd.service
++%systemd_post pcsd-ruby.service
+ 
+ %post -n %{pcs_snmp_pkg_name}
+ %systemd_post pcs_snmp_agent.service
+ 
+ %preun
+ %systemd_preun pcsd.service
++%systemd_preun pcsd-ruby.service
+ 
+ %preun -n %{pcs_snmp_pkg_name}
+ %systemd_preun pcs_snmp_agent.service
+ 
+ %postun
+ %systemd_postun_with_restart pcsd.service
++%systemd_postun_with_restart pcsd-ruby.service
+ 
+ %postun -n %{pcs_snmp_pkg_name}
+ %systemd_postun_with_restart pcs_snmp_agent.service
+@@ -357,6 +383,7 @@ rm -r -v ${pcsd_dir}/public/js/dev
+ %{pcs_libdir}/pcsd/.bundle/config
+ %{pcs_libdir}/pcs/bundled/packages/tornado*
+ %{_unitdir}/pcsd.service
++%{_unitdir}/pcsd-ruby.service
+ %{_datadir}/bash-completion/completions/pcs
+ %{_sharedstatedir}/pcsd
+ %{_sysconfdir}/pam.d/pcsd
+@@ -374,6 +401,7 @@ rm -r -v ${pcsd_dir}/public/js/dev
+ %{_mandir}/man8/pcsd.*
+ %exclude %{pcs_libdir}/pcsd/*.debian
+ %exclude %{pcs_libdir}/pcsd/pcsd.service
++%exclude %{pcs_libdir}/pcsd/pcsd-ruby.service
+ %exclude %{pcs_libdir}/pcsd/pcsd.conf
+ %exclude %{pcs_libdir}/pcsd/pcsd.8
+ %exclude %{pcs_libdir}/pcsd/public/js/dev/*
+diff --git a/pcs/daemon/app/sinatra_ui.py b/pcs/daemon/app/sinatra_ui.py
+index 1348134d..5315a48f 100644
+--- a/pcs/daemon/app/sinatra_ui.py
++++ b/pcs/daemon/app/sinatra_ui.py
+@@ -153,7 +153,7 @@ def get_routes(
+         # The protection by session was moved from ruby code to python code
+         # (tornado).
+         (
+-            r"/($|manage$|permissions$|managec/.+/main)",
++            r"/($|manage/?$|permissions/?$|managec/.+/main)",
+             SinatraGuiProtected,
+             {**sessions, **ruby_wrapper}
+         ),
+diff --git a/pcs/daemon/env.py b/pcs/daemon/env.py
+index 54a9819f..26cdcf9b 100644
+--- a/pcs/daemon/env.py
++++ b/pcs/daemon/env.py
+@@ -15,7 +15,6 @@ from pcs.lib.validate import is_port_number
+ # Relative location instead of system location is used for development purposes.
+ PCSD_LOCAL_DIR = realpath(dirname(abspath(__file__)) + "/../../pcsd")
+ 
+-PCSD_CMDLINE_ENTRY_RB_SCRIPT = "sinatra_cmdline_wrapper.rb"
+ PCSD_STATIC_FILES_DIR_NAME = "public"
+ 
+ PCSD_PORT = "PCSD_PORT"
+@@ -26,12 +25,8 @@ NOTIFY_SOCKET = "NOTIFY_SOCKET"
+ PCSD_DEBUG = "PCSD_DEBUG"
+ PCSD_DISABLE_GUI = "PCSD_DISABLE_GUI"
+ PCSD_SESSION_LIFETIME = "PCSD_SESSION_LIFETIME"
+-GEM_HOME = "GEM_HOME"
+ PCSD_DEV = "PCSD_DEV"
+-PCSD_CMDLINE_ENTRY = "PCSD_CMDLINE_ENTRY"
+ PCSD_STATIC_FILES_DIR = "PCSD_STATIC_FILES_DIR"
+-HTTPS_PROXY = "HTTPS_PROXY"
+-NO_PROXY = "NO_PROXY"
+ 
+ Env = namedtuple("Env", [
+     PCSD_PORT,
+@@ -42,11 +37,7 @@ Env = namedtuple("Env", [
+     PCSD_DEBUG,
+     PCSD_DISABLE_GUI,
+     PCSD_SESSION_LIFETIME,
+-    GEM_HOME,
+-    PCSD_CMDLINE_ENTRY,
+     PCSD_STATIC_FILES_DIR,
+-    HTTPS_PROXY,
+-    NO_PROXY,
+     PCSD_DEV,
+     "has_errors",
+ ])
+@@ -62,11 +53,7 @@ def prepare_env(environ, logger=None):
+         loader.pcsd_debug(),
+         loader.pcsd_disable_gui(),
+         loader.session_lifetime(),
+-        loader.gem_home(),
+-        loader.pcsd_cmdline_entry(),
+         loader.pcsd_static_files_dir(),
+-        loader.https_proxy(),
+-        loader.no_proxy(),
+         loader.pcsd_dev(),
+         loader.has_errors(),
+     )
+@@ -173,20 +160,6 @@ class EnvLoader:
+     def pcsd_debug(self):
+         return self.__has_true_in_environ(PCSD_DEBUG)
+ 
+-    def gem_home(self):
+-        if settings.pcsd_gem_path is None:
+-            return None
+-        return self.__in_pcsd_path(
+-            settings.pcsd_gem_path,
+-            "Ruby gem location"
+-        )
+-
+-    def pcsd_cmdline_entry(self):
+-        return self.__in_pcsd_path(
+-            PCSD_CMDLINE_ENTRY_RB_SCRIPT,
+-            "Ruby handlers entrypoint"
+-        )
+-
+     def pcsd_static_files_dir(self):
+         return self.__in_pcsd_path(
+             PCSD_STATIC_FILES_DIR_NAME,
+@@ -194,15 +167,6 @@ class EnvLoader:
+             existence_required=not self.pcsd_disable_gui()
+         )
+ 
+-    def https_proxy(self):
+-        for key in ["https_proxy", HTTPS_PROXY, "all_proxy", "ALL_PROXY"]:
+-            if key in self.environ:
+-                return self.environ[key]
+-        return None
+-
+-    def no_proxy(self):
+-        return self.environ.get("no_proxy", self.environ.get(NO_PROXY, None))
+-
+     @lru_cache()
+     def pcsd_dev(self):
+         return self.__has_true_in_environ(PCSD_DEV)
+diff --git a/pcs/daemon/ruby_pcsd.py b/pcs/daemon/ruby_pcsd.py
+index 5bdaffeb..e612f8da 100644
+--- a/pcs/daemon/ruby_pcsd.py
++++ b/pcs/daemon/ruby_pcsd.py
+@@ -1,14 +1,16 @@
+ import json
+ import logging
+-import os.path
+-from base64 import b64decode
++from base64 import b64decode, b64encode, binascii
+ from collections import namedtuple
+ from time import time as now
+ 
+-from tornado.gen import multi, convert_yielded
++import pycurl
++from tornado.gen import convert_yielded
+ from tornado.web import HTTPError
+ from tornado.httputil import split_host_and_port, HTTPServerRequest
+-from tornado.process import Subprocess
++from tornado.httpclient import AsyncHTTPClient
++from tornado.curl_httpclient import CurlError
++
+ 
+ from pcs.daemon import log
+ 
+@@ -33,7 +35,7 @@ class SinatraResult(namedtuple("SinatraResult", "headers, status, body")):
+         return cls(
+             response["headers"],
+             response["status"],
+-            b64decode(response["body"])
++            response["body"]
+         )
+ 
+ def log_group_id_generator():
+@@ -58,24 +60,12 @@ def process_response_logs(rb_log_list):
+             group_id=group_id
+         )
+ 
+-def log_communication(request_json, stdout, stderr):
+-    log.pcsd.debug("Request for ruby pcsd wrapper: '%s'", request_json)
+-    log.pcsd.debug("Response stdout from ruby pcsd wrapper: '%s'", stdout)
+-    log.pcsd.debug("Response stderr from ruby pcsd wrapper: '%s'", stderr)
+-
+ class Wrapper:
+-    # pylint: disable=too-many-instance-attributes
+-    def __init__(
+-        self, pcsd_cmdline_entry, gem_home=None, debug=False,
+-        ruby_executable="ruby", https_proxy=None, no_proxy=None
+-    ):
+-        self.__gem_home = gem_home
+-        self.__pcsd_cmdline_entry = pcsd_cmdline_entry
+-        self.__pcsd_dir = os.path.dirname(pcsd_cmdline_entry)
+-        self.__ruby_executable = ruby_executable
++    def __init__(self, pcsd_ruby_socket, debug=False):
+         self.__debug = debug
+-        self.__https_proxy = https_proxy
+-        self.__no_proxy = no_proxy
++        AsyncHTTPClient.configure('tornado.curl_httpclient.CurlAsyncHTTPClient')
++        self.__client = AsyncHTTPClient()
++        self.__pcsd_ruby_socket = pcsd_ruby_socket
+ 
+     @staticmethod
+     def get_sinatra_request(request: HTTPServerRequest):
+@@ -102,55 +92,76 @@ class Wrapper:
+             "rack.input": request.body.decode("utf8"),
+         }}
+ 
++    def prepare_curl_callback(self, curl):
++        curl.setopt(pycurl.UNIX_SOCKET_PATH, self.__pcsd_ruby_socket)
++        curl.setopt(pycurl.TIMEOUT, 70)
++
+     async def send_to_ruby(self, request_json):
+-        env = {
+-            "PCSD_DEBUG": "true" if self.__debug else "false"
+-        }
+-        if self.__gem_home is not None:
+-            env["GEM_HOME"] = self.__gem_home
+-
+-        if self.__no_proxy is not None:
+-            env["NO_PROXY"] = self.__no_proxy
+-        if self.__https_proxy is not None:
+-            env["HTTPS_PROXY"] = self.__https_proxy
+-
+-        pcsd_ruby = Subprocess(
+-            [
+-                self.__ruby_executable, "-I",
+-                self.__pcsd_dir,
+-                self.__pcsd_cmdline_entry
+-            ],
+-            stdin=Subprocess.STREAM,
+-            stdout=Subprocess.STREAM,
+-            stderr=Subprocess.STREAM,
+-            env=env
+-        )
+-        await pcsd_ruby.stdin.write(str.encode(request_json))
+-        pcsd_ruby.stdin.close()
+-        return await multi([
+-            pcsd_ruby.stdout.read_until_close(),
+-            pcsd_ruby.stderr.read_until_close(),
+-            pcsd_ruby.wait_for_exit(raise_error=False),
+-        ])
++        # We do not need location for communication with ruby itself since we
++        # communicate via unix socket. But it is required by AsyncHTTPClient so
++        # "localhost" is used.
++        tornado_request = b64encode(request_json.encode()).decode()
++        return (await self.__client.fetch(
++            "localhost",
++            method="POST",
++            body=f"TORNADO_REQUEST={tornado_request}",
++            prepare_curl_callback=self.prepare_curl_callback,
++        )).body
+ 
+     async def run_ruby(self, request_type, request=None):
++        """
++        request_type: SINATRA_GUI|SINATRA_REMOTE|SYNC_CONFIGS
++        request: result of get_sinatra_request|None
++            i.e. it has structure returned by get_sinatra_request if the request
++            is not None - so we can get SERVER_NAME and  SERVER_PORT
++        """
+         request = request or {}
+         request.update({"type": request_type})
+         request_json = json.dumps(request)
+-        stdout, stderr, dummy_status = await self.send_to_ruby(request_json)
++
++        if self.__debug:
++            log.pcsd.debug("Ruby daemon request: '%s'", request_json)
+         try:
+-            response = json.loads(stdout)
+-        except json.JSONDecodeError as e:
+-            self.__log_bad_response(
+-                f"Cannot decode json from ruby pcsd wrapper: '{e}'",
+-                request_json, stdout, stderr
++            ruby_response = await self.send_to_ruby(request_json)
++        except CurlError as e:
++            log.pcsd.error(
++                "Cannot connect to ruby daemon (message: '%s'). Is it running?",
++                e
+             )
+             raise HTTPError(500)
+-        else:
+-            if self.__debug:
+-                log_communication(request_json, stdout, stderr)
+-            process_response_logs(response["logs"])
++
++        try:
++            response = json.loads(ruby_response)
++            if "error" in response:
++                log.pcsd.error(
++                    "Ruby daemon response contains an error: '%s'",
++                    json.dumps(response)
++                )
++                raise HTTPError(500)
++
++            logs = response.pop("logs", [])
++            if "body" in response:
++                body = b64decode(response.pop("body"))
++                if self.__debug:
++                    log.pcsd.debug(
++                        "Ruby daemon response (without logs and body): '%s'",
++                        json.dumps(response)
++                    )
++                    log.pcsd.debug("Ruby daemon response body: '%s'", body)
++                response["body"] = body
++
++            elif self.__debug:
++                log.pcsd.debug(
++                    "Ruby daemon response (without logs): '%s'",
++                    json.dumps(response)
++                )
++            process_response_logs(logs)
+             return response
++        except (json.JSONDecodeError, binascii.Error) as e:
++            if self.__debug:
++                log.pcsd.debug("Ruby daemon response: '%s'", ruby_response)
++            log.pcsd.error("Cannot decode json from ruby pcsd wrapper: '%s'", e)
++            raise HTTPError(500)
+ 
+     async def request_gui(
+         self, request: HTTPServerRequest, user, groups, is_authenticated
+@@ -186,8 +197,3 @@ class Wrapper:
+         except HTTPError:
+             log.pcsd.error("Config synchronization failed")
+             return int(now()) + DEFAULT_SYNC_CONFIG_DELAY
+-
+-    def __log_bad_response(self, error_message, request_json, stdout, stderr):
+-        log.pcsd.error(error_message)
+-        if self.__debug:
+-            log_communication(request_json, stdout, stderr)
+diff --git a/pcs/daemon/run.py b/pcs/daemon/run.py
+index bafd9f3c..874ee2f1 100644
+--- a/pcs/daemon/run.py
++++ b/pcs/daemon/run.py
+@@ -65,6 +65,8 @@ def configure_app(
+                 # old web ui by default
+                 [(r"/", RedirectHandler, dict(url="/manage"))]
+                 +
++                [(r"/ui", RedirectHandler, dict(url="/ui/"))]
++                +
+                 ui.get_routes(
+                     url_prefix="/ui/",
+                     app_dir=os.path.join(public_dir, "ui"),
+@@ -101,12 +103,8 @@ def main():
+ 
+     sync_config_lock = Lock()
+     ruby_pcsd_wrapper = ruby_pcsd.Wrapper(
+-        pcsd_cmdline_entry=env.PCSD_CMDLINE_ENTRY,
+-        gem_home=env.GEM_HOME,
++        settings.pcsd_ruby_socket,
+         debug=env.PCSD_DEBUG,
+-        ruby_executable=settings.ruby_executable,
+-        https_proxy=env.HTTPS_PROXY,
+-        no_proxy=env.NO_PROXY,
+     )
+     make_app = configure_app(
+         session.Storage(env.PCSD_SESSION_LIFETIME),
+diff --git a/pcs/settings_default.py b/pcs/settings_default.py
+index 6d8f33ac..f761ce43 100644
+--- a/pcs/settings_default.py
++++ b/pcs/settings_default.py
+@@ -43,6 +43,7 @@ cibadmin = os.path.join(pacemaker_binaries, "cibadmin")
+ crm_mon_schema = '/usr/share/pacemaker/crm_mon.rng'
+ agent_metadata_schema = "/usr/share/resource-agents/ra-api-1.dtd"
+ pcsd_var_location = "/var/lib/pcsd/"
++pcsd_ruby_socket = "/run/pcsd-ruby.socket"
+ pcsd_cert_location = os.path.join(pcsd_var_location, "pcsd.crt")
+ pcsd_key_location = os.path.join(pcsd_var_location, "pcsd.key")
+ pcsd_known_hosts_location = os.path.join(pcsd_var_location, "known-hosts")
+diff --git a/pcs_test/tier0/daemon/app/fixtures_app.py b/pcs_test/tier0/daemon/app/fixtures_app.py
+index 2e4feba4..8d5b8f4c 100644
+--- a/pcs_test/tier0/daemon/app/fixtures_app.py
++++ b/pcs_test/tier0/daemon/app/fixtures_app.py
+@@ -1,4 +1,3 @@
+-from base64 import b64encode
+ from pprint import pformat
+ from urllib.parse import urlencode
+ 
+@@ -30,7 +29,7 @@ class RubyPcsdWrapper(ruby_pcsd.Wrapper):
+         return {
+             "headers": self.headers,
+             "status": self.status_code,
+-            "body": b64encode(self.body),
++            "body": self.body,
+         }
+ 
+ class AppTest(AsyncHTTPTestCase):
+diff --git a/pcs_test/tier0/daemon/test_env.py b/pcs_test/tier0/daemon/test_env.py
+index 9e78eafd..e2f7f5b1 100644
+--- a/pcs_test/tier0/daemon/test_env.py
++++ b/pcs_test/tier0/daemon/test_env.py
+@@ -41,11 +41,7 @@ class Prepare(TestCase, create_setup_patch_mixin(env)):
+             env.PCSD_DEBUG: False,
+             env.PCSD_DISABLE_GUI: False,
+             env.PCSD_SESSION_LIFETIME: settings.gui_session_lifetime_seconds,
+-            env.GEM_HOME: pcsd_dir(settings.pcsd_gem_path),
+-            env.PCSD_CMDLINE_ENTRY: pcsd_dir(env.PCSD_CMDLINE_ENTRY_RB_SCRIPT),
+             env.PCSD_STATIC_FILES_DIR: pcsd_dir(env.PCSD_STATIC_FILES_DIR_NAME),
+-            env.HTTPS_PROXY: None,
+-            env.NO_PROXY: None,
+             env.PCSD_DEV: False,
+             "has_errors": False,
+         }
+@@ -77,8 +73,6 @@ class Prepare(TestCase, create_setup_patch_mixin(env)):
+             env.PCSD_DISABLE_GUI: "true",
+             env.PCSD_SESSION_LIFETIME: str(session_lifetime),
+             env.PCSD_DEV: "true",
+-            env.HTTPS_PROXY: "proxy1",
+-            env.NO_PROXY: "host",
+             env.PCSD_DEV: "true",
+         }
+         self.assert_environ_produces_modified_pcsd_env(
+@@ -92,15 +86,9 @@ class Prepare(TestCase, create_setup_patch_mixin(env)):
+                 env.PCSD_DEBUG: True,
+                 env.PCSD_DISABLE_GUI: True,
+                 env.PCSD_SESSION_LIFETIME: session_lifetime,
+-                env.GEM_HOME: pcsd_dir(settings.pcsd_gem_path),
+-                env.PCSD_CMDLINE_ENTRY: pcsd_dir(
+-                    env.PCSD_CMDLINE_ENTRY_RB_SCRIPT
+-                ),
+                 env.PCSD_STATIC_FILES_DIR: pcsd_dir(
+                     env.PCSD_STATIC_FILES_DIR_NAME
+                 ),
+-                env.HTTPS_PROXY: environ[env.HTTPS_PROXY],
+-                env.NO_PROXY: environ[env.NO_PROXY],
+                 env.PCSD_DEV: True,
+             },
+         )
+@@ -167,13 +155,6 @@ class Prepare(TestCase, create_setup_patch_mixin(env)):
+         self.assert_environ_produces_modified_pcsd_env(
+             specific_env_values={"has_errors": True},
+             errors=[
+-                f"Ruby gem location '{pcsd_dir(settings.pcsd_gem_path)}'"
+-                    " does not exist"
+-                ,
+-                "Ruby handlers entrypoint"
+-                    f" '{pcsd_dir(env.PCSD_CMDLINE_ENTRY_RB_SCRIPT)}'"
+-                    " does not exist"
+-                ,
+                 "Directory with web UI assets"
+                     f" '{pcsd_dir(env.PCSD_STATIC_FILES_DIR_NAME)}'"
+                     " does not exist"
+@@ -181,54 +162,13 @@ class Prepare(TestCase, create_setup_patch_mixin(env)):
+             ]
+         )
+ 
+-    def test_errors_on_missing_paths_disabled_gui(self):
++    def test_no_errors_on_missing_paths_disabled_gui(self):
+         self.path_exists.return_value = False
+-        pcsd_dir = partial(join_path, settings.pcsd_exec_location)
+         self.assert_environ_produces_modified_pcsd_env(
+             environ={env.PCSD_DISABLE_GUI: "true"},
+             specific_env_values={
+                 env.PCSD_DISABLE_GUI: True,
+-                "has_errors": True,
++                "has_errors": False,
+             },
+-            errors=[
+-                f"Ruby gem location '{pcsd_dir(settings.pcsd_gem_path)}'"
+-                    " does not exist"
+-                ,
+-                "Ruby handlers entrypoint"
+-                    f" '{pcsd_dir(env.PCSD_CMDLINE_ENTRY_RB_SCRIPT)}'"
+-                    " does not exist"
+-                ,
+-            ]
++            errors=[]
+         )
+-
+-    def test_lower_case_no_proxy_has_precedence(self):
+-        def it_selects(proxy_value):
+-            self.assert_environ_produces_modified_pcsd_env(
+-                environ=environ,
+-                specific_env_values={env.NO_PROXY: proxy_value}
+-            )
+-
+-        environ = {"NO_PROXY": "no_proxy_1"}
+-        it_selects("no_proxy_1")
+-
+-        environ["no_proxy"] = "no_proxy_2"
+-        it_selects("no_proxy_2")
+-
+-    def test_http_proxy_is_setup_by_precedence(self):
+-        def it_selects(proxy_value):
+-            self.assert_environ_produces_modified_pcsd_env(
+-                environ=environ,
+-                specific_env_values={env.HTTPS_PROXY: proxy_value}
+-            )
+-
+-        environ = {"ALL_PROXY": "all_proxy_1"}
+-        it_selects("all_proxy_1")
+-
+-        environ["all_proxy"] = "all_proxy_2"
+-        it_selects("all_proxy_2")
+-
+-        environ["HTTPS_PROXY"] = "https_proxy_1"
+-        it_selects("https_proxy_1")
+-
+-        environ["https_proxy"] = "https_proxy_2"
+-        it_selects("https_proxy_2")
+diff --git a/pcs_test/tier0/daemon/test_ruby_pcsd.py b/pcs_test/tier0/daemon/test_ruby_pcsd.py
+index d7fd71a0..28f14c87 100644
+--- a/pcs_test/tier0/daemon/test_ruby_pcsd.py
++++ b/pcs_test/tier0/daemon/test_ruby_pcsd.py
+@@ -16,10 +16,7 @@ from pcs.daemon import ruby_pcsd
+ logging.getLogger("pcs.daemon").setLevel(logging.CRITICAL)
+ 
+ def create_wrapper():
+-    return ruby_pcsd.Wrapper(
+-        rc("/path/to/gem_home"),
+-        rc("/path/to/pcsd/cmdline/entry"),
+-    )
++    return ruby_pcsd.Wrapper(rc("/path/to/ruby_socket"))
+ 
+ def create_http_request():
+     return HTTPServerRequest(
+@@ -63,9 +60,7 @@ patch_ruby_pcsd = create_patcher(ruby_pcsd)
+ 
+ class RunRuby(AsyncTestCase):
+     def setUp(self):
+-        self.stdout = ""
+-        self.stderr = ""
+-        self.exit_status = 0
++        self.ruby_response = ""
+         self.request = self.create_request()
+         self.wrapper = create_wrapper()
+         patcher = mock.patch.object(
+@@ -79,14 +74,14 @@ class RunRuby(AsyncTestCase):
+ 
+     async def send_to_ruby(self, request_json):
+         self.assertEqual(json.loads(request_json), self.request)
+-        return self.stdout, self.stderr, self.exit_status
++        return self.ruby_response
+ 
+     @staticmethod
+     def create_request(_type=ruby_pcsd.SYNC_CONFIGS):
+         return {"type": _type}
+ 
+     def set_run_result(self, run_result):
+-        self.stdout = json.dumps({**run_result, "logs": []})
++        self.ruby_response = json.dumps({**run_result, "logs": []})
+ 
+     def assert_sinatra_result(self, result, headers, status, body):
+         self.assertEqual(result.headers, headers)
+diff --git a/pcsd/Gemfile b/pcsd/Gemfile
+index 27898f71..716991a6 100644
+--- a/pcsd/Gemfile
++++ b/pcsd/Gemfile
+@@ -10,3 +10,4 @@ gem 'json'
+ gem 'open4'
+ gem 'ffi'
+ gem 'ethon'
++gem 'thin'
+diff --git a/pcsd/Gemfile.lock b/pcsd/Gemfile.lock
+index 6f833888..c8b02a94 100644
+--- a/pcsd/Gemfile.lock
++++ b/pcsd/Gemfile.lock
+@@ -2,8 +2,10 @@ GEM
+   remote: https://rubygems.org/
+   specs:
+     backports (3.11.4)
++    daemons (1.3.1)
+     ethon (0.11.0)
+       ffi (>= 1.3.0)
++    eventmachine (1.2.7)
+     ffi (1.9.25)
+     json (2.1.0)
+     mustermann (1.0.3)
+@@ -18,6 +20,10 @@ GEM
+       rack (~> 2.0)
+       rack-protection (= 2.0.4)
+       tilt (~> 2.0)
++    thin (1.7.2)
++      daemons (~> 1.0, >= 1.0.9)
++      eventmachine (~> 1.0, >= 1.0.4)
++      rack (>= 1, < 3)
+     tilt (2.0.9)
+ 
+ PLATFORMS
+@@ -33,6 +39,7 @@ DEPENDENCIES
+   rack-protection
+   rack-test
+   sinatra
++  thin
+   tilt
+ 
+ BUNDLED WITH
+diff --git a/pcsd/Makefile b/pcsd/Makefile
+index 5fe3f3f3..5dde50e3 100644
+--- a/pcsd/Makefile
++++ b/pcsd/Makefile
+@@ -26,6 +26,9 @@ build_gems_without_bundler:
+ 	vendor/cache/rack-test-1.1.0.gem \
+ 	vendor/cache/sinatra-2.0.4.gem \
+ 	vendor/cache/tilt-2.0.9.gem \
++	vendor/cache/eventmachine-1.2.7.gem \
++	vendor/cache/daemons-1.3.1.gem \
++	vendor/cache/thin-1.7.2.gem \
+ 	-- '--with-ldflags="-Wl,-z,now -Wl,-z,relro"'
+ 
+ get_gems:
+diff --git a/pcsd/bootstrap.rb b/pcsd/bootstrap.rb
+index ec6b535c..fc9d9b8c 100644
+--- a/pcsd/bootstrap.rb
++++ b/pcsd/bootstrap.rb
+@@ -51,8 +51,23 @@ if not defined? $cur_node_name
+   $cur_node_name = `/bin/hostname`.chomp
+ end
+ 
+-def configure_logger(log_device)
+-  logger = Logger.new(log_device)
++def configure_logger()
++  logger = Logger.new(StringIO.new())
++  logger.formatter = proc {|severity, datetime, progname, msg|
++    if Thread.current.key?(:pcsd_logger_container)
++      Thread.current[:pcsd_logger_container] << {
++        :level => severity,
++        :timestamp_usec => (datetime.to_f * 1000000).to_i,
++        :message => msg,
++      }
++    else
++      STDERR.puts("#{datetime} #{progname} #{severity} #{msg}")
++    end
++  }
++  return logger
++end
++
++def early_log(logger)
+   if ENV['PCSD_DEBUG'] and ENV['PCSD_DEBUG'].downcase == "true" then
+     logger.level = Logger::DEBUG
+     logger.info "PCSD Debugging enabled"
+@@ -65,7 +80,6 @@ def configure_logger(log_device)
+   else
+     logger.debug "Detected systemd is not in use"
+   end
+-  return logger
+ end
+ 
+ def get_capabilities(logger)
+diff --git a/pcsd/cfgsync.rb b/pcsd/cfgsync.rb
+index 16bcfbdc..1cab512e 100644
+--- a/pcsd/cfgsync.rb
++++ b/pcsd/cfgsync.rb
+@@ -468,7 +468,8 @@ module Cfgsync
+       node_response = {}
+       threads = []
+       @nodes.each { |node|
+-        threads << Thread.new {
++        threads << Thread.new(Thread.current[:pcsd_logger_container]) { |logger|
++          Thread.current[:pcsd_logger_container] = logger
+           code, out = send_request_with_token(
+             @auth_user, node, 'set_configs', true, data, true, nil, 30,
+             @additional_known_hosts
+@@ -616,7 +617,8 @@ module Cfgsync
+       node_configs = {}
+       connected_to = {}
+       nodes.each { |node|
+-        threads << Thread.new {
++        threads << Thread.new(Thread.current[:pcsd_logger_container]) { |logger|
++          Thread.current[:pcsd_logger_container] = logger
+           code, out = send_request_with_token(
+             @auth_user, node, 'get_configs', false, data, true, nil, nil,
+             @additional_known_hosts
+diff --git a/pcsd/pcs.rb b/pcsd/pcs.rb
+index 7b991ac0..9a0efb46 100644
+--- a/pcsd/pcs.rb
++++ b/pcsd/pcs.rb
+@@ -923,7 +923,8 @@ def is_auth_against_nodes(auth_user, node_names, timeout=10)
+   offline_nodes = []
+ 
+   node_names.uniq.each { |node_name|
+-    threads << Thread.new {
++    threads << Thread.new(Thread.current[:pcsd_logger_container]) { |logger|
++      Thread.current[:pcsd_logger_container] = logger
+       code, response = send_request_with_token(
+         auth_user, node_name, 'check_auth', false, {}, true, nil, timeout
+       )
+@@ -963,7 +964,8 @@ def pcs_auth(auth_user, nodes)
+   auth_responses = {}
+   threads = []
+   nodes.each { |node_name, node_data|
+-    threads << Thread.new {
++    threads << Thread.new(Thread.current[:pcsd_logger_container]) { |logger|
++      Thread.current[:pcsd_logger_container] = logger
+       begin
+         addr = node_data.fetch('dest_list').fetch(0).fetch('addr')
+         port = node_data.fetch('dest_list').fetch(0).fetch('port')
+@@ -1199,7 +1201,8 @@ def cluster_status_from_nodes(auth_user, cluster_nodes, cluster_name)
+ 
+   threads = []
+   cluster_nodes.uniq.each { |node|
+-    threads << Thread.new {
++    threads << Thread.new(Thread.current[:pcsd_logger_container]) { |logger|
++      Thread.current[:pcsd_logger_container] = logger
+       code, response = send_request_with_token(
+         auth_user,
+         node,
+diff --git a/pcsd/pcsd-cli.rb b/pcsd/pcsd-cli.rb
+index 942bae84..4daa93ba 100755
+--- a/pcsd/pcsd-cli.rb
++++ b/pcsd/pcsd-cli.rb
+@@ -29,7 +29,8 @@ end
+ auth_user = {}
+ PCS = get_pcs_path()
+ $logger_device = StringIO.new
+-$logger = configure_logger($logger_device)
++$logger = Logger.new($logger_device)
++early_log($logger)
+ 
+ capabilities, capabilities_pcsd = get_capabilities($logger)
+ CAPABILITIES = capabilities.freeze
+diff --git a/pcsd/pcsd-ruby.service b/pcsd/pcsd-ruby.service
+new file mode 100644
+index 00000000..deefdf4f
+--- /dev/null
++++ b/pcsd/pcsd-ruby.service
+@@ -0,0 +1,20 @@
++[Unit]
++Description=PCS GUI and remote configuration interface (Ruby)
++Documentation=man:pcsd(8)
++Documentation=man:pcs(8)
++Requires=network-online.target
++After=network-online.target
++# Stop the service automatically if nothing that depends on it is running
++StopWhenUnneeded=true
++# When stopping or restarting pcsd, stop or restart pcsd-ruby as well
++PartOf=pcsd.service
++
++[Service]
++EnvironmentFile=/etc/sysconfig/pcsd
++Environment=GEM_HOME=/usr/lib/pcsd/vendor/bundle/ruby
++# This file holds the selinux context
++ExecStart=/usr/lib/pcsd/pcsd
++Type=notify
++
++[Install]
++WantedBy=multi-user.target
+diff --git a/pcsd/pcsd.conf b/pcsd/pcsd.conf
+index 4761c73f..a968f459 100644
+--- a/pcsd/pcsd.conf
++++ b/pcsd/pcsd.conf
+@@ -38,3 +38,7 @@ PCSD_SESSION_LIFETIME=3600
+ #HTTPS_PROXY=
+ # Do not use proxy for specified hostnames
+ #NO_PROXY=
++
++
++# Do not change
++RACK_ENV=production
+diff --git a/pcsd/pcsd.rb b/pcsd/pcsd.rb
+index eff5c9a8..4cb98799 100644
+--- a/pcsd/pcsd.rb
++++ b/pcsd/pcsd.rb
+@@ -22,6 +22,7 @@ require 'permissions.rb'
+ use Rack::CommonLogger
+ 
+ set :app_file, __FILE__
++set :logging, false
+ 
+ def __msg_cluster_name_already_used(cluster_name)
+   return "The cluster name '#{cluster_name}' has already been added. You may not add two clusters with the same name."
+@@ -44,17 +45,17 @@ end
+ 
+ def getAuthUser()
+   return {
+-    :username => $tornado_username,
+-    :usergroups => $tornado_groups,
++    :username => Thread.current[:tornado_username],
++    :usergroups => Thread.current[:tornado_groups],
+   }
+ end
+ 
+ before do
+   # nobody is logged in yet
+   @auth_user = nil
+-  @tornado_session_username = $tornado_username
+-  @tornado_session_groups = $tornado_groups
+-  @tornado_is_authenticated = $tornado_is_authenticated
++  @tornado_session_username = Thread.current[:tornado_username]
++  @tornado_session_groups = Thread.current[:tornado_groups]
++  @tornado_is_authenticated = Thread.current[:tornado_is_authenticated]
+ 
+   if(request.path.start_with?('/remote/') and request.path != "/remote/auth") or request.path == '/run_pcs'
+     # Sets @auth_user to a hash containing info about logged in user or halts
+@@ -71,18 +72,8 @@ end
+ configure do
+   PCS = get_pcs_path()
+   PCS_INTERNAL = get_pcs_internal_path()
+-  $logger = configure_logger(StringIO.new())
+-  $logger.formatter = proc {|severity, datetime, progname, msg|
+-    # rushing a raw logging info into the global
+-    $tornado_logs << {
+-      :level => severity,
+-      :timestamp_usec => (datetime.to_f * 1000000).to_i,
+-      :message => msg,
+-    }
+-    # don't need any log to the stream
+-    ""
+-  }
+-
++  $logger = configure_logger()
++  early_log($logger)
+   capabilities, capabilities_pcsd = get_capabilities($logger)
+   CAPABILITIES = capabilities.freeze
+   CAPABILITIES_PCSD = capabilities_pcsd.freeze
+@@ -599,7 +590,8 @@ get '/manage/get_nodes_sw_versions' do
+     nodes = params[:node_list]
+   end
+   nodes.each {|node|
+-    threads << Thread.new {
++    threads << Thread.new(Thread.current[:pcsd_logger_container]) { |logger|
++      Thread.current[:pcsd_logger_container] = logger
+       code, response = send_request_with_token(
+         auth_user, node, 'get_sw_versions'
+       )
+@@ -625,7 +617,8 @@ post '/manage/auth_gui_against_nodes' do
+ 
+   data = JSON.parse(params.fetch('data_json'))
+   data.fetch('nodes').each { |node_name, node_data|
+-    threads << Thread.new {
++    threads << Thread.new(Thread.current[:pcsd_logger_container]) { |logger|
++      Thread.current[:pcsd_logger_container] = logger
+       dest_list = node_data.fetch('dest_list')
+       addr = dest_list.fetch(0).fetch('addr')
+       port = dest_list.fetch(0).fetch('port')
+diff --git a/pcsd/pcsd.service b/pcsd/pcsd.service
+index 88d237af..0cab20ef 100644
+--- a/pcsd/pcsd.service
++++ b/pcsd/pcsd.service
+@@ -4,6 +4,8 @@ Documentation=man:pcsd(8)
+ Documentation=man:pcs(8)
+ Requires=network-online.target
+ After=network-online.target
++Requires=pcsd-ruby.service
++After=pcsd-ruby.service
+ 
+ [Service]
+ EnvironmentFile=/etc/sysconfig/pcsd
+diff --git a/pcsd/pcsd.service-runner b/pcsd/pcsd.service-runner
+new file mode 100644
+index 00000000..40c401fa
+--- /dev/null
++++ b/pcsd/pcsd.service-runner
+@@ -0,0 +1,24 @@
++#!/usr/bin/ruby
++# This file is a runner for ruby part of pcsd callable from a systemd unit.
++# It also serves as a holder of a selinux context.
++
++begin
++  # add pcsd to the load path (ruby -I)
++  libdir = File.dirname(__FILE__)
++  $LOAD_PATH.unshift(libdir) unless $LOAD_PATH.include?(libdir)
++
++  # change current directory (ruby -C)
++  Dir.chdir('/var/lib/pcsd')
++
++  # import and run ruby daemon
++  require 'rserver.rb'
++rescue SignalException => e
++  if [Signal.list['INT'], Signal.list['TERM']].include?(e.signo)
++    # gracefully exit on SIGINT and SIGTERM
++    # pcsd sets up signal handlers later, this catches exceptions which occur
++    # by recieving signals before the handlers have been set up.
++    exit
++  else
++    raise
++  end
++end
+diff --git a/pcsd/remote.rb b/pcsd/remote.rb
+index 28b91382..760d3374 100644
+--- a/pcsd/remote.rb
++++ b/pcsd/remote.rb
+@@ -938,7 +938,8 @@ def status_all(params, request, auth_user, nodes=[], dont_update_config=false)
+   threads = []
+   forbidden_nodes = {}
+   nodes.each {|node|
+-    threads << Thread.new {
++    threads << Thread.new(Thread.current[:pcsd_logger_container]) { |logger|
++      Thread.current[:pcsd_logger_container] = logger
+       code, response = send_request_with_token(auth_user, node, 'status')
+       if 403 == code
+         forbidden_nodes[node] = true
+@@ -994,7 +995,8 @@ def clusters_overview(params, request, auth_user)
+   threads = []
+   config = PCSConfig.new(Cfgsync::PcsdSettings.from_file().text())
+   config.clusters.each { |cluster|
+-    threads << Thread.new {
++    threads << Thread.new(Thread.current[:pcsd_logger_container]) { |logger|
++      Thread.current[:pcsd_logger_container] = logger
+       cluster_map[cluster.name] = {
+         'cluster_name' => cluster.name,
+         'error_list' => [
+diff --git a/pcsd/rserver.rb b/pcsd/rserver.rb
+new file mode 100644
+index 00000000..6002a73c
+--- /dev/null
++++ b/pcsd/rserver.rb
+@@ -0,0 +1,98 @@
++require "base64"
++require "date"
++require "json"
++require 'rack'
++require 'sinatra'
++require 'thin'
++
++require 'settings.rb'
++
++def pack_response(response)
++  return [200, {}, [response.to_json.to_str]]
++end
++
++def unpack_request(transport_env)
++  return JSON.parse(Base64.strict_decode64(
++    transport_env["rack.request.form_hash"]["TORNADO_REQUEST"]
++  ))
++end
++
++class TornadoCommunicationMiddleware
++  def initialize(app)
++    @app = app
++  end
++
++  def call(transport_env)
++    Thread.current[:pcsd_logger_container] = []
++    begin
++      request = unpack_request(transport_env)
++
++      if ["sinatra_gui", "sinatra_remote"].include?(request["type"])
++        if request["type"] == "sinatra_gui"
++          session = request["session"]
++          Thread.current[:tornado_username] = session["username"]
++          Thread.current[:tornado_groups] = session["groups"]
++          Thread.current[:tornado_is_authenticated] = session["is_authenticated"]
++        end
++
++        # Keys rack.input and rack.errors are required. We make sure they are
++        # there.
++        request_env = request["env"]
++        request_env["rack.input"] = StringIO.new(request_env["rack.input"])
++        request_env["rack.errors"] = StringIO.new()
++
++        status, headers, body = @app.call(request_env)
++
++        rack_errors = request_env['rack.errors'].string()
++        if not rack_errors.empty?()
++          $logger.error(rack_errors)
++        end
++
++        return pack_response({
++          :status => status,
++          :headers => headers,
++          :body => Base64.encode64(body.join("")),
++          :logs => Thread.current[:pcsd_logger_container],
++        })
++      end
++
++      if request["type"] == "sync_configs"
++        return pack_response({
++          :next => Time.now.to_i + run_cfgsync(),
++          :logs => Thread.current[:pcsd_logger_container],
++        })
++      end
++
++      raise "Unexpected value for key 'type': '#{request['type']}'"
++    rescue => e
++      return pack_response({:error => "Processing request error: '#{e}'"})
++    end
++  end
++end
++
++
++use TornadoCommunicationMiddleware
++
++require 'pcsd'
++
++::Rack::Handler.get('thin').run(Sinatra::Application, {
++  :Host => PCSD_RUBY_SOCKET,
++}) do |server|
++  puts server.class
++  server.threaded = true
++  # notify systemd we are running
++  if ISSYSTEMCTL
++    if ENV['NOTIFY_SOCKET']
++      socket_name = ENV['NOTIFY_SOCKET'].dup
++      if socket_name.start_with?('@')
++        # abstract namespace socket
++        socket_name[0] = "\0"
++      end
++      $logger.info("Notifying systemd we are running (socket #{socket_name})")
++      sd_socket = Socket.new(Socket::AF_UNIX, Socket::SOCK_DGRAM)
++      sd_socket.connect(Socket.pack_sockaddr_un(socket_name))
++      sd_socket.send('READY=1', 0)
++      sd_socket.close()
++    end
++  end
++end
+diff --git a/pcsd/settings.rb b/pcsd/settings.rb
+index e8dc0c96..4caa5b4c 100644
+--- a/pcsd/settings.rb
++++ b/pcsd/settings.rb
+@@ -3,6 +3,7 @@ PCS_INTERNAL_EXEC = '/usr/lib/pcs/pcs_internal'
+ PCSD_EXEC_LOCATION = '/usr/lib/pcsd/'
+ PCSD_VAR_LOCATION = '/var/lib/pcsd/'
+ PCSD_DEFAULT_PORT = 2224
++PCSD_RUBY_SOCKET = '/run/pcsd-ruby.socket'
+ 
+ CRT_FILE = PCSD_VAR_LOCATION + 'pcsd.crt'
+ KEY_FILE = PCSD_VAR_LOCATION + 'pcsd.key'
+diff --git a/pcsd/settings.rb.debian b/pcsd/settings.rb.debian
+index daaae37b..c547bc51 100644
+--- a/pcsd/settings.rb.debian
++++ b/pcsd/settings.rb.debian
+@@ -3,6 +3,7 @@ PCS_INTERNAL_EXEC = '/usr/lib/pcs/pcs_internal'
+ PCSD_EXEC_LOCATION = '/usr/share/pcsd/'
+ PCSD_VAR_LOCATION = '/var/lib/pcsd/'
+ PCSD_DEFAULT_PORT = 2224
++PCSD_RUBY_SOCKET = '/run/pcsd-ruby.socket'
+ 
+ CRT_FILE = PCSD_VAR_LOCATION + 'pcsd.crt'
+ KEY_FILE = PCSD_VAR_LOCATION + 'pcsd.key'
+diff --git a/pcsd/sinatra_cmdline_wrapper.rb b/pcsd/sinatra_cmdline_wrapper.rb
+deleted file mode 100644
+index f7b22008..00000000
+--- a/pcsd/sinatra_cmdline_wrapper.rb
++++ /dev/null
+@@ -1,63 +0,0 @@
+-require "base64"
+-require "date"
+-require "json"
+-
+-request_json = ARGF.read()
+-
+-begin
+-  request = JSON.parse(request_json)
+-rescue => e
+-  puts e
+-  exit
+-end
+-
+-if !request.include?("type")
+-  result = {:error => "Type not specified"}
+-  print result.to_json
+-  exit
+-end
+-
+-$tornado_logs = []
+-
+-require 'pcsd'
+-
+-if ["sinatra_gui", "sinatra_remote"].include?(request["type"])
+-  if request["type"] == "sinatra_gui"
+-    $tornado_username = request["session"]["username"]
+-    $tornado_groups = request["session"]["groups"]
+-    $tornado_is_authenticated = request["session"]["is_authenticated"]
+-  end
+-
+-  set :logging, true
+-  set :run, false
+-  # Do not turn exceptions into fancy 100kB HTML pages and print them on stdout.
+-  # Instead, rack.errors is logged and therefore returned in result[:log].
+-  set :show_exceptions, false
+-  app = [Sinatra::Application][0]
+-
+-  env = request["env"]
+-  env["rack.input"] = StringIO.new(env["rack.input"])
+-  env["rack.errors"] = StringIO.new()
+-
+-  status, headers, body = app.call(env)
+-  rack_errors = env['rack.errors'].string()
+-  if not rack_errors.empty?()
+-    $logger.error(rack_errors)
+-  end
+-
+-  result = {
+-    :status => status,
+-    :headers => headers,
+-    :body => Base64.encode64(body.join("")),
+-  }
+-
+-elsif request["type"] == "sync_configs"
+-  result = {
+-    :next => Time.now.to_i + run_cfgsync()
+-  }
+-else
+-  result = {:error => "Unknown type: '#{request["type"]}'"}
+-end
+-
+-result[:logs] = $tornado_logs
+-print result.to_json
+-- 
+2.21.1
+
diff --git a/SOURCES/bz1783106-02-send-request-from-python-to-ruby-more-directly.patch b/SOURCES/bz1783106-02-send-request-from-python-to-ruby-more-directly.patch
new file mode 100644
index 0000000..142cd99
--- /dev/null
+++ b/SOURCES/bz1783106-02-send-request-from-python-to-ruby-more-directly.patch
@@ -0,0 +1,533 @@
+From 770252b476bc342ea08da2bc5b83de713463d14a Mon Sep 17 00:00:00 2001
+From: Ivan Devat <idevat@redhat.com>
+Date: Thu, 12 Mar 2020 15:32:31 +0100
+Subject: [PATCH 1/2] send request from python to ruby more directly
+
+Rack protection middleware is launched before
+TornadoCommunicationMiddleware. When request parts are unpacked in
+TornadoCommunicationMiddleware they are not checked by rack protection.
+
+This commit changes communication between python and ruby - request is
+sent to ruby more directly (without need to unpack request in sinatra
+middleware).
+---
+ pcs/daemon/ruby_pcsd.py                   | 217 ++++++++++++++--------
+ pcs_test/tier0/daemon/app/fixtures_app.py |   7 +-
+ pcs_test/tier0/daemon/test_ruby_pcsd.py   |  61 ++----
+ pcsd/rserver.rb                           |  39 ++--
+ 4 files changed, 175 insertions(+), 149 deletions(-)
+
+diff --git a/pcs/daemon/ruby_pcsd.py b/pcs/daemon/ruby_pcsd.py
+index e612f8da..53c53eaf 100644
+--- a/pcs/daemon/ruby_pcsd.py
++++ b/pcs/daemon/ruby_pcsd.py
+@@ -7,8 +7,8 @@ from time import time as now
+ import pycurl
+ from tornado.gen import convert_yielded
+ from tornado.web import HTTPError
+-from tornado.httputil import split_host_and_port, HTTPServerRequest
+-from tornado.httpclient import AsyncHTTPClient
++from tornado.httputil import HTTPServerRequest, HTTPHeaders
++from tornado.httpclient import AsyncHTTPClient, HTTPClientError
+ from tornado.curl_httpclient import CurlError
+ 
+ 
+@@ -29,6 +29,11 @@ RUBY_LOG_LEVEL_MAP = {
+     "DEBUG": logging.DEBUG,
+ }
+ 
++__id_dict = {"id": 0}
++def get_request_id():
++    __id_dict["id"] += 1
++    return __id_dict["id"]
++
+ class SinatraResult(namedtuple("SinatraResult", "headers, status, body")):
+     @classmethod
+     def from_response(cls, response):
+@@ -60,6 +65,59 @@ def process_response_logs(rb_log_list):
+             group_id=group_id
+         )
+ 
++class RubyDaemonRequest(namedtuple(
++    "RubyDaemonRequest",
++    "request_type, path, query, headers, method, body"
++)):
++    def __new__(
++        cls,
++        request_type,
++        http_request: HTTPServerRequest = None,
++        payload=None,
++    ):
++        headers = http_request.headers if http_request else HTTPHeaders()
++        headers.add("X-Pcsd-Type", request_type)
++        if payload:
++            headers.add(
++                "X-Pcsd-Payload",
++                b64encode(json.dumps(payload).encode()).decode()
++            )
++        return super(RubyDaemonRequest, cls).__new__(
++            cls,
++            request_type,
++            http_request.path if http_request else "",
++            http_request.query if http_request else "",
++            headers,
++            http_request.method if http_request else "GET",
++            http_request.body if http_request else None,
++        )
++
++    @property
++    def url(self):
++        # We do not need location for communication with ruby itself since we
++        # communicate via unix socket. But it is required by AsyncHTTPClient so
++        # "localhost" is used.
++        query = f"?{self.query}" if self.query else ""
++        return f"localhost/{self.path}{query}"
++
++    @property
++    def is_get(self):
++        return self.method.upper() == "GET"
++
++    @property
++    def has_http_request_detail(self):
++        return self.path or self.query or self.method != "GET" or self.body
++
++def log_ruby_daemon_request(label, request: RubyDaemonRequest):
++    log.pcsd.debug("%s type: '%s'", label, request.request_type)
++    if request.has_http_request_detail:
++        log.pcsd.debug("%s path: '%s'", label, request.path)
++        if request.query:
++            log.pcsd.debug("%s query: '%s'", label, request.query)
++        log.pcsd.debug("%s method: '%s'", label, request.method)
++        if request.body:
++            log.pcsd.debug("%s body: '%s'", label, request.body)
++
+ class Wrapper:
+     def __init__(self, pcsd_ruby_socket, debug=False):
+         self.__debug = debug
+@@ -67,74 +125,87 @@ class Wrapper:
+         self.__client = AsyncHTTPClient()
+         self.__pcsd_ruby_socket = pcsd_ruby_socket
+ 
+-    @staticmethod
+-    def get_sinatra_request(request: HTTPServerRequest):
+-        host, port = split_host_and_port(request.host)
+-        return {"env": {
+-            "PATH_INFO": request.path,
+-            "QUERY_STRING": request.query,
+-            "REMOTE_ADDR": request.remote_ip,
+-            "REMOTE_HOST": request.host,
+-            "REQUEST_METHOD": request.method,
+-            "REQUEST_URI": f"{request.protocol}://{request.host}{request.uri}",
+-            "SCRIPT_NAME": "",
+-            "SERVER_NAME": host,
+-            "SERVER_PORT": port,
+-            "SERVER_PROTOCOL": request.version,
+-            "HTTP_HOST": request.host,
+-            "HTTP_ACCEPT": "*/*",
+-            "HTTP_COOKIE": ";".join([
+-                v.OutputString() for v in request.cookies.values()
+-            ]),
+-            "HTTPS": "on" if request.protocol == "https" else "off",
+-            "HTTP_VERSION": request.version,
+-            "REQUEST_PATH": request.path,
+-            "rack.input": request.body.decode("utf8"),
+-        }}
+-
+     def prepare_curl_callback(self, curl):
+         curl.setopt(pycurl.UNIX_SOCKET_PATH, self.__pcsd_ruby_socket)
+         curl.setopt(pycurl.TIMEOUT, 70)
+ 
+-    async def send_to_ruby(self, request_json):
+-        # We do not need location for communication with ruby itself since we
+-        # communicate via unix socket. But it is required by AsyncHTTPClient so
+-        # "localhost" is used.
+-        tornado_request = b64encode(request_json.encode()).decode()
+-        return (await self.__client.fetch(
+-            "localhost",
+-            method="POST",
+-            body=f"TORNADO_REQUEST={tornado_request}",
+-            prepare_curl_callback=self.prepare_curl_callback,
+-        )).body
+-
+-    async def run_ruby(self, request_type, request=None):
+-        """
+-        request_type: SINATRA_GUI|SINATRA_REMOTE|SYNC_CONFIGS
+-        request: result of get_sinatra_request|None
+-            i.e. it has structure returned by get_sinatra_request if the request
+-            is not None - so we can get SERVER_NAME and  SERVER_PORT
+-        """
+-        request = request or {}
+-        request.update({"type": request_type})
+-        request_json = json.dumps(request)
+-
+-        if self.__debug:
+-            log.pcsd.debug("Ruby daemon request: '%s'", request_json)
++    async def send_to_ruby(self, request: RubyDaemonRequest):
+         try:
+-            ruby_response = await self.send_to_ruby(request_json)
++            return (await self.__client.fetch(
++                request.url,
++                headers=request.headers,
++                method=request.method,
++                # Tornado enforces body=None for GET method:
++                # Even with `allow_nonstandard_methods` we disallow GET with a
++                # body (because libcurl doesn't allow it unless we use
++                # CUSTOMREQUEST).  While the spec doesn't forbid clients from
++                # sending a body, it arguably disallows the server from doing
++                # anything with them.
++                body=(request.body if not request.is_get else None),
++                prepare_curl_callback=self.prepare_curl_callback,
++            )).body
+         except CurlError as e:
++            # This error we can get e.g. when ruby daemon is down.
+             log.pcsd.error(
+                 "Cannot connect to ruby daemon (message: '%s'). Is it running?",
+                 e
+             )
+             raise HTTPError(500)
++        except HTTPClientError as e:
++            # This error we can get e.g. when rack protection raises exception.
++            log.pcsd.error(
++                (
++                    "Got error from ruby daemon (message: '%s')."
++                    " Try checking system logs (e.g. journal, systemctl status"
++                    " pcsd.service) for more information.."
++                ),
++                e
++            )
++            raise HTTPError(500)
++
++    async def run_ruby(
++        self,
++        request_type,
++        http_request: HTTPServerRequest = None,
++        payload=None,
++    ):
++        request = RubyDaemonRequest(request_type, http_request, payload)
++        request_id = get_request_id()
++
++        def log_request():
++            log_ruby_daemon_request(
++                f"Ruby daemon request (id: {request_id})",
++                request,
++            )
++
++        if self.__debug:
++            log_request()
++
++        return self.process_ruby_response(
++            f"Ruby daemon response (id: {request_id})",
++            log_request,
++            await self.send_to_ruby(request),
++        )
++
++    def process_ruby_response(self, label, log_request, ruby_response):
++        """
++        Return relevant part of unpacked ruby response. As a side effect
++        relevant logs are writen.
+ 
++        string label -- is used as a log prefix
++        callable log_request -- is used to log request when some errors happen;
++            we want to log request before error even if there is not debug mode
++        string ruby_response -- body of response from ruby; it should contain
++            json with dictionary with response specific keys
++        """
+         try:
+             response = json.loads(ruby_response)
+             if "error" in response:
++                if not self.__debug:
++                    log_request()
+                 log.pcsd.error(
+-                    "Ruby daemon response contains an error: '%s'",
++                    "%s contains an error: '%s'",
++                    label,
+                     json.dumps(response)
+                 )
+                 raise HTTPError(500)
+@@ -144,56 +215,52 @@ class Wrapper:
+                 body = b64decode(response.pop("body"))
+                 if self.__debug:
+                     log.pcsd.debug(
+-                        "Ruby daemon response (without logs and body): '%s'",
++                        "%s (without logs and body): '%s'",
++                        label,
+                         json.dumps(response)
+                     )
+-                    log.pcsd.debug("Ruby daemon response body: '%s'", body)
++                    log.pcsd.debug("%s body: '%s'", label, body)
+                 response["body"] = body
+ 
+             elif self.__debug:
+                 log.pcsd.debug(
+-                    "Ruby daemon response (without logs): '%s'",
++                    "%s (without logs): '%s'",
++                    label,
+                     json.dumps(response)
+                 )
+             process_response_logs(logs)
+             return response
+         except (json.JSONDecodeError, binascii.Error) as e:
+             if self.__debug:
+-                log.pcsd.debug("Ruby daemon response: '%s'", ruby_response)
++                log.pcsd.debug("%s: '%s'", label, ruby_response)
++            else:
++                log_request()
++
+             log.pcsd.error("Cannot decode json from ruby pcsd wrapper: '%s'", e)
+             raise HTTPError(500)
+ 
+     async def request_gui(
+         self, request: HTTPServerRequest, user, groups, is_authenticated
+     ) -> SinatraResult:
+-        sinatra_request = self.get_sinatra_request(request)
+         # Sessions handling was removed from ruby. However, some session
+         # information is needed for ruby code (e.g. rendering some parts of
+         # templates). So this information must be sent to ruby by another way.
+-        sinatra_request.update({
+-            "session": {
++        return SinatraResult.from_response(
++            await convert_yielded(self.run_ruby(SINATRA_GUI, request, {
+                 "username": user,
+                 "groups": groups,
+                 "is_authenticated": is_authenticated,
+-            }
+-        })
+-        response = await convert_yielded(self.run_ruby(
+-            SINATRA_GUI,
+-            sinatra_request
+-        ))
+-        return SinatraResult.from_response(response)
++            }))
++        )
+ 
+     async def request_remote(self, request: HTTPServerRequest) -> SinatraResult:
+-        response = await convert_yielded(self.run_ruby(
+-            SINATRA_REMOTE,
+-            self.get_sinatra_request(request)
+-        ))
+-        return SinatraResult.from_response(response)
++        return SinatraResult.from_response(
++            await convert_yielded(self.run_ruby(SINATRA_REMOTE, request))
++        )
+ 
+     async def sync_configs(self):
+         try:
+-            response = await convert_yielded(self.run_ruby(SYNC_CONFIGS))
+-            return response["next"]
++            return (await convert_yielded(self.run_ruby(SYNC_CONFIGS)))["next"]
+         except HTTPError:
+             log.pcsd.error("Config synchronization failed")
+             return int(now()) + DEFAULT_SYNC_CONFIG_DELAY
+diff --git a/pcs_test/tier0/daemon/app/fixtures_app.py b/pcs_test/tier0/daemon/app/fixtures_app.py
+index 8d5b8f4c..590203b4 100644
+--- a/pcs_test/tier0/daemon/app/fixtures_app.py
++++ b/pcs_test/tier0/daemon/app/fixtures_app.py
+@@ -20,7 +20,12 @@ class RubyPcsdWrapper(ruby_pcsd.Wrapper):
+         self.headers = {"Some": "value"}
+         self.body = b"Success action"
+ 
+-    async def run_ruby(self, request_type, request=None):
++    async def run_ruby(
++        self,
++        request_type,
++        http_request=None,
++        payload=None,
++    ):
+         if request_type != self.request_type:
+             raise AssertionError(
+                 f"Wrong request type: expected '{self.request_type}'"
+diff --git a/pcs_test/tier0/daemon/test_ruby_pcsd.py b/pcs_test/tier0/daemon/test_ruby_pcsd.py
+index 28f14c87..32eb74cc 100644
+--- a/pcs_test/tier0/daemon/test_ruby_pcsd.py
++++ b/pcs_test/tier0/daemon/test_ruby_pcsd.py
+@@ -4,7 +4,7 @@ from base64 import b64encode
+ from unittest import TestCase, mock
+ from urllib.parse import urlencode
+ 
+-from tornado.httputil import HTTPServerRequest
++from tornado.httputil import HTTPServerRequest, HTTPHeaders
+ from tornado.testing import AsyncTestCase, gen_test
+ from tornado.web import HTTPError
+ 
+@@ -22,46 +22,17 @@ def create_http_request():
+     return HTTPServerRequest(
+         method="POST",
+         uri="/pcsd/uri",
+-        headers={"Cookie": "cookie1=first;cookie2=second"},
++        headers=HTTPHeaders({"Cookie": "cookie1=first;cookie2=second"}),
+         body=str.encode(urlencode({"post-key": "post-value"})),
+         host="pcsd-host:2224"
+     )
+ 
+-class GetSinatraRequest(TestCase):
+-    def test_translate_request(self):
+-        # pylint: disable=invalid-name
+-        self.maxDiff = None
+-        self.assertEqual(
+-            create_wrapper().get_sinatra_request(create_http_request()),
+-            {
+-                'env': {
+-                    'HTTPS': 'off',
+-                    'HTTP_ACCEPT': '*/*',
+-                    'HTTP_COOKIE': 'cookie1=first;cookie2=second',
+-                    'HTTP_HOST': 'pcsd-host:2224',
+-                    'HTTP_VERSION': 'HTTP/1.0',
+-                    'PATH_INFO': '/pcsd/uri',
+-                    'QUERY_STRING': '',
+-                    'REMOTE_ADDR': None, # It requires complicated request args
+-                    'REMOTE_HOST': 'pcsd-host:2224',
+-                    'REQUEST_METHOD': 'POST',
+-                    'REQUEST_PATH': '/pcsd/uri',
+-                    'REQUEST_URI': 'http://pcsd-host:2224/pcsd/uri',
+-                    'SCRIPT_NAME': '',
+-                    'SERVER_NAME': 'pcsd-host',
+-                    'SERVER_PORT': 2224,
+-                    'SERVER_PROTOCOL': 'HTTP/1.0',
+-                    'rack.input': 'post-key=post-value'
+-                }
+-            }
+-        )
+-
+ patch_ruby_pcsd = create_patcher(ruby_pcsd)
+ 
+ class RunRuby(AsyncTestCase):
+     def setUp(self):
+         self.ruby_response = ""
+-        self.request = self.create_request()
++        self.request = ruby_pcsd.RubyDaemonRequest(ruby_pcsd.SYNC_CONFIGS)
+         self.wrapper = create_wrapper()
+         patcher = mock.patch.object(
+             self.wrapper,
+@@ -72,14 +43,10 @@ class RunRuby(AsyncTestCase):
+         patcher.start()
+         super().setUp()
+ 
+-    async def send_to_ruby(self, request_json):
+-        self.assertEqual(json.loads(request_json), self.request)
++    async def send_to_ruby(self, ruby_request):
++        self.assertEqual(ruby_request, self.request)
+         return self.ruby_response
+ 
+-    @staticmethod
+-    def create_request(_type=ruby_pcsd.SYNC_CONFIGS):
+-        return {"type": _type}
+-
+     def set_run_result(self, run_result):
+         self.ruby_response = json.dumps({**run_result, "logs": []})
+ 
+@@ -125,10 +92,10 @@ class RunRuby(AsyncTestCase):
+             "body": b64encode(str.encode(body)).decode(),
+         })
+         http_request = create_http_request()
+-        self.request = {
+-            **self.create_request(ruby_pcsd.SINATRA_REMOTE),
+-            **self.wrapper.get_sinatra_request(http_request),
+-        }
++        self.request = ruby_pcsd.RubyDaemonRequest(
++            ruby_pcsd.SINATRA_REMOTE,
++            http_request,
++        )
+         result = yield self.wrapper.request_remote(http_request)
+         self.assert_sinatra_result(result, headers, status, body)
+ 
+@@ -148,15 +115,15 @@ class RunRuby(AsyncTestCase):
+             "body": b64encode(str.encode(body)).decode(),
+         })
+         http_request = create_http_request()
+-        self.request = {
+-            **self.create_request(ruby_pcsd.SINATRA_GUI),
+-            **self.wrapper.get_sinatra_request(http_request),
+-            "session": {
++        self.request = ruby_pcsd.RubyDaemonRequest(
++            ruby_pcsd.SINATRA_GUI,
++            http_request,
++            {
+                 "username": user,
+                 "groups": groups,
+                 "is_authenticated": is_authenticated,
+             }
+-        }
++        )
+         result = yield self.wrapper.request_gui(
+             http_request,
+             user=user,
+diff --git a/pcsd/rserver.rb b/pcsd/rserver.rb
+index 6002a73c..4b58f252 100644
+--- a/pcsd/rserver.rb
++++ b/pcsd/rserver.rb
+@@ -11,42 +11,25 @@ def pack_response(response)
+   return [200, {}, [response.to_json.to_str]]
+ end
+ 
+-def unpack_request(transport_env)
+-  return JSON.parse(Base64.strict_decode64(
+-    transport_env["rack.request.form_hash"]["TORNADO_REQUEST"]
+-  ))
+-end
+-
+ class TornadoCommunicationMiddleware
+   def initialize(app)
+     @app = app
+   end
+ 
+-  def call(transport_env)
++  def call(env)
+     Thread.current[:pcsd_logger_container] = []
+     begin
+-      request = unpack_request(transport_env)
++      type = env["HTTP_X_PCSD_TYPE"]
+ 
+-      if ["sinatra_gui", "sinatra_remote"].include?(request["type"])
+-        if request["type"] == "sinatra_gui"
+-          session = request["session"]
++      if ["sinatra_gui", "sinatra_remote"].include?(type)
++        if type == "sinatra_gui"
++          session = JSON.parse(Base64.strict_decode64(env["HTTP_X_PCSD_PAYLOAD"]))
+           Thread.current[:tornado_username] = session["username"]
+           Thread.current[:tornado_groups] = session["groups"]
+           Thread.current[:tornado_is_authenticated] = session["is_authenticated"]
+         end
+ 
+-        # Keys rack.input and rack.errors are required. We make sure they are
+-        # there.
+-        request_env = request["env"]
+-        request_env["rack.input"] = StringIO.new(request_env["rack.input"])
+-        request_env["rack.errors"] = StringIO.new()
+-
+-        status, headers, body = @app.call(request_env)
+-
+-        rack_errors = request_env['rack.errors'].string()
+-        if not rack_errors.empty?()
+-          $logger.error(rack_errors)
+-        end
++        status, headers, body = @app.call(env)
+ 
+         return pack_response({
+           :status => status,
+@@ -56,16 +39,20 @@ class TornadoCommunicationMiddleware
+         })
+       end
+ 
+-      if request["type"] == "sync_configs"
++      if type == "sync_configs"
+         return pack_response({
+           :next => Time.now.to_i + run_cfgsync(),
+           :logs => Thread.current[:pcsd_logger_container],
+         })
+       end
+ 
+-      raise "Unexpected value for key 'type': '#{request['type']}'"
++      return pack_response({
++        :error => "Unexpected value for key 'type': '#{type}'"
++      })
+     rescue => e
+-      return pack_response({:error => "Processing request error: '#{e}'"})
++      return pack_response({
++        :error => "Processing request error: '#{e}' '#{e.backtrace}'"
++      })
+     end
+   end
+ end
+-- 
+2.21.1
+
diff --git a/SOURCES/bz1792946-01-tests-update-for-pacemaker-2.0.3-4.patch b/SOURCES/bz1792946-01-tests-update-for-pacemaker-2.0.3-4.patch
new file mode 100644
index 0000000..0168f2c
--- /dev/null
+++ b/SOURCES/bz1792946-01-tests-update-for-pacemaker-2.0.3-4.patch
@@ -0,0 +1,367 @@
+From 9fbeeed4e43dc37800de3c3f0cf6f7520dc31ccf Mon Sep 17 00:00:00 2001
+From: Tomas Jelinek <tojeline@redhat.com>
+Date: Mon, 20 Jan 2020 12:34:55 +0100
+Subject: [PATCH] tests: update for pacemaker-2.0.3-4
+
+---
+ pcs_test/tier0/test_resource.py | 59 +++++++++++++-------------
+ pcs_test/tier0/test_stonith.py  | 75 +++++++++++++++++----------------
+ pcs_test/tools/assertions.py    | 24 +++++++++--
+ 3 files changed, 88 insertions(+), 70 deletions(-)
+
+diff --git a/pcs_test/tier0/test_resource.py b/pcs_test/tier0/test_resource.py
+index b8b85dd2..45d98dff 100644
+--- a/pcs_test/tier0/test_resource.py
++++ b/pcs_test/tier0/test_resource.py
+@@ -10,6 +10,7 @@ from pcs_test.tier0.cib_resource.common import ResourceTest
+ from pcs_test.tools.assertions import (
+     ac,
+     AssertPcsMixin,
++    assert_pcs_status,
+ )
+ from pcs_test.tools.bin_mock import get_mock_settings
+ from pcs_test.tools.cib import get_assert_pcs_effect_mixin
+@@ -953,11 +954,11 @@ monitor interval=20 (A-monitor-interval-20)
+         o,r = pcs(temp_cib, "resource status")
+         assert r == 0
+         if PCMK_2_0_3_PLUS:
+-            ac(o,"""\
++            assert_pcs_status(o,"""\
+   * Resource Group: AGroup:
+-    * A1\t(ocf::heartbeat:Dummy):\t Stopped
+-    * A2\t(ocf::heartbeat:Dummy):\t Stopped
+-    * A3\t(ocf::heartbeat:Dummy):\t Stopped
++    * A1\t(ocf::heartbeat:Dummy):\tStopped
++    * A2\t(ocf::heartbeat:Dummy):\tStopped
++    * A3\t(ocf::heartbeat:Dummy):\tStopped
+ """)
+         else:
+             ac(o,"""\
+@@ -1208,19 +1209,19 @@ monitor interval=20 (A-monitor-interval-20)
+         output, returnVal = pcs(temp_cib, "resource")
+         assert returnVal == 0
+         if PCMK_2_0_3_PLUS:
+-            ac(output, """\
+-  * F\t(ocf::heartbeat:Dummy):\t Stopped
+-  * G\t(ocf::heartbeat:Dummy):\t Stopped
+-  * H\t(ocf::heartbeat:Dummy):\t Stopped
++            assert_pcs_status(output, """\
++  * F\t(ocf::heartbeat:Dummy):\tStopped
++  * G\t(ocf::heartbeat:Dummy):\tStopped
++  * H\t(ocf::heartbeat:Dummy):\tStopped
+   * Resource Group: RGA:
+-    * A\t(ocf::heartbeat:Dummy):\t Stopped
+-    * B\t(ocf::heartbeat:Dummy):\t Stopped
+-    * C\t(ocf::heartbeat:Dummy):\t Stopped
+-    * E\t(ocf::heartbeat:Dummy):\t Stopped
+-    * D\t(ocf::heartbeat:Dummy):\t Stopped
+-    * K\t(ocf::heartbeat:Dummy):\t Stopped
+-    * J\t(ocf::heartbeat:Dummy):\t Stopped
+-    * I\t(ocf::heartbeat:Dummy):\t Stopped
++    * A\t(ocf::heartbeat:Dummy):\tStopped
++    * B\t(ocf::heartbeat:Dummy):\tStopped
++    * C\t(ocf::heartbeat:Dummy):\tStopped
++    * E\t(ocf::heartbeat:Dummy):\tStopped
++    * D\t(ocf::heartbeat:Dummy):\tStopped
++    * K\t(ocf::heartbeat:Dummy):\tStopped
++    * J\t(ocf::heartbeat:Dummy):\tStopped
++    * I\t(ocf::heartbeat:Dummy):\tStopped
+ """)
+         else:
+             ac(output, """\
+@@ -2004,9 +2005,9 @@ monitor interval=20 (A-monitor-interval-20)
+ 
+         o,r = pcs(temp_cib, "resource")
+         if PCMK_2_0_3_PLUS:
+-            ac(o,"""\
++            assert_pcs_status(o,"""\
+   * Resource Group: AG:
+-    * D1\t(ocf::heartbeat:Dummy):\t Stopped
++    * D1\t(ocf::heartbeat:Dummy):\tStopped
+   * Clone Set: D0-clone [D0]:
+ """)
+         else:
+@@ -2348,10 +2349,10 @@ monitor interval=20 (A-monitor-interval-20)
+         o,r = pcs(temp_cib, "resource status")
+         assert r == 0
+         if PCMK_2_0_3_PLUS:
+-            ac(o,"""\
++            assert_pcs_status(o,"""\
+   * Resource Group: DGroup:
+-    * D1\t(ocf::heartbeat:Dummy):\t Stopped
+-    * D2\t(ocf::heartbeat:Dummy):\t Stopped
++    * D1\t(ocf::heartbeat:Dummy):\tStopped
++    * D2\t(ocf::heartbeat:Dummy):\tStopped
+ """)
+         else:
+             ac(o,"""\
+@@ -3560,12 +3561,12 @@ Error: role must be: Stopped, Started, Slave or Master (use --force to override)
+         assert retVal == 0
+         output, retVal = pcs(temp_cib, "resource status")
+         if PCMK_2_0_3_PLUS:
+-            ac(output, outdent(
++            assert_pcs_status(output, outdent(
+                 """\
+                   * Resource Group: dummies:
+-                    * dummy1\t(ocf::heartbeat:Dummy):\t Stopped
+-                    * dummy2\t(ocf::heartbeat:Dummy):\t Stopped
+-                    * dummy3\t(ocf::heartbeat:Dummy):\t Stopped
++                    * dummy1\t(ocf::heartbeat:Dummy):\tStopped
++                    * dummy2\t(ocf::heartbeat:Dummy):\tStopped
++                    * dummy3\t(ocf::heartbeat:Dummy):\tStopped
+                 """
+             ))
+         else:
+@@ -3652,12 +3653,12 @@ Error: role must be: Stopped, Started, Slave or Master (use --force to override)
+         assert retVal == 0
+         output, retVal = pcs(temp_cib, "resource status")
+         if PCMK_2_0_3_PLUS:
+-            ac(output, outdent(
++            assert_pcs_status(output, outdent(
+                 """\
+                   * Resource Group: dummies:
+-                    * dummy1\t(ocf::heartbeat:Dummy):\t Stopped
+-                    * dummy2\t(ocf::heartbeat:Dummy):\t Stopped
+-                    * dummy3\t(ocf::heartbeat:Dummy):\t Stopped
++                    * dummy1\t(ocf::heartbeat:Dummy):\tStopped
++                    * dummy2\t(ocf::heartbeat:Dummy):\tStopped
++                    * dummy3\t(ocf::heartbeat:Dummy):\tStopped
+                 """
+             ))
+         else:
+diff --git a/pcs_test/tier0/test_stonith.py b/pcs_test/tier0/test_stonith.py
+index 46938e75..097a79b9 100644
+--- a/pcs_test/tier0/test_stonith.py
++++ b/pcs_test/tier0/test_stonith.py
+@@ -517,13 +517,13 @@ class StonithTest(TestCase, AssertPcsMixin):
+         if PCMK_2_0_3_PLUS:
+             self.assert_pcs_success("stonith", outdent(
+                 """\
+-                  * n1-ipmi\t(stonith:fence_apc):\t Stopped
+-                  * n2-ipmi\t(stonith:fence_apc):\t Stopped
+-                  * n1-apc1\t(stonith:fence_apc):\t Stopped
+-                  * n1-apc2\t(stonith:fence_apc):\t Stopped
+-                  * n2-apc1\t(stonith:fence_apc):\t Stopped
+-                  * n2-apc2\t(stonith:fence_apc):\t Stopped
+-                  * n2-apc3\t(stonith:fence_apc):\t Stopped
++                  * n1-ipmi\t(stonith:fence_apc):\tStopped
++                  * n2-ipmi\t(stonith:fence_apc):\tStopped
++                  * n1-apc1\t(stonith:fence_apc):\tStopped
++                  * n1-apc2\t(stonith:fence_apc):\tStopped
++                  * n2-apc1\t(stonith:fence_apc):\tStopped
++                  * n2-apc2\t(stonith:fence_apc):\tStopped
++                  * n2-apc3\t(stonith:fence_apc):\tStopped
+                  Target: rh7-1
+                    Level 1 - n1-ipmi
+                    Level 2 - n1-apc1,n1-apc2,n2-apc2
+@@ -531,7 +531,7 @@ class StonithTest(TestCase, AssertPcsMixin):
+                    Level 1 - n2-ipmi
+                    Level 2 - n2-apc1,n2-apc2,n2-apc3
+                 """
+-            ))
++            ), despace=True)
+         else:
+             self.assert_pcs_success("stonith", outdent(
+                 """\
+@@ -559,12 +559,12 @@ class StonithTest(TestCase, AssertPcsMixin):
+         if PCMK_2_0_3_PLUS:
+             self.assert_pcs_success("stonith", outdent(
+                 """\
+-                  * n1-ipmi\t(stonith:fence_apc):\t Stopped
+-                  * n2-ipmi\t(stonith:fence_apc):\t Stopped
+-                  * n1-apc1\t(stonith:fence_apc):\t Stopped
+-                  * n1-apc2\t(stonith:fence_apc):\t Stopped
+-                  * n2-apc1\t(stonith:fence_apc):\t Stopped
+-                  * n2-apc3\t(stonith:fence_apc):\t Stopped
++                  * n1-ipmi\t(stonith:fence_apc):\tStopped
++                  * n2-ipmi\t(stonith:fence_apc):\tStopped
++                  * n1-apc1\t(stonith:fence_apc):\tStopped
++                  * n1-apc2\t(stonith:fence_apc):\tStopped
++                  * n2-apc1\t(stonith:fence_apc):\tStopped
++                  * n2-apc3\t(stonith:fence_apc):\tStopped
+                  Target: rh7-1
+                    Level 1 - n1-ipmi
+                    Level 2 - n1-apc1,n1-apc2
+@@ -572,7 +572,7 @@ class StonithTest(TestCase, AssertPcsMixin):
+                    Level 1 - n2-ipmi
+                    Level 2 - n2-apc1,n2-apc3
+                 """
+-            ))
++            ), despace=True)
+         else:
+             self.assert_pcs_success("stonith", outdent(
+                 """\
+@@ -599,11 +599,11 @@ class StonithTest(TestCase, AssertPcsMixin):
+         if PCMK_2_0_3_PLUS:
+             self.assert_pcs_success("stonith", outdent(
+                 """\
+-                  * n1-ipmi\t(stonith:fence_apc):\t Stopped
+-                  * n2-ipmi\t(stonith:fence_apc):\t Stopped
+-                  * n1-apc1\t(stonith:fence_apc):\t Stopped
+-                  * n1-apc2\t(stonith:fence_apc):\t Stopped
+-                  * n2-apc3\t(stonith:fence_apc):\t Stopped
++                  * n1-ipmi\t(stonith:fence_apc):\tStopped
++                  * n2-ipmi\t(stonith:fence_apc):\tStopped
++                  * n1-apc1\t(stonith:fence_apc):\tStopped
++                  * n1-apc2\t(stonith:fence_apc):\tStopped
++                  * n2-apc3\t(stonith:fence_apc):\tStopped
+                  Target: rh7-1
+                    Level 1 - n1-ipmi
+                    Level 2 - n1-apc1,n1-apc2
+@@ -611,7 +611,7 @@ class StonithTest(TestCase, AssertPcsMixin):
+                    Level 1 - n2-ipmi
+                    Level 2 - n2-apc3
+                 """
+-            ))
++            ), despace=True)
+         else:
+             self.assert_pcs_success("stonith", outdent(
+                 """\
+@@ -637,17 +637,17 @@ class StonithTest(TestCase, AssertPcsMixin):
+         if PCMK_2_0_3_PLUS:
+             self.assert_pcs_success("stonith", outdent(
+                 """\
+-                  * n1-ipmi\t(stonith:fence_apc):\t Stopped
+-                  * n2-ipmi\t(stonith:fence_apc):\t Stopped
+-                  * n1-apc1\t(stonith:fence_apc):\t Stopped
+-                  * n1-apc2\t(stonith:fence_apc):\t Stopped
++                  * n1-ipmi\t(stonith:fence_apc):\tStopped
++                  * n2-ipmi\t(stonith:fence_apc):\tStopped
++                  * n1-apc1\t(stonith:fence_apc):\tStopped
++                  * n1-apc2\t(stonith:fence_apc):\tStopped
+                  Target: rh7-1
+                    Level 1 - n1-ipmi
+                    Level 2 - n1-apc1,n1-apc2
+                  Target: rh7-2
+                    Level 1 - n2-ipmi
+                 """
+-            ))
++            ), despace=True)
+         else:
+             self.assert_pcs_success("stonith", outdent(
+                 """\
+@@ -671,16 +671,16 @@ class StonithTest(TestCase, AssertPcsMixin):
+         if PCMK_2_0_3_PLUS:
+             self.assert_pcs_success("stonith", outdent(
+                 """\
+-                  * n1-ipmi\t(stonith:fence_apc):\t Stopped
+-                  * n2-ipmi\t(stonith:fence_apc):\t Stopped
+-                  * n1-apc2\t(stonith:fence_apc):\t Stopped
++                  * n1-ipmi\t(stonith:fence_apc):\tStopped
++                  * n2-ipmi\t(stonith:fence_apc):\tStopped
++                  * n1-apc2\t(stonith:fence_apc):\tStopped
+                  Target: rh7-1
+                    Level 1 - n1-ipmi
+                    Level 2 - n1-apc2
+                  Target: rh7-2
+                    Level 1 - n2-ipmi
+                 """
+-            ))
++            ), despace=True)
+         else:
+             self.assert_pcs_success("stonith", outdent(
+                 """\
+@@ -704,14 +704,14 @@ class StonithTest(TestCase, AssertPcsMixin):
+         if PCMK_2_0_3_PLUS:
+             self.assert_pcs_success("stonith", outdent(
+                 """\
+-                  * n1-ipmi\t(stonith:fence_apc):\t Stopped
+-                  * n2-ipmi\t(stonith:fence_apc):\t Stopped
++                  * n1-ipmi\t(stonith:fence_apc):\tStopped
++                  * n2-ipmi\t(stonith:fence_apc):\tStopped
+                  Target: rh7-1
+                    Level 1 - n1-ipmi
+                  Target: rh7-2
+                    Level 1 - n2-ipmi
+                 """
+-            ))
++            ), despace=True)
+         else:
+             self.assert_pcs_success("stonith", outdent(
+                 """\
+@@ -1219,9 +1219,9 @@ class LevelConfig(LevelTestsBase):
+         if PCMK_2_0_3_PLUS:
+             result = outdent(
+                 """\
+-                  * F1\t(stonith:fence_apc):\t Stopped
+-                  * F2\t(stonith:fence_apc):\t Stopped
+-                  * F3\t(stonith:fence_apc):\t Stopped
++                  * F1\t(stonith:fence_apc):\tStopped
++                  * F2\t(stonith:fence_apc):\tStopped
++                  * F3\t(stonith:fence_apc):\tStopped
+                 """
+             )
+         else:
+@@ -1234,7 +1234,8 @@ class LevelConfig(LevelTestsBase):
+             )
+         self.assert_pcs_success(
+             "stonith",
+-            result + "\n".join(indent(self.config_lines, 1)) + "\n"
++            result + "\n".join(indent(self.config_lines, 1)) + "\n",
++            despace=True
+         )
+         self.pcs_runner.mock_settings["corosync_conf_file"] = rc(
+             "corosync.conf"
+diff --git a/pcs_test/tools/assertions.py b/pcs_test/tools/assertions.py
+index db8f4df5..a2b7b4ac 100644
+--- a/pcs_test/tools/assertions.py
++++ b/pcs_test/tools/assertions.py
+@@ -59,7 +59,8 @@ class AssertPcsMixin:
+                 )
+ 
+     def assert_pcs_success(
+-        self, command, stdout_full=None, stdout_start=None, stdout_regexp=None
++        self, command, stdout_full=None, stdout_start=None, stdout_regexp=None,
++        despace=False
+     ):
+         full = stdout_full
+         if (
+@@ -75,7 +76,8 @@ class AssertPcsMixin:
+             stdout_full=full,
+             stdout_start=stdout_start,
+             stdout_regexp=stdout_regexp,
+-            returncode=0
++            returncode=0,
++            despace=despace,
+         )
+ 
+     def assert_pcs_fail(
+@@ -99,7 +101,7 @@ class AssertPcsMixin:
+ 
+     def assert_pcs_result(
+         self, command, stdout_full=None, stdout_start=None, stdout_regexp=None,
+-        returncode=0
++        returncode=0, despace=False
+     ):
+         msg = (
+             "Please specify exactly one: stdout_start or stdout_full or"
+@@ -162,7 +164,11 @@ class AssertPcsMixin:
+                 )
+         else:
+             expected_full = self.__prepare_output(stdout_full)
+-            if stdout != expected_full:
++            if (
++                (despace and _despace(stdout) != _despace(expected_full))
++                or
++                (not despace and stdout != expected_full)
++            ):
+                 self.assertEqual(
+                     stdout,
+                     expected_full,
+@@ -386,3 +392,13 @@ def __report_item_equal(real_report_item, report_item_info):
+             )
+         )
+     )
++
++def assert_pcs_status(status1, status2):
++    if _despace(status1) != _despace(status2):
++        raise AssertionError(
++            "strings not equal:\n{0}".format(prepare_diff(status1, status2))
++        )
++
++def _despace(string):
++    # ignore whitespace changes between various pacemaker versions
++    return re.sub(r"[ \t]+", " ", string)
+-- 
+2.20.1
+
diff --git a/SOURCES/bz1793574-01-fix-detecting-fence-history-support.patch b/SOURCES/bz1793574-01-fix-detecting-fence-history-support.patch
new file mode 100644
index 0000000..b8f7672
--- /dev/null
+++ b/SOURCES/bz1793574-01-fix-detecting-fence-history-support.patch
@@ -0,0 +1,541 @@
+From ac0305a8b6bb040ef06dcbfff309c91321400d44 Mon Sep 17 00:00:00 2001
+From: Tomas Jelinek <tojeline@redhat.com>
+Date: Mon, 27 Jan 2020 17:05:42 +0100
+Subject: [PATCH 3/7] fix detecting fence history support
+
+---
+ pcs/lib/commands/stonith.py                   | 38 ++++++++------
+ pcs/lib/pacemaker/live.py                     | 45 +++++++++-------
+ .../crm_mon.rng.with_fence_history.xml        | 13 -----
+ .../crm_mon.rng.without_fence_history.xml     |  9 ----
+ pcs_test/tier0/lib/commands/test_status.py    | 35 +++----------
+ .../lib/commands/test_stonith_history.py      | 52 ++++++-------------
+ pcs_test/tier0/lib/pacemaker/test_live.py     | 31 ++++++++++-
+ .../tools/command_env/config_runner_pcmk.py   | 41 +++++++++++++++
+ pcs_test/tools/command_env/mock_runner.py     |  1 +
+ 9 files changed, 141 insertions(+), 124 deletions(-)
+ delete mode 100644 pcs_test/resources/crm_mon.rng.with_fence_history.xml
+ delete mode 100644 pcs_test/resources/crm_mon.rng.without_fence_history.xml
+
+diff --git a/pcs/lib/commands/stonith.py b/pcs/lib/commands/stonith.py
+index c0849a54..ff87c852 100644
+--- a/pcs/lib/commands/stonith.py
++++ b/pcs/lib/commands/stonith.py
+@@ -1,3 +1,7 @@
++from typing import (
++    Optional,
++)
++
+ from pcs.lib import reports
+ from pcs.lib.cib import resource
+ from pcs.lib.cib.resource.common import are_meta_disabled
+@@ -6,13 +10,14 @@ from pcs.lib.commands.resource import (
+     _ensure_disabled_after_wait,
+     resource_environment
+ )
++from pcs.lib.env import LibraryEnvironment
+ from pcs.lib.errors import LibraryError
+ from pcs.lib.pacemaker.live import (
+     FenceHistoryCommandErrorException,
+     fence_history_cleanup,
+     fence_history_text,
+     fence_history_update,
+-    is_fence_history_supported,
++    is_fence_history_supported_management,
+ )
+ from pcs.lib.pacemaker.values import validate_id
+ from pcs.lib.resource_agent import find_valid_stonith_agent_by_name as get_agent
+@@ -162,51 +167,54 @@ def create_in_group(
+             put_after_adjacent,
+         )
+ 
+-def history_get_text(env, node=None):
++def history_get_text(env: LibraryEnvironment, node: Optional[str] = None):
+     """
+     Get full fencing history in plain text
+ 
+-    LibraryEnvironment env
+-    string node -- get history for the specified node or all nodes if None
++    env
++    node -- get history for the specified node or all nodes if None
+     """
+-    if not is_fence_history_supported():
++    runner = env.cmd_runner()
++    if not is_fence_history_supported_management(runner):
+         raise LibraryError(reports.fence_history_not_supported())
+ 
+     try:
+-        return fence_history_text(env.cmd_runner(), node)
++        return fence_history_text(runner, node)
+     except FenceHistoryCommandErrorException as e:
+         raise LibraryError(
+             reports.fence_history_command_error(str(e), "show")
+         )
+ 
+-def history_cleanup(env, node=None):
++def history_cleanup(env: LibraryEnvironment, node: Optional[str] = None):
+     """
+     Clear fencing history
+ 
+-    LibraryEnvironment env
+-    string node -- clear history for the specified node or all nodes if None
++    env
++    node -- clear history for the specified node or all nodes if None
+     """
+-    if not is_fence_history_supported():
++    runner = env.cmd_runner()
++    if not is_fence_history_supported_management(runner):
+         raise LibraryError(reports.fence_history_not_supported())
+ 
+     try:
+-        return fence_history_cleanup(env.cmd_runner(), node)
++        return fence_history_cleanup(runner, node)
+     except FenceHistoryCommandErrorException as e:
+         raise LibraryError(
+             reports.fence_history_command_error(str(e), "cleanup")
+         )
+ 
+-def history_update(env):
++def history_update(env: LibraryEnvironment):
+     """
+     Update fencing history in a cluster (sync with other nodes)
+ 
+-    LibraryEnvironment env
++    env
+     """
+-    if not is_fence_history_supported():
++    runner = env.cmd_runner()
++    if not is_fence_history_supported_management(runner):
+         raise LibraryError(reports.fence_history_not_supported())
+ 
+     try:
+-        return fence_history_update(env.cmd_runner())
++        return fence_history_update(runner)
+     except FenceHistoryCommandErrorException as e:
+         raise LibraryError(
+             reports.fence_history_command_error(str(e), "update")
+diff --git a/pcs/lib/pacemaker/live.py b/pcs/lib/pacemaker/live.py
+index 233f2e2d..d6741441 100644
+--- a/pcs/lib/pacemaker/live.py
++++ b/pcs/lib/pacemaker/live.py
+@@ -1,6 +1,7 @@
+ import os.path
+ import re
+ from typing import (
++    Iterable,
+     List,
+     Tuple,
+ )
+@@ -56,7 +57,7 @@ def get_cluster_status_text(
+         cmd.extend(["--show-detail", "--show-node-attributes", "--failcounts"])
+         # by default, pending and failed actions are displayed
+         # with verbose==True, we display the whole history
+-        if is_fence_history_supported():
++        if is_fence_history_supported_status(runner):
+             cmd.append("--fence-history=3")
+     stdout, stderr, retval = runner.run(cmd)
+ 
+@@ -523,25 +524,15 @@ def _resource_move_ban_clear(
+ 
+ ### fence history
+ 
+-def is_fence_history_supported():
+-    try:
+-        crm_mon_rng = xml_fromstring(open(settings.crm_mon_schema, "r").read())
+-        # Namespaces must be provided otherwise xpath won't match anything.
+-        # 'None' namespace is not supported, so we rename it.
+-        namespaces_map = {
+-            "ns": crm_mon_rng.nsmap.pop(None)
+-        }
+-        history_elements = crm_mon_rng.xpath(
+-            ".//ns:element[@name='fence_history']",
+-            namespaces=namespaces_map
+-        )
+-        if history_elements:
+-            return True
+-    except (EnvironmentError, etree.XMLSyntaxError):
+-        # if we cannot tell for sure fence_history is supported, we will
+-        # continue as if it was not supported
+-        pass
+-    return False
++def is_fence_history_supported_status(runner: CommandRunner) -> bool:
++    return _is_in_pcmk_tool_help(
++        runner, "crm_mon", ["--fence-history"]
++    )
++
++def is_fence_history_supported_management(runner: CommandRunner) -> bool:
++    return _is_in_pcmk_tool_help(
++        runner, "stonith_admin", ["--history", "--broadcast", "--cleanup"]
++    )
+ 
+ def fence_history_cleanup(runner, node=None):
+     return _run_fence_history_command(runner, "--cleanup", node)
+@@ -583,3 +574,17 @@ def __is_in_crm_resource_help(runner, text):
+     )
+     # help goes to stderr but we check stdout as well if that gets changed
+     return text in stderr or text in stdout
++
++def _is_in_pcmk_tool_help(
++    runner: CommandRunner, tool: str, text_list: Iterable[str]
++) -> bool:
++    stdout, stderr, dummy_retval = runner.run(
++        [__exec(tool), "--help-all"]
++    )
++    # Help goes to stderr but we check stdout as well if that gets changed. Use
++    # generators in all to return early.
++    return (
++        all(text in stderr for text in text_list)
++        or
++        all(text in stdout for text in text_list)
++    )
+diff --git a/pcs_test/resources/crm_mon.rng.with_fence_history.xml b/pcs_test/resources/crm_mon.rng.with_fence_history.xml
+deleted file mode 100644
+index 45b380bd..00000000
+--- a/pcs_test/resources/crm_mon.rng.with_fence_history.xml
++++ /dev/null
+@@ -1,13 +0,0 @@
+-<?xml version="1.0" encoding="UTF-8"?>
+-<grammar xmlns="http://relaxng.org/ns/structure/1.0" datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+-    <start>
+-        <ref name="element-crm_mon"/>
+-    </start>
+-    <define name="element-crm_mon">
+-        <element name="crm_mon">
+-            <optional>
+-                <element name="fence_history"/>
+-            </optional>
+-        </element>
+-    </define>
+-</grammar>
+diff --git a/pcs_test/resources/crm_mon.rng.without_fence_history.xml b/pcs_test/resources/crm_mon.rng.without_fence_history.xml
+deleted file mode 100644
+index f7efe52c..00000000
+--- a/pcs_test/resources/crm_mon.rng.without_fence_history.xml
++++ /dev/null
+@@ -1,9 +0,0 @@
+-<?xml version="1.0" encoding="UTF-8"?>
+-<grammar xmlns="http://relaxng.org/ns/structure/1.0" datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+-    <start>
+-        <ref name="element-crm_mon"/>
+-    </start>
+-    <define name="element-crm_mon">
+-        <element name="crm_mon"/>
+-    </define>
+-</grammar>
+diff --git a/pcs_test/tier0/lib/commands/test_status.py b/pcs_test/tier0/lib/commands/test_status.py
+index 517aa908..06878668 100644
+--- a/pcs_test/tier0/lib/commands/test_status.py
++++ b/pcs_test/tier0/lib/commands/test_status.py
+@@ -1,15 +1,12 @@
+ from textwrap import dedent
+-from unittest import mock, TestCase
++from unittest import TestCase
+ 
+-from pcs import settings
+ from pcs.common import file_type_codes, report_codes
+ from pcs.lib.commands import status
+ from pcs_test.tools import fixture
+ from pcs_test.tools.command_env import get_env_tools
+ from pcs_test.tools.misc import read_test_resource as rc_read
+ 
+-crm_mon_rng_with_history = rc_read("crm_mon.rng.with_fence_history.xml")
+-crm_mon_rng_without_history = rc_read("crm_mon.rng.without_fence_history.xml")
+ 
+ class FullClusterStatusPlaintext(TestCase):
+     def setUp(self):
+@@ -212,11 +209,7 @@ class FullClusterStatusPlaintext(TestCase):
+     def test_success_live_verbose(self):
+         (self.config
+             .env.set_known_nodes(self.node_name_list)
+-            .fs.open(
+-                settings.crm_mon_schema,
+-                mock.mock_open(read_data=crm_mon_rng_without_history)(),
+-                name="fs.open.crm_mon_rng"
+-            )
++            .runner.pcmk.can_fence_history_status(stderr="not supported")
+             .runner.pcmk.load_state_plaintext(
+                 verbose=True,
+                 stdout="crm_mon cluster status",
+@@ -288,11 +281,7 @@ class FullClusterStatusPlaintext(TestCase):
+         (self.config
+             .env.set_corosync_conf_data(rc_read("corosync.conf"))
+             .env.set_cib_data("<cib/>")
+-            .fs.open(
+-                settings.crm_mon_schema,
+-                mock.mock_open(read_data=crm_mon_rng_without_history)(),
+-                name="fs.open.crm_mon_rng"
+-            )
++            .runner.pcmk.can_fence_history_status(stderr="not supported")
+             .runner.pcmk.load_state_plaintext(
+                 verbose=True, stdout="crm_mon cluster status",
+             )
+@@ -320,11 +309,7 @@ class FullClusterStatusPlaintext(TestCase):
+     def test_success_verbose_inactive_and_fence_history(self):
+         (self.config
+             .env.set_known_nodes(self.node_name_list)
+-            .fs.open(
+-                settings.crm_mon_schema,
+-                mock.mock_open(read_data=crm_mon_rng_with_history)(),
+-                name="fs.open.crm_mon_rng"
+-            )
++            .runner.pcmk.can_fence_history_status()
+             .runner.pcmk.load_state_plaintext(
+                 verbose=True,
+                 inactive=False,
+@@ -375,11 +360,7 @@ class FullClusterStatusPlaintext(TestCase):
+     def _assert_success_with_ticket_status_failure(self, stderr="", msg=""):
+         (self.config
+             .env.set_known_nodes(self.node_name_list)
+-            .fs.open(
+-                settings.crm_mon_schema,
+-                mock.mock_open(read_data=crm_mon_rng_without_history)(),
+-                name="fs.open.crm_mon_rng"
+-            )
++            .runner.pcmk.can_fence_history_status(stderr="not supported")
+             .runner.pcmk.load_state_plaintext(
+                 verbose=True,
+                 stdout="crm_mon cluster status",
+@@ -553,11 +534,7 @@ class FullClusterStatusPlaintext(TestCase):
+ 
+         (self.config
+             .env.set_known_nodes(self.node_name_list[1:])
+-            .fs.open(
+-                settings.crm_mon_schema,
+-                mock.mock_open(read_data=crm_mon_rng_without_history)(),
+-                name="fs.open.crm_mon_rng"
+-            )
++            .runner.pcmk.can_fence_history_status(stderr="not supported")
+             .runner.pcmk.load_state_plaintext(
+                 verbose=True,
+                 stdout="crm_mon cluster status",
+diff --git a/pcs_test/tier0/lib/commands/test_stonith_history.py b/pcs_test/tier0/lib/commands/test_stonith_history.py
+index e1bd35cb..cfdef13c 100644
+--- a/pcs_test/tier0/lib/commands/test_stonith_history.py
++++ b/pcs_test/tier0/lib/commands/test_stonith_history.py
+@@ -1,25 +1,16 @@
+-from unittest import mock, TestCase
++from unittest import TestCase
+ 
+ from pcs_test.tools import fixture
+ from pcs_test.tools.command_env import get_env_tools
+-from pcs_test.tools.misc import read_test_resource as rc_read
+ 
+-from pcs import settings
+ from pcs.common import report_codes
+ from pcs.lib.commands import stonith
+ 
+ 
+-crm_mon_rng_with_history = rc_read("crm_mon.rng.with_fence_history.xml")
+-crm_mon_rng_without_history = rc_read("crm_mon.rng.without_fence_history.xml")
+-
+ class HistoryGetText(TestCase):
+     def setUp(self):
+         self.env_assist, self.config = get_env_tools(test_case=self)
+-        self.config.fs.open(
+-            settings.crm_mon_schema,
+-            mock.mock_open(read_data=crm_mon_rng_with_history)(),
+-            name="fs.open.crm_mon_rng"
+-        )
++        self.config.runner.pcmk.can_fence_history_manage()
+ 
+     def test_success_all_nodes(self):
+         history = (
+@@ -68,11 +59,10 @@ class HistoryGetText(TestCase):
+         )
+ 
+     def test_history_not_supported(self):
+-        self.config.fs.open(
+-            settings.crm_mon_schema,
+-            mock.mock_open(read_data=crm_mon_rng_without_history)(),
+-            name="fs.open.crm_mon_rng",
+-            instead="fs.open.crm_mon_rng"
++        self.config.runner.pcmk.can_fence_history_manage(
++            stderr="not supported",
++            name="runner.pcmk.can_fence_history_manage",
++            instead="runner.pcmk.can_fence_history_manage",
+         )
+         self.env_assist.assert_raise_library_error(
+             lambda: stonith.history_get_text(self.env_assist.get_env()),
+@@ -88,11 +78,7 @@ class HistoryGetText(TestCase):
+ class HistoryCleanup(TestCase):
+     def setUp(self):
+         self.env_assist, self.config = get_env_tools(test_case=self)
+-        self.config.fs.open(
+-            settings.crm_mon_schema,
+-            mock.mock_open(read_data=crm_mon_rng_with_history)(),
+-            name="fs.open.crm_mon_rng"
+-        )
++        self.config.runner.pcmk.can_fence_history_manage()
+ 
+     def test_success_all_nodes(self):
+         msg = "cleaning up fencing-history for node *\n"
+@@ -129,11 +115,10 @@ class HistoryCleanup(TestCase):
+         )
+ 
+     def test_history_not_supported(self):
+-        self.config.fs.open(
+-            settings.crm_mon_schema,
+-            mock.mock_open(read_data=crm_mon_rng_without_history)(),
+-            name="fs.open.crm_mon_rng",
+-            instead="fs.open.crm_mon_rng"
++        self.config.runner.pcmk.can_fence_history_manage(
++            stderr="not supported",
++            name="runner.pcmk.can_fence_history_manage",
++            instead="runner.pcmk.can_fence_history_manage",
+         )
+         self.env_assist.assert_raise_library_error(
+             lambda: stonith.history_cleanup(self.env_assist.get_env()),
+@@ -149,11 +134,7 @@ class HistoryCleanup(TestCase):
+ class HistoryUpdate(TestCase):
+     def setUp(self):
+         self.env_assist, self.config = get_env_tools(test_case=self)
+-        self.config.fs.open(
+-            settings.crm_mon_schema,
+-            mock.mock_open(read_data=crm_mon_rng_with_history)(),
+-            name="fs.open.crm_mon_rng"
+-        )
++        self.config.runner.pcmk.can_fence_history_manage()
+ 
+     def test_success_all_nodes(self):
+         msg = "gather fencing-history from all nodes\n"
+@@ -182,11 +163,10 @@ class HistoryUpdate(TestCase):
+         )
+ 
+     def test_history_not_supported(self):
+-        self.config.fs.open(
+-            settings.crm_mon_schema,
+-            mock.mock_open(read_data=crm_mon_rng_without_history)(),
+-            name="fs.open.crm_mon_rng",
+-            instead="fs.open.crm_mon_rng"
++        self.config.runner.pcmk.can_fence_history_manage(
++            stderr="not supported",
++            name="runner.pcmk.can_fence_history_manage",
++            instead="runner.pcmk.can_fence_history_manage",
+         )
+         self.env_assist.assert_raise_library_error(
+             lambda: stonith.history_update(self.env_assist.get_env()),
+diff --git a/pcs_test/tier0/lib/pacemaker/test_live.py b/pcs_test/tier0/lib/pacemaker/test_live.py
+index 1ea5454e..d69d8b34 100644
+--- a/pcs_test/tier0/lib/pacemaker/test_live.py
++++ b/pcs_test/tier0/lib/pacemaker/test_live.py
+@@ -79,7 +79,7 @@ class GetClusterStatusXmlTest(LibraryPacemakerTest):
+ class GetClusterStatusText(TestCase):
+     def setUp(self):
+         self.mock_fencehistory_supported = mock.patch(
+-            "pcs.lib.pacemaker.live.is_fence_history_supported",
++            "pcs.lib.pacemaker.live.is_fence_history_supported_status",
+             return_value=True
+         )
+         self.mock_fencehistory_supported.start()
+@@ -125,7 +125,7 @@ class GetClusterStatusText(TestCase):
+     def test_success_no_fence_history(self):
+         self.mock_fencehistory_supported.stop()
+         self.mock_fencehistory_supported = mock.patch(
+-            "pcs.lib.pacemaker.live.is_fence_history_supported",
++            "pcs.lib.pacemaker.live.is_fence_history_supported_status",
+             return_value=False
+         )
+         self.mock_fencehistory_supported.start()
+@@ -1399,3 +1399,30 @@ class ResourcesWaitingTest(LibraryPacemakerTest):
+         mock_runner.run.assert_called_once_with(
+             [self.path("crm_resource"), "--wait"]
+         )
++
++
++class IsInPcmkToolHelp(TestCase):
++    # pylint: disable=protected-access
++    def test_all_in_stderr(self):
++        mock_runner = get_runner("", "ABCDE", 0)
++        self.assertTrue(
++            lib._is_in_pcmk_tool_help(mock_runner, "", ["A", "C", "E"])
++        )
++
++    def test_all_in_stdout(self):
++        mock_runner = get_runner("ABCDE", "", 0)
++        self.assertTrue(
++            lib._is_in_pcmk_tool_help(mock_runner, "", ["A", "C", "E"])
++        )
++
++    def test_some_in_stderr_all_in_stdout(self):
++        mock_runner = get_runner("ABCDE", "ABC", 0)
++        self.assertTrue(
++            lib._is_in_pcmk_tool_help(mock_runner, "", ["A", "C", "E"])
++        )
++
++    def test_some_in_stderr_some_in_stdout(self):
++        mock_runner = get_runner("CDE", "ABC", 0)
++        self.assertFalse(
++            lib._is_in_pcmk_tool_help(mock_runner, "", ["A", "C", "E"])
++        )
+diff --git a/pcs_test/tools/command_env/config_runner_pcmk.py b/pcs_test/tools/command_env/config_runner_pcmk.py
+index 5bb9755b..0580e8d6 100644
+--- a/pcs_test/tools/command_env/config_runner_pcmk.py
++++ b/pcs_test/tools/command_env/config_runner_pcmk.py
+@@ -70,11 +70,52 @@ def _fixture_state_node_xml(
+ 
+ 
+ class PcmkShortcuts():
++    #pylint: disable=too-many-public-methods
+     def __init__(self, calls):
+         self.__calls = calls
+         self.default_wait_timeout = DEFAULT_WAIT_TIMEOUT
+         self.default_wait_error_returncode = WAIT_TIMEOUT_EXPIRED_RETURNCODE
+ 
++    def can_fence_history_manage(
++        self,
++        name="runner.pcmk.can_fence_history_manage",
++        stderr="--history --cleanup --broadcast",
++        instead=None,
++    ):
++        """
++        Create a call to check if fence_history is supported by stonith_admin
++
++        string name -- key of the call
++        string stderr -- stonith_admin help text
++        string instead -- key of call instead of which this new call is to be
++            placed
++        """
++        self.__calls.place(
++            name,
++            RunnerCall("stonith_admin --help-all", stderr=stderr),
++            instead=instead,
++        )
++
++    def can_fence_history_status(
++        self,
++        name="runner.pcmk.can_fence_history_status",
++        stderr="--fence-history",
++        instead=None,
++    ):
++        """
++        Create a call to check if fence_history is supported by crm_mon
++
++        string name -- key of the call
++        string stderr -- crm_mon help text
++        string instead -- key of call instead of which this new call is to be
++            placed
++        """
++        self.__calls.place(
++            name,
++            RunnerCall("crm_mon --help-all", stderr=stderr),
++            instead=instead,
++        )
++
+     def fence_history_get(
+         self, name="runner.pcmk.fence_history_get", node=None, stdout="",
+         stderr="", returncode=0
+diff --git a/pcs_test/tools/command_env/mock_runner.py b/pcs_test/tools/command_env/mock_runner.py
+index 2fe43137..8b9cb771 100644
+--- a/pcs_test/tools/command_env/mock_runner.py
++++ b/pcs_test/tools/command_env/mock_runner.py
+@@ -61,6 +61,7 @@ COMMAND_COMPLETIONS = {
+     "crm_ticket": path.join(settings.pacemaker_binaries, "crm_ticket"),
+     "crm_verify": path.join(settings.pacemaker_binaries, "crm_verify"),
+     "sbd": settings.sbd_binary,
++    "stonith_admin": path.join(settings.pacemaker_binaries, "stonith_admin"),
+ }
+ 
+ def complete_command(command):
+-- 
+2.21.1
+
diff --git a/SOURCES/daemon-fix-cookie-options.patch b/SOURCES/daemon-fix-cookie-options.patch
new file mode 100644
index 0000000..f23287c
--- /dev/null
+++ b/SOURCES/daemon-fix-cookie-options.patch
@@ -0,0 +1,54 @@
+From 898cfe8212a5940dba6552196ddd243f912b5942 Mon Sep 17 00:00:00 2001
+From: Tomas Jelinek <tojeline@redhat.com>
+Date: Tue, 11 Feb 2020 10:18:33 +0100
+Subject: [PATCH 5/7] daemon: fix cookie options
+
+---
+ pcs/daemon/app/session.py | 14 +++++++++++---
+ 1 file changed, 11 insertions(+), 3 deletions(-)
+
+diff --git a/pcs/daemon/app/session.py b/pcs/daemon/app/session.py
+index b4d29add..dcbb4c23 100644
+--- a/pcs/daemon/app/session.py
++++ b/pcs/daemon/app/session.py
+@@ -4,10 +4,16 @@ from pcs.daemon.auth import check_user_groups, authorize_user
+ PCSD_SESSION = "pcsd.sid"
+ 
+ class Mixin:
+-    __session = None
+     """
+     Mixin for tornado.web.RequestHandler
+     """
++
++    __session = None
++    __cookie_options = {
++        "secure": True,
++        "httponly": True,
++    }
++
+     def initialize(self, session_storage: Storage):
+         self.__storage = session_storage
+ 
+@@ -63,7 +69,7 @@ class Mixin:
+         """
+         Write the session id into a response cookie.
+         """
+-        self.set_cookie(PCSD_SESSION, self.session.sid)
++        self.set_cookie(PCSD_SESSION, self.session.sid, **self.__cookie_options)
+ 
+     def put_request_cookies_sid_to_response_cookies_sid(self):
+         """
+@@ -73,7 +79,9 @@ class Mixin:
+         #TODO this method should exist temporarily (for sinatra compatibility)
+         #pylint: disable=invalid-name
+         if self.__sid_from_client is not None:
+-            self.set_cookie(PCSD_SESSION, self.__sid_from_client)
++            self.set_cookie(
++                PCSD_SESSION, self.__sid_from_client, **self.__cookie_options
++            )
+ 
+     def was_sid_in_request_cookies(self):
+         return self.__sid_from_client is not None
+-- 
+2.21.1
+
diff --git a/SOURCES/do-not-support-cluster-setup-with-udp-u-transport.patch b/SOURCES/do-not-support-cluster-setup-with-udp-u-transport.patch
index f45f056..8a08bef 100644
--- a/SOURCES/do-not-support-cluster-setup-with-udp-u-transport.patch
+++ b/SOURCES/do-not-support-cluster-setup-with-udp-u-transport.patch
@@ -1,7 +1,7 @@
-From 588620d73c3bfac080f6a1918de0e7fdc1550646 Mon Sep 17 00:00:00 2001
+From 10d13839883a96b35fc609eb51939ec97bc4aac6 Mon Sep 17 00:00:00 2001
 From: Ivan Devat <idevat@redhat.com>
 Date: Tue, 20 Nov 2018 15:03:56 +0100
-Subject: [PATCH] do not support cluster setup with udp(u) transport
+Subject: [PATCH 2/2] do not support cluster setup with udp(u) transport
 
 ---
  pcs/pcs.8                 | 2 ++
@@ -10,10 +10,10 @@ Subject: [PATCH] do not support cluster setup with udp(u) transport
  3 files changed, 6 insertions(+)
 
 diff --git a/pcs/pcs.8 b/pcs/pcs.8
-index 4ae646e2..85575412 100644
+index ff2ba0b0..7278c8dc 100644
 --- a/pcs/pcs.8
 +++ b/pcs/pcs.8
-@@ -254,6 +254,8 @@ By default, encryption is enabled with cipher=aes256 and hash=sha256. To disable
+@@ -283,6 +283,8 @@ By default, encryption is enabled with cipher=aes256 and hash=sha256. To disable
  
  Transports udp and udpu:
  .br
@@ -23,10 +23,10 @@ index 4ae646e2..85575412 100644
  .br
  Transport options are: ip_version, netmtu
 diff --git a/pcs/usage.py b/pcs/usage.py
-index c0c6d712..d9f240da 100644
+index 30c63964..60373d82 100644
 --- a/pcs/usage.py
 +++ b/pcs/usage.py
-@@ -647,6 +647,7 @@ Commands:
+@@ -689,6 +689,7 @@ Commands:
              hash=sha256. To disable encryption, set cipher=none and hash=none.
  
          Transports udp and udpu:
@@ -49,5 +49,5 @@ index b857cbae..b8d48d92 100644
  #csetup-transport-options.knet .without-knet
  {
 -- 
-2.11.0
+2.21.1
 
diff --git a/SOURCES/update-a-hint-for-resource-create-master.patch b/SOURCES/update-a-hint-for-resource-create-master.patch
new file mode 100644
index 0000000..512f999
--- /dev/null
+++ b/SOURCES/update-a-hint-for-resource-create-master.patch
@@ -0,0 +1,39 @@
+From a6708c6bde467cfced3c4a950eadff0375908303 Mon Sep 17 00:00:00 2001
+From: Tomas Jelinek <tojeline@redhat.com>
+Date: Thu, 23 Jan 2020 14:47:49 +0100
+Subject: [PATCH 2/7] update a hint for 'resource create ... master'
+
+---
+ pcs/cli/resource/parse_args.py | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+diff --git a/pcs/cli/resource/parse_args.py b/pcs/cli/resource/parse_args.py
+index 92dddac9..86280edb 100644
+--- a/pcs/cli/resource/parse_args.py
++++ b/pcs/cli/resource/parse_args.py
+@@ -1,5 +1,5 @@
+ from pcs.cli.common.parse_args import group_by_keywords, prepare_options
+-from pcs.cli.common.errors import CmdLineInputError, HINT_SYNTAX_CHANGE
++from pcs.cli.common.errors import CmdLineInputError, SEE_MAN_CHANGES
+ 
+ 
+ def parse_create_simple(arg_list):
+@@ -51,7 +51,14 @@ def parse_create(arg_list):
+         # manpage.
+         # To be removed in the next significant version.
+         if e.message == "missing value of 'master' option":
+-            raise CmdLineInputError(message=e.message, hint=HINT_SYNTAX_CHANGE)
++            raise CmdLineInputError(
++                message=e.message,
++                hint=(
++                    "Master/Slave resources have been renamed to promotable "
++                    "clones, please use the 'promotable' keyword instead of "
++                    "'master'. " + SEE_MAN_CHANGES
++                )
++            )
+         raise e
+ 
+     return parts
+-- 
+2.21.1
+
diff --git a/SPECS/pcs.spec b/SPECS/pcs.spec
index 2699125..3804aab 100644
--- a/SPECS/pcs.spec
+++ b/SPECS/pcs.spec
@@ -1,6 +1,6 @@
 Name: pcs
-Version: 0.10.2
-Release: 4%{?dist}
+Version: 0.10.4
+Release: 6%{?dist}
 # https://fedoraproject.org/wiki/Licensing:Main?rd=Licensing#Good_Licenses
 # GPLv2: pcs
 # ASL 2.0: tornado
@@ -10,22 +10,25 @@ URL: https://github.com/ClusterLabs/pcs
 Group: System Environment/Base
 Summary: Pacemaker Configuration System
 #building only for architectures with pacemaker and corosync available
-ExclusiveArch: i686 x86_64 s390x ppc64le aarch64 %{arm}
+ExclusiveArch: i686 x86_64 s390x ppc64le aarch64
 
 %global version_or_commit %{version}
-# %%global version_or_commit f556c4adb538c8a95d763472d370f3f5d27eb38a
+# %%global version_or_commit 5c3f35d2819b0e8be0dcbe0ee8f81b9b24b20b54
+
 %global pcs_source_name %{name}-%{version_or_commit}
 
 # ui_commit can be determined by hash, tag or branch 
-%global ui_commit 0.1.1
+%global ui_commit 0.1.2
 %global ui_src_name pcs-web-ui-%{ui_commit}
 
 %global pcs_snmp_pkg_name  pcs-snmp
 
 %global pyagentx_version   0.4.pcs.2
-%global tornado_version    5.0.2
+%global tornado_version    6.0.3
 %global version_rubygem_backports  3.11.4
+%global version_rubygem_daemons  1.3.1
 %global version_rubygem_ethon  0.11.0
+%global version_rubygem_eventmachine  1.2.7
 %global version_rubygem_ffi  1.9.25
 %global version_rubygem_json  2.1.0
 %global version_rubygem_mustermann  1.0.3
@@ -34,6 +37,7 @@ ExclusiveArch: i686 x86_64 s390x ppc64le aarch64 %{arm}
 %global version_rubygem_rack_protection  2.0.4
 %global version_rubygem_rack_test  1.0.0
 %global version_rubygem_sinatra  2.0.4
+%global version_rubygem_thin  1.7.2
 %global version_rubygem_tilt  2.0.9
 
 # We do not use _libdir macro because upstream is not prepared for it.
@@ -67,8 +71,8 @@ Source0: %{url}/archive/%{version_or_commit}/%{pcs_source_name}.tar.gz
 Source1: HAM-logo.png
 Source2: pcsd-bundle-config-2
 
-Source41: https://github.com/ondrejmular/pyagentx/archive/v%{pyagentx_version}.tar.gz#/pyagentx-%{pyagentx_version}.tar.gz
-Source42: https://github.com/tornadoweb/tornado/archive/v%{tornado_version}.tar.gz#/tornado-%{tornado_version}.tar.gz
+Source41: https://github.com/ondrejmular/pyagentx/archive/v%{pyagentx_version}/pyagentx-%{pyagentx_version}.tar.gz
+Source42: https://github.com/tornadoweb/tornado/archive/v%{tornado_version}/tornado-%{tornado_version}.tar.gz
 
 Source81: https://rubygems.org/downloads/backports-%{version_rubygem_backports}.gem
 Source82: https://rubygems.org/downloads/ethon-%{version_rubygem_ethon}.gem
@@ -84,6 +88,9 @@ Source89: https://rubygems.org/downloads/rack-protection-%{version_rubygem_rack_
 Source90: https://rubygems.org/downloads/rack-test-%{version_rubygem_rack_test}.gem
 Source91: https://rubygems.org/downloads/sinatra-%{version_rubygem_sinatra}.gem
 Source92: https://rubygems.org/downloads/tilt-%{version_rubygem_tilt}.gem
+Source93: https://rubygems.org/downloads/eventmachine-%{version_rubygem_eventmachine}.gem
+Source94: https://rubygems.org/downloads/daemons-%{version_rubygem_daemons}.gem
+Source95: https://rubygems.org/downloads/thin-%{version_rubygem_thin}.gem
 
 Source100: https://github.com/idevat/pcs-web-ui/archive/%{ui_commit}/%{ui_src_name}.tar.gz
 Source101: https://github.com/idevat/pcs-web-ui/releases/download/%{ui_commit}/pcs-web-ui-node-modules-%{ui_commit}.tar.xz
@@ -92,10 +99,16 @@ Source101: https://github.com/idevat/pcs-web-ui/releases/download/%{ui_commit}/p
 # They should come before downstream patches to avoid unnecessary conflicts.
 # Z-streams are exception here: they can come from upstream but should be
 # applied at the end to keep z-stream changes as straightforward as possible.
-Patch1: bz1676957-01-fix-crashes-in-pcs-host-auth.patch
-Patch2: bz1657166-01-Updating-a-bundle-is-a-bit-cumber.patch
-Patch3: bz1725183-01-fix-and-options-for-non-root-users.patch
-Patch4: bz1740218-01-set-authkey-length-to-256-bytes.patch
+Patch1: bz1676431-01-Display-status-of-disaster-recovery.patch
+Patch2: bz1743731-01-fix-error-msg-when-cluster-is-not-set-up.patch
+Patch3: bz1792946-01-tests-update-for-pacemaker-2.0.3-4.patch
+Patch4: bz1781303-01-fix-safe-disabling-clones-groups-bundles.patch
+Patch5: update-a-hint-for-resource-create-master.patch
+Patch6: bz1793574-01-fix-detecting-fence-history-support.patch
+Patch7: bz1750427-01-link-to-sbd-man-page-from-sbd-enable-doc.patch
+Patch8: daemon-fix-cookie-options.patch
+Patch9: bz1783106-01-fix-sinatra-wrapper-performance-issue.patch
+Patch10: bz1783106-02-send-request-from-python-to-ruby-more-directly.patch
 
 # Downstream patches do not come from upstream. They adapt pcs for specific
 # RHEL needs.
@@ -132,6 +145,10 @@ BuildRequires: pacemaker-cli >= 2.0.0
 BuildRequires: fence-agents-apc
 BuildRequires: fence-agents-scsi
 BuildRequires: fence-agents-ipmilan
+# for tests
+%ifarch i686 x86_64
+BuildRequires: fence-virt
+%endif
 BuildRequires: booth-site
 # pcsd fonts and font management tools for creating symlinks to fonts
 BuildRequires: fontconfig
@@ -179,7 +196,9 @@ Requires: redhat-logos
 
 Provides: bundled(tornado) = %{tornado_version}
 Provides: bundled(backports) = %{version_rubygem_backports}
+Provides: bundled(daemons) = %{version_rubygem_daemons}
 Provides: bundled(ethon) = %{version_rubygem_ethon}
+Provides: bundled(eventmachine) = %{version_rubygem_eventmachine}
 Provides: bundled(ffi) = %{version_rubygem_ffi}
 Provides: bundled(json) = %{version_rubygem_json}
 Provides: bundled(mustermann) = %{version_rubygem_mustermann}
@@ -188,6 +207,7 @@ Provides: bundled(rack) = %{version_rubygem_rack}
 Provides: bundled(rack) = %{version_rubygem_rack_protection}
 Provides: bundled(rack) = %{version_rubygem_rack_test}
 Provides: bundled(sinatra) = %{version_rubygem_sinatra}
+Provides: bundled(thin) = %{version_rubygem_thin}
 Provides: bundled(tilt) = %{version_rubygem_tilt}
 
 %description
@@ -258,6 +278,12 @@ update_times_patch %{PATCH1}
 update_times_patch %{PATCH2}
 update_times_patch %{PATCH3}
 update_times_patch %{PATCH4}
+update_times_patch %{PATCH5}
+update_times_patch %{PATCH6}
+update_times_patch %{PATCH7}
+update_times_patch %{PATCH8}
+update_times_patch %{PATCH9}
+update_times_patch %{PATCH10}
 update_times_patch %{PATCH101}
 
 cp -f %SOURCE1 pcsd/public/images
@@ -287,6 +313,9 @@ cp -f %SOURCE89 pcsd/vendor/cache
 cp -f %SOURCE90 pcsd/vendor/cache
 cp -f %SOURCE91 pcsd/vendor/cache
 cp -f %SOURCE92 pcsd/vendor/cache
+cp -f %SOURCE93 pcsd/vendor/cache
+cp -f %SOURCE94 pcsd/vendor/cache
+cp -f %SOURCE95 pcsd/vendor/cache
 
 
 # 3) dir for python bundles
@@ -320,15 +349,18 @@ gem install \
   --force --verbose --no-rdoc --no-ri -l --no-user-install \
   -i %{rubygem_bundle_dir} \
   %{rubygem_cache_dir}/backports-%{version_rubygem_backports}.gem \
+  %{rubygem_cache_dir}/daemons-%{version_rubygem_daemons}.gem \
   %{rubygem_cache_dir}/ethon-%{version_rubygem_ethon}.gem \
+  %{rubygem_cache_dir}/eventmachine-%{version_rubygem_eventmachine}.gem \
   %{rubygem_cache_dir}/ffi-%{version_rubygem_ffi}.gem \
   %{rubygem_cache_dir}/json-%{version_rubygem_json}.gem \
   %{rubygem_cache_dir}/mustermann-%{version_rubygem_mustermann}.gem \
   %{rubygem_cache_dir}/open4-%{version_rubygem_open4}.gem \
-  %{rubygem_cache_dir}/rack-%{version_rubygem_rack}.gem \
   %{rubygem_cache_dir}/rack-protection-%{version_rubygem_rack_protection}.gem \
   %{rubygem_cache_dir}/rack-test-%{version_rubygem_rack_test}.gem \
+  %{rubygem_cache_dir}/rack-%{version_rubygem_rack}.gem \
   %{rubygem_cache_dir}/sinatra-%{version_rubygem_sinatra}.gem \
+  %{rubygem_cache_dir}/thin-%{version_rubygem_thin}.gem \
   %{rubygem_cache_dir}/tilt-%{version_rubygem_tilt}.gem \
   -- '--with-ldflags="-Wl,-z,relro -Wl,-z,ibt -Wl,-z,now -Wl,--gc-sections"' \
      '--with-cflags="-O2 -ffunction-sections"'
@@ -402,6 +434,8 @@ run_all_tests(){
   BUNDLED_LIB_LOCATION=$RPM_BUILD_ROOT%{pcs_libdir}/pcs/bundled/packages \
     %{__python3} pcs_test/suite.py -v --vanilla --all-but \
     pcs_test.tier0.lib.commands.test_resource_agent.DescribeAgentUtf8.test_describe \
+    pcs_test.tier0.daemon.app.test_app_remote.SyncConfigMutualExclusive.test_get_not_locked \
+    pcs_test.tier0.daemon.app.test_app_remote.SyncConfigMutualExclusive.test_post_not_locked \
 
   test_result_python=$?
 
@@ -431,20 +465,31 @@ remove_all_tests() {
 run_all_tests
 remove_all_tests
 
+%posttrans
+# Make sure the new version of the daemon is running.
+# Also, make sure to start pcsd-ruby if it hasn't been started or even
+# installed before. This is done by restarting pcsd.service.
+%{_bindir}/systemctl daemon-reload
+%{_bindir}/systemctl try-restart pcsd.service
+
+
 %post
 %systemd_post pcsd.service
+%systemd_post pcsd-ruby.service
 
 %post -n %{pcs_snmp_pkg_name}
 %systemd_post pcs_snmp_agent.service
 
 %preun
 %systemd_preun pcsd.service
+%systemd_preun pcsd-ruby.service
 
 %preun -n %{pcs_snmp_pkg_name}
 %systemd_preun pcs_snmp_agent.service
 
 %postun
 %systemd_postun_with_restart pcsd.service
+%systemd_postun_with_restart pcsd-ruby.service
 
 %postun -n %{pcs_snmp_pkg_name}
 %systemd_postun_with_restart pcs_snmp_agent.service
@@ -464,6 +509,7 @@ remove_all_tests
 %{pcs_libdir}/pcsd/.bundle/config
 %{pcs_libdir}/pcs/bundled/packages/tornado*
 %{_unitdir}/pcsd.service
+%{_unitdir}/pcsd-ruby.service
 %{_datadir}/bash-completion/completions/pcs
 %{_sharedstatedir}/pcsd
 %{_sysconfdir}/pam.d/pcsd
@@ -481,6 +527,7 @@ remove_all_tests
 %{_mandir}/man8/pcsd.*
 %exclude %{pcs_libdir}/pcsd/*.debian
 %exclude %{pcs_libdir}/pcsd/pcsd.service
+%exclude %{pcs_libdir}/pcsd/pcsd-ruby.service
 %exclude %{pcs_libdir}/pcsd/pcsd.conf
 %exclude %{pcs_libdir}/pcsd/pcsd.8
 %exclude %{pcs_libdir}/pcsd/public/js/dev/*
@@ -505,6 +552,48 @@ remove_all_tests
 %license pyagentx_LICENSE.txt
 
 %changelog
+* Fri Mar 20 2020 Miroslav Lisik <mlisik@redhat.com> - 0.10.4-6
+- Fixed communication between python and ruby daemons
+- Resolves: rhbz#1783106
+
+* Thu Feb 13 2020 Miroslav Lisik <mlisik@redhat.com> - 0.10.4-5
+- Fixed link to sbd man page from `sbd enable` doc
+- Fixed safe-disabling clones, groups, bundles
+- Fixed sinatra wrapper performance issue
+- Fixed detecting fence history support
+- Fixed cookie options
+- Updated hint for 'resource create ... master'
+- Updated gating tests execution, smoke tests run from upstream sources
+- Resolves: rhbz#1750427 rhbz#1781303 rhbz#1783106 rhbz#1793574
+
+* Mon Jan 20 2020 Tomas Jelinek <tojeline@redhat.com> - 0.10.4-4
+- Fix testsuite for pacemaker-2.0.3-4
+- Resolves: rhbz#1792946
+
+* Mon Dec 02 2019 Ivan Devat <idevat@redhat.com> - 0.10.4-3
+- Added basic resource views in new webUI
+- Resolves: rhbz#1744060
+
+* Fri Nov 29 2019 Miroslav Lisik <mlisik@redhat.com> - 0.10.4-2
+- Added disaster recovery support
+- Fixed error message when cluster is not set up
+- Removed '-g' option from rubygem's cflags because it does not generate .gnu_debugdata and option '-K' for strip command was removed
+- Resolves: rhbz#1676431 rhbz#1743731
+
+* Thu Nov 28 2019 Miroslav Lisik <mlisik@redhat.com> - 0.10.4-1
+- Rebased to latest upstream sources (see CHANGELOG.md)
+- Add '-g' to rubygem's cflags
+- Resolves: rhbz#1743704 rhbz#1741586 rhbz#1750427
+
+* Mon Nov 18 2019 Miroslav Lisik <mlisik@redhat.com> - 0.10.3-2
+- Rebased to latest upstream sources (see CHANGELOG.md)
+- Do not strip .gnu_debugdata section from binaries
+- Resolves: rhbz#1631514 rhbz#1631519 rhbz#1734361 rhbz#1743704
+
+* Mon Oct 21 2019 Miroslav Lisik <mlisik@redhat.com> - 0.10.3-1
+- Rebased to latest upstream sources (see CHANGELOG.md)
+- Resolves: rhbz#1442116 rhbz#1631514 rhbz#1631519 rhbz#1673835 rhbz#1698763 rhbz#1728890 rhbz#1734361 rhbz#1743704 rhbz#1743735 rhbz#1744056
+
 * Tue Aug 13 2019 Tomas Jelinek <tojeline@redhat.com> - 0.10.2-4
 - Generate 256 bytes long corosync authkey so clusters can start when FIPS is enabled
 - Resolves: rhbz#1740218